From commits-noreply at bitbucket.org Fri Apr 1 00:27:03 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 1 Apr 2011 00:27:03 +0200 (CEST) Subject: [pypy-svn] pypy jit-shadowstack: Fix the value according to the comment just below. Message-ID: <20110331222703.A0A7D282B9C@codespeak.net> Author: Amaury Forgeot d'Arc Branch: jit-shadowstack Changeset: r43063:46f58d4c45e3 Date: 2011-04-01 00:23 +0200 http://bitbucket.org/pypy/pypy/changeset/46f58d4c45e3/ Log: Fix the value according to the comment just below. (although 1Gb of L2 cache would certainly be nice) diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. better than default of 131k for most cases # in case it didn't work From commits-noreply at bitbucket.org Fri Apr 1 00:27:04 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 1 Apr 2011 00:27:04 +0200 (CEST) Subject: [pypy-svn] pypy jit-shadowstack: Enfore argument type for this function, to help some tests. Message-ID: <20110331222704.49ED0282BE8@codespeak.net> Author: Amaury Forgeot d'Arc Branch: jit-shadowstack Changeset: r43064:dd803046634a Date: 2011-04-01 00:23 +0200 http://bitbucket.org/pypy/pypy/changeset/dd803046634a/ Log: Enfore argument type for this function, to help some tests. 
diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -746,6 +746,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, From commits-noreply at bitbucket.org Fri Apr 1 07:29:10 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Fri, 1 Apr 2011 07:29:10 +0200 (CEST) Subject: [pypy-svn] pypy default: Try to fix wait3, how could it ever have worked? Message-ID: <20110401052910.1652F282B9C@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43065:674159439fdb Date: 2011-04-01 01:28 -0400 http://bitbucket.org/pypy/pypy/changeset/674159439fdb/ Log: Try to fix wait3, how could it ever have worked? diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -3,14 +3,14 @@ from resource import _struct_rusage, struct_rusage libc = CDLL(find_library("c")) -wait3 = libc.wait3 +c_wait3 = libc.wait3 -wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] def wait3(options): status = c_int() _rusage = _struct_rusage() - pid = wait3(byref(status), c_int(options), byref(_rusage)) + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) rusage = struct_rusage(( float(_rusage.ru_utime), From commits-noreply at bitbucket.org Fri Apr 1 11:03:02 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:02 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: merge arm-backend-2 Message-ID: <20110401090302.0A42C282B9C@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43066:bce69b9a0679 Date: 2011-03-31 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/bce69b9a0679/ Log: merge arm-backend-2 diff 
--git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -1,3 +1,4 @@ +from pypy.jit.backend.arm.helper.assembler import saved_registers from pypy.jit.backend.arm import conditions as c from pypy.jit.backend.arm import locations from pypy.jit.backend.arm import registers as r @@ -228,10 +229,9 @@ def _gen_leave_jitted_hook_code(self, save_exc=False): mc = ARMv7Builder() - mc.PUSH([reg.value for reg in r.caller_resp] + [r.ip.value]) - addr = self.cpu.get_on_leave_jitted_int(save_exception=save_exc) - mc.BL(addr) - mc.POP([reg.value for reg in r.caller_resp]+[r.ip.value]) + with saved_registers(mc, r.caller_resp + [r.ip]): + addr = self.cpu.get_on_leave_jitted_int(save_exception=save_exc) + mc.BL(addr) assert self._exit_code_addr != 0 mc.B(self._exit_code_addr) return mc.materialize(self.cpu.asmmemmgr, [], @@ -240,15 +240,13 @@ mc = ARMv7Builder() decode_registers_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) - mc.PUSH([reg.value for reg in r.all_regs]) # registers r0 .. r10 - mc.VPUSH([reg.value for reg in r.all_vfp_regs]) # registers d0 .. 
d15 - mc.MOV_rr(r.r0.value, r.ip.value) # move mem block address, to r0 - mc.MOV_rr(r.r1.value, r.fp.value) # pass the current frame pointer as second param - mc.MOV_rr(r.r2.value, r.sp.value) # pass the current stack pointer as third param + with saved_registers(mc, r.all_regs): + mc.MOV_rr(r.r0.value, r.ip.value) # move mem block address, to r0 to pass as + mc.MOV_rr(r.r1.value, r.fp.value) # pass the current frame pointer as second param + mc.MOV_rr(r.r2.value, r.sp.value) # pass the current stack pointer as third param - mc.BL(rffi.cast(lltype.Signed, decode_registers_addr)) - mc.MOV_rr(r.ip.value, r.r0.value) - mc.POP([reg.value for reg in r.all_regs]) + mc.BL(rffi.cast(lltype.Signed, decode_registers_addr)) + mc.MOV_rr(r.ip.value, r.r0.value) mc.MOV_rr(r.r0.value, r.ip.value) self.gen_func_epilog(mc=mc) return mc.materialize(self.cpu.asmmemmgr, [], diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -12,7 +12,7 @@ gen_emit_cmp_op, gen_emit_float_op, gen_emit_float_cmp_op, - gen_emit_unary_float_op) + gen_emit_unary_float_op, saved_registers) from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder from pypy.jit.backend.arm.jump import remap_frame_layout from pypy.jit.backend.arm.regalloc import Regalloc, TempInt @@ -274,33 +274,30 @@ t = TempBox() regalloc.force_allocate_reg(t, selected_reg=regalloc.call_result_location(t)) regalloc.possibly_free_var(t) - self.mc.PUSH([reg.value for reg in r.caller_resp][1:]) + saved_regs = r.caller_resp[1:] else: - self.mc.PUSH([reg.value for reg in r.caller_resp]) + saved_regs = r.caller_resp + with saved_registers(self.mc, saved_regs, regalloc=regalloc): + # all arguments past the 4th go on the stack + if n_args > 4: + stack_args = n_args - 4 + n = stack_args*WORD + self._adjust_sp(n, fcond=fcond) + for i in range(4, n_args): + self.mov_loc_loc(regalloc.loc(args[i]), r.ip) + 
self.mc.STR_ri(r.ip.value, r.sp.value, (i-4)*WORD) - # all arguments past the 4th go on the stack - if n_args > 4: - stack_args = n_args - 4 - n = stack_args*WORD - self._adjust_sp(n, fcond=fcond) - for i in range(4, n_args): - self.mov_loc_loc(regalloc.loc(args[i]), r.ip) - self.mc.STR_ri(r.ip.value, r.sp.value, (i-4)*WORD) + #the actual call + self.mc.BL(adr) + regalloc.possibly_free_vars(args) + # readjust the sp in case we passed some args on the stack + if n_args > 4: + assert n > 0 + self._adjust_sp(-n, fcond=fcond) - #the actual call - self.mc.BL(adr) - regalloc.possibly_free_vars(args) - # readjust the sp in case we passed some args on the stack - if n_args > 4: - assert n > 0 - self._adjust_sp(-n, fcond=fcond) - - # restore the argumets stored on the stack - if result is not None: - regalloc.after_call(result) - self.mc.POP([reg.value for reg in r.caller_resp][1:]) - else: - self.mc.POP([reg.value for reg in r.caller_resp]) + # restore the argumets stored on the stack + if result is not None: + regalloc.after_call(result) return fcond def emit_op_same_as(self, op, arglocs, regalloc, fcond): @@ -686,11 +683,10 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self.mc.PUSH([reg.value for reg in r.caller_resp][1:]) - # resbox is allready in r0 - self.mov_loc_loc(arglocs[1], r.r1) - self.mc.BL(asm_helper_adr) - self.mc.POP([reg.value for reg in r.caller_resp][1:]) + with saved_registers(self.mc, r.caller_resp[1:], regalloc=regalloc): + # resbox is allready in r0 + self.mov_loc_loc(arglocs[1], r.r1) + self.mc.BL(asm_helper_adr) if op.result: regalloc.after_call(op.result) # jump to merge point diff --git a/pypy/jit/backend/arm/helper/assembler.py b/pypy/jit/backend/arm/helper/assembler.py --- a/pypy/jit/backend/arm/helper/assembler.py +++ b/pypy/jit/backend/arm/helper/assembler.py @@ -1,3 +1,4 @@ +from __future__ import with_statement from pypy.jit.backend.arm import conditions as c 
from pypy.jit.backend.arm import registers as r from pypy.jit.backend.arm.codebuilder import AbstractARMv7Builder @@ -31,14 +32,11 @@ def f(self, op, arglocs, regalloc, fcond): assert fcond is not None if op.result: - self.mc.PUSH([reg.value for reg in r.caller_resp][1:]) + regs = r.caller_resp[1:] else: - self.mc.PUSH([reg.value for reg in r.caller_resp]) - helper(self.mc, fcond) - if op.result: - self.mc.POP([reg.value for reg in r.caller_resp][1:]) - else: - self.mc.POP([reg.value for reg in r.caller_resp]) + regs = r.caller_resp + with saved_registers(self.mc, regs, regalloc=regalloc): + helper(self.mc, fcond) return fcond return f @@ -81,3 +79,27 @@ self.mc.MOV_ri(res.value, 0, cond=inv) return fcond return f + +class saved_registers(object): + def __init__(self, assembler, regs_to_save, regalloc=None): + self.assembler = assembler + self.regalloc = regalloc + if self.regalloc: + self._filter_regs(regs_to_save) + else: + self.regs = regs_to_save + + def __enter__(self): + if len(self.regs) > 0: + self.assembler.PUSH([r.value for r in self.regs]) + + def __exit__(self, *args): + if len(self.regs) > 0: + self.assembler.POP([r.value for r in self.regs]) + + def _filter_regs(self, regs_to_save): + regs = [] + for box, reg in self.regalloc.reg_bindings.iteritems(): + if reg in regs_to_save or reg is r.ip: + regs.append(reg) + self.regs = regs From commits-noreply at bitbucket.org Fri Apr 1 11:03:04 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:04 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: disable the second entry point for loops until the calling convention implementation supports float Message-ID: <20110401090304.9B15B282B9C@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43067:34a60527b2d4 Date: 2011-03-31 11:44 +0200 http://bitbucket.org/pypy/pypy/changeset/34a60527b2d4/ Log: disable the second entry point for loops until the calling convention implementation supports float diff --git 
a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -361,6 +361,8 @@ return arglocs def gen_direct_bootstrap_code(self, arglocs, loop_head, looptoken): + #XXX fixme later, when float calling conv works + return self.gen_func_prolog() if len(arglocs) > 4: reg_args = 4 From commits-noreply at bitbucket.org Fri Apr 1 11:03:05 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:05 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: fix the order of register allocation and freeing to avoid allocation a register for two arguments at the same time Message-ID: <20110401090305.4CB71282B9C@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43068:3efca17c0fbd Date: 2011-03-31 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/3efca17c0fbd/ Log: fix the order of register allocation and freeing to avoid allocation a register for two arguments at the same time diff --git a/pypy/jit/backend/arm/helper/regalloc.py b/pypy/jit/backend/arm/helper/regalloc.py --- a/pypy/jit/backend/arm/helper/regalloc.py +++ b/pypy/jit/backend/arm/helper/regalloc.py @@ -56,17 +56,18 @@ locs = [] loc1, box1 = self._ensure_value_is_boxed(op.getarg(0)) locs.append(loc1) - self.vfprm.possibly_free_var(box1) if base: loc2, box2 = self._ensure_value_is_boxed(op.getarg(1)) locs.append(loc2) self.vfprm.possibly_free_var(box2) + self.vfprm.possibly_free_var(box1) if float_result: res = self.vfprm.force_allocate_reg(op.result) self.vfprm.possibly_free_var(op.result) else: res = self.rm.force_allocate_reg(op.result) self.rm.possibly_free_var(op.result) + self.vfprm.possibly_free_var(box1) locs.append(res) return locs return f From commits-noreply at bitbucket.org Fri Apr 1 11:03:07 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:07 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: save vfp registers around calls 
Message-ID: <20110401090307.24366282BE8@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43069:81b2937e8130 Date: 2011-03-31 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/81b2937e8130/ Log: save vfp registers around calls diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -240,7 +240,7 @@ mc = ARMv7Builder() decode_registers_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) - with saved_registers(mc, r.all_regs): + with saved_registers(mc, r.all_regs, r.all_vfp_regs): mc.MOV_rr(r.r0.value, r.ip.value) # move mem block address, to r0 to pass as mc.MOV_rr(r.r1.value, r.fp.value) # pass the current frame pointer as second param mc.MOV_rr(r.r2.value, r.sp.value) # pass the current stack pointer as third param @@ -289,7 +289,7 @@ else: assert 0, 'unknown type' - if loc.is_reg(): + if loc.is_reg() or loc.is_vfp_reg(): mem[j] = chr(loc.value) j += 1 elif loc.is_imm(): @@ -592,16 +592,17 @@ # regalloc support def load(self, loc, value): - assert loc.is_reg() + assert (loc.is_reg() and values.is_imm() + or loc.is_vfp_reg() and value.is_imm_float()) if value.is_imm(): self.mc.gen_load_int(loc.value, value.getint()) elif value.is_imm_float(): - #XXX this is wrong self.mc.gen_load_int(r.ip.value, value.getint()) - self.mc.VLDR(loc.value, r.ip.value) + self.mc.VLDR(loc.value, r.ip.value) - # XXX needs float support + # XXX needs float support def regalloc_mov(self, prev_loc, loc, cond=c.AL): + import pdb; pdb.set_trace() if prev_loc.is_imm(): if loc.is_reg(): new_loc = loc diff --git a/pypy/jit/backend/arm/helper/assembler.py b/pypy/jit/backend/arm/helper/assembler.py --- a/pypy/jit/backend/arm/helper/assembler.py +++ b/pypy/jit/backend/arm/helper/assembler.py @@ -81,25 +81,37 @@ return f class saved_registers(object): - def __init__(self, assembler, regs_to_save, regalloc=None): + def __init__(self, assembler, 
regs_to_save, vfp_regs_to_save=None, regalloc=None): self.assembler = assembler self.regalloc = regalloc + if vfp_regs_to_save is None: + vfp_regs_to_save = [] if self.regalloc: - self._filter_regs(regs_to_save) + self._filter_regs(regs_to_save, vfp_regs_to_save) else: self.regs = regs_to_save + self.vfp_regs = vfp_regs_to_save def __enter__(self): if len(self.regs) > 0: self.assembler.PUSH([r.value for r in self.regs]) + if len(self.vfp_regs) > 0: + self.assembler.VPUSH([r.value for r in self.vfp_regs]) def __exit__(self, *args): + if len(self.vfp_regs) > 0: + self.assembler.VPOP([r.value for r in self.vfp_regs]) if len(self.regs) > 0: self.assembler.POP([r.value for r in self.regs]) - def _filter_regs(self, regs_to_save): + def _filter_regs(self, regs_to_save, vfp_regs_to_save): regs = [] - for box, reg in self.regalloc.reg_bindings.iteritems(): + for box, reg in self.regalloc.rm.reg_bindings.iteritems(): if reg in regs_to_save or reg is r.ip: regs.append(reg) self.regs = regs + regs = [] + for box, reg in self.regalloc.vfprm.reg_bindings.iteritems(): + if reg in vfp_regs_to_save: + regs.append(reg) + self.vfp_regs = regs diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -12,6 +12,9 @@ def is_reg(self): return False + def is_vfp_reg(self): + return False + def is_imm_float(self): return False @@ -42,6 +45,12 @@ def __repr__(self): return 'f%d' % self.value + def is_reg(self): + return False + + def is_vfp_reg(self): + return True + class ImmLocation(AssemblerLocation): _immutable_ = True @@ -61,6 +70,8 @@ return self.value + 20 class ConstFloatLoc(AssemblerLocation): + """This class represents an imm float value which is stored in memory at + the address stored in the field value""" _immutable_ = True def __init__(self, value): @@ -70,7 +81,7 @@ return self.value def __repr__(self): - return "imm_float(%d)" % (self.value) + return "imm_float(stored 
at %d)" % (self.value) def is_imm_float(self): return True From commits-noreply at bitbucket.org Fri Apr 1 11:03:11 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:11 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: spill vfp registers around calls Message-ID: <20110401090311.B6264282BE8@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43070:13974a14d7b0 Date: 2011-03-31 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/13974a14d7b0/ Log: spill vfp registers around calls diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -229,7 +229,8 @@ def _gen_leave_jitted_hook_code(self, save_exc=False): mc = ARMv7Builder() - with saved_registers(mc, r.caller_resp + [r.ip]): + # XXX add a check if cpu supports floats + with saved_registers(mc, r.caller_resp + [r.ip], r.caller_vfp_resp): addr = self.cpu.get_on_leave_jitted_int(save_exception=save_exc) mc.BL(addr) assert self._exit_code_addr != 0 @@ -329,10 +330,14 @@ mc = self.mc mc.MOV_rr(r.sp.value, r.fp.value, cond=cond) mc.ADD_ri(r.sp.value, r.sp.value, WORD, cond=cond) + if self.cpu.supports_floats: + mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers]) mc.POP([reg.value for reg in r.callee_restored_registers], cond=cond) def gen_func_prolog(self): self.mc.PUSH([reg.value for reg in r.callee_saved_registers]) + if self.cpu.supports_floats: + self.mc.VPUSH([reg.value for reg in r.callee_saved_vfp_registers]) self.mc.SUB_ri(r.sp.value, r.sp.value, WORD) self.mc.MOV_rr(r.fp.value, r.sp.value) @@ -592,7 +597,7 @@ # regalloc support def load(self, loc, value): - assert (loc.is_reg() and values.is_imm() + assert (loc.is_reg() and value.is_imm() or loc.is_vfp_reg() and value.is_imm_float()) if value.is_imm(): self.mc.gen_load_int(loc.value, value.getint()) @@ -602,7 +607,6 @@ # XXX needs float support def regalloc_mov(self, prev_loc, loc, 
cond=c.AL): - import pdb; pdb.set_trace() if prev_loc.is_imm(): if loc.is_reg(): new_loc = loc diff --git a/pypy/jit/backend/arm/helper/assembler.py b/pypy/jit/backend/arm/helper/assembler.py --- a/pypy/jit/backend/arm/helper/assembler.py +++ b/pypy/jit/backend/arm/helper/assembler.py @@ -35,7 +35,7 @@ regs = r.caller_resp[1:] else: regs = r.caller_resp - with saved_registers(self.mc, regs, regalloc=regalloc): + with saved_registers(self.mc, regs, r.caller_vfp_resp, regalloc=regalloc): helper(self.mc, fcond) return fcond return f diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -43,7 +43,7 @@ return [VFPRegisterLocation(i) for i in [self.value*2, self.value*2+1]] def __repr__(self): - return 'f%d' % self.value + return 'vfp%d' % self.value def is_reg(self): return False From commits-noreply at bitbucket.org Fri Apr 1 11:03:14 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:14 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: start extending the calling convention implementation to support floats Message-ID: <20110401090314.3C7AC282BEB@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43071:44a933e393c8 Date: 2011-03-31 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/44a933e393c8/ Log: start extending the calling convention implementation to support floats diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -641,6 +641,10 @@ assert 0, 'unsupported case' elif loc.is_reg() and prev_loc.is_reg(): self.mc.MOV_rr(loc.value, prev_loc.value, cond=cond) + elif loc.is_reg() and prev_loc.is_vfp_reg(): + self.mc.VMOV_rc(loc.value, prev_loc.value, cond=cond) + elif loc.is_vfp_reg() and prev_loc.is_reg(): + self.mc.VMOV_cr(loc.value, prev_loc.value, cond=cond) else: assert 0, 
'unsupported case' mov_loc_loc = regalloc_mov diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -248,7 +248,8 @@ descr = op.getdescr() #XXX Hack, Hack, Hack if op.result and not we_are_translated() and not isinstance(descr, LoopToken): - loc = regalloc.call_result_location(op.result) + #XXX check result type + loc = regalloc.rm.call_result_location(op.result) size = descr.get_result_size(False) signed = descr.is_result_signed() self._ensure_result_bit_extension(loc, size, signed) @@ -258,13 +259,28 @@ # emit_op_call_may_force # XXX improve freeing of stuff here def _emit_call(self, adr, args, regalloc, fcond=c.AL, result=None): - n = 0 n_args = len(args) - reg_args = min(n_args, 4) - # prepare arguments passed in registers - for i in range(0, reg_args): - l = regalloc.make_sure_var_in_reg(args[i], - selected_reg=r.all_regs[i]) + reg_args = 0 + for x in range(min(n_args, 4)): + if args[x].type == FLOAT: + reg_args += 2 + else: + reg_args += 1 + if reg_args > 4: + reg_args = x - 1 + break + + # collect the locations of the arguments and spill those that are in + # the caller saved registers + locs = [] + for v in range(reg_args): + var = args[v] + loc = regalloc.loc(var) + if loc in r.caller_resp: + regalloc.force_spill(var) + loc = regalloc.loc(var) + locs.append(loc) + # save caller saved registers if result: # XXX hack if the call has a result force the value in r0 to be @@ -274,10 +290,25 @@ t = TempBox() regalloc.force_allocate_reg(t, selected_reg=regalloc.call_result_location(t)) regalloc.possibly_free_var(t) - saved_regs = r.caller_resp[1:] + if result.type == FLOAT: + saved_regs = r.caller_resp[2:] + else: + saved_regs = r.caller_resp[1:] else: saved_regs = r.caller_resp - with saved_registers(self.mc, saved_regs, regalloc=regalloc): + + with saved_registers(self.mc, saved_regs, r.caller_vfp_resp, regalloc): + # move variables to the 
argument registers + num = 0 + for i in range(reg_args): + arg = args[i] + reg = r.all_regs[num] + self.mov_loc_loc(locs[i], reg) + if arg.type == FLOAT: + num += 2 + else: + num += 1 + # all arguments past the 4th go on the stack if n_args > 4: stack_args = n_args - 4 @@ -297,7 +328,13 @@ # restore the argumets stored on the stack if result is not None: - regalloc.after_call(result) + # support floats here + resloc = regalloc.after_call(result) + if result.type == FLOAT: + # XXX ugly and fragile + # move result to the allocated register + self.mov_loc_loc(resloc, r.r0) + return fcond def emit_op_same_as(self, op, arglocs, regalloc, fcond): @@ -683,7 +720,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - with saved_registers(self.mc, r.caller_resp[1:], regalloc=regalloc): + with saved_registers(self.mc, r.caller_resp[1:], r.caller_vfp_resp, regalloc=regalloc): # resbox is allready in r0 self.mov_loc_loc(arglocs[1], r.r1) self.mc.BL(asm_helper_adr) diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -72,6 +72,41 @@ | 0xB << 8 | nregs) self.write32(instr) + + def VMOV_rc(self, rt, dm, cond=cond.AL): + """This instruction copies two words from two ARM core registers into a + doubleword extension register, or from a doubleword extension register + to two ARM core registers. + This implementation is modified in way that it takes to consecutive + core registers (rt and rt+1)""" + rt2 = rt + 1 + op = 1 + instr = (cond << 28 + | 0xC << 24 + | 0x4 << 20 + | op << 20 + | (rt2 & 0xF) << 16 + | (rt & 0xF) << 12 + | 0xB << 8 + | (dm & 0xF)) + + # VMOV , , + def VMOV_cr(self, dm, rt, cond=cond.AL): + """This instruction copies two words from two ARM core registers into a + doubleword extension register, or from a doubleword extension register + to two ARM core registers. 
+ This implementation is modified in way that it takes to consecutive + core registers (rt and rt+1)""" + rt2 = rt + 1 + op = 0 + instr = (cond << 28 + | 0xC << 24 + | 0x4 << 20 + | op << 20 + | (rt2 & 0xF) << 16 + | (rt & 0xF) << 12 + | 0xB << 8 + | (dm & 0xF)) def VCVT_float_to_int(self, target, source, cond=cond.AL): opc2 = 0x5 diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -69,6 +69,13 @@ def __init__(self, longevity, frame_manager=None, assembler=None): RegisterManager.__init__(self, longevity, frame_manager, assembler) + def after_call(self, v): + """ Adjust registers according to the result of the call, + which is in variable v. + """ + self._check_type(v) + r = self.force_allocate_reg(v) + return r class ARMv7RegisterMananger(RegisterManager): all_regs = r.all_regs box_types = None # or a list of acceptable types @@ -136,6 +143,12 @@ else: return self.rm.stays_alive(v) + def call_result_location(self, v): + if v.type == FLOAT: + return self.vfprm.call_result_location(v) + else: + return self.rm.call_result_location(v) + def after_call(self, v): if v.type == FLOAT: return self.vfprm.after_call(v) From commits-noreply at bitbucket.org Fri Apr 1 11:03:18 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:18 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: continue implemeting float support in the calling convention Message-ID: <20110401090318.5A6C3282C1E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43072:70c086bd2476 Date: 2011-03-31 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/70c086bd2476/ Log: continue implemeting float support in the calling convention diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -205,6 +205,18 @@ 
self.reg_bindings[v] = loc return loc + def force_spill_var(self, var): + self._sync_var(var) + try: + loc = self.reg_bindings[var] + del self.reg_bindings[var] + self.free_regs.append(loc) + except KeyError: + if not we_are_translated(): + import pdb; pdb.set_trace() + else: + raise ValueError + def loc(self, box): """ Return the location of 'box'. """ diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -89,6 +89,7 @@ | (rt & 0xF) << 12 | 0xB << 8 | (dm & 0xF)) + self.write32(instr) # VMOV , , def VMOV_cr(self, dm, rt, cond=cond.AL): @@ -107,6 +108,7 @@ | (rt & 0xF) << 12 | 0xB << 8 | (dm & 0xF)) + self.write32(instr) def VCVT_float_to_int(self, target, source, cond=cond.AL): opc2 = 0x5 diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -98,7 +98,7 @@ # One of INT, REF, FLOAT assert num_words == 1 assert type == INT - #self.type = type + self.type = type def frame_size(self): return self.width // WORD diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -641,6 +641,7 @@ assert 0, 'unsupported case' elif loc.is_reg() and prev_loc.is_reg(): self.mc.MOV_rr(loc.value, prev_loc.value, cond=cond) + #XXX these instructions do not work elif loc.is_reg() and prev_loc.is_vfp_reg(): self.mc.VMOV_rc(loc.value, prev_loc.value, cond=cond) elif loc.is_vfp_reg() and prev_loc.is_reg(): @@ -655,6 +656,8 @@ self.mc.PUSH([r.ip.value]) elif loc.is_reg(): self.mc.PUSH([loc.value]) + elif loc.is_vfp_reg(): + self.mc.VPUSH([loc.value]) else: assert 0, 'ffuu' diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -269,16 +269,22 
@@ if reg_args > 4: reg_args = x - 1 break + + #spill all vars that are stored in caller saved registers + #XXX good idea?? + vars_to_spill = [] + for v, reg in regalloc.rm.reg_bindings.iteritems(): + if reg in r.caller_resp: + vars_to_spill.append(v) - # collect the locations of the arguments and spill those that are in - # the caller saved registers + for v in vars_to_spill: + regalloc.force_spill_var(v) + # collect the locations of the arguments that go in the argument + # registers locs = [] for v in range(reg_args): var = args[v] loc = regalloc.loc(var) - if loc in r.caller_resp: - regalloc.force_spill(var) - loc = regalloc.loc(var) locs.append(loc) # save caller saved registers @@ -300,9 +306,10 @@ with saved_registers(self.mc, saved_regs, r.caller_vfp_resp, regalloc): # move variables to the argument registers num = 0 + import pdb; pdb.set_trace() for i in range(reg_args): arg = args[i] - reg = r.all_regs[num] + reg = r.caller_resp[num] self.mov_loc_loc(locs[i], reg) if arg.type == FLOAT: num += 2 @@ -310,13 +317,14 @@ num += 1 # all arguments past the 4th go on the stack - if n_args > 4: - stack_args = n_args - 4 - n = stack_args*WORD - self._adjust_sp(n, fcond=fcond) - for i in range(4, n_args): - self.mov_loc_loc(regalloc.loc(args[i]), r.ip) - self.mc.STR_ri(r.ip.value, r.sp.value, (i-4)*WORD) + if n_args > reg_args: + n = 0 + for i in range(n_args-1, reg_args-1, -1): + if args[i].type == FLOAT: + n += 2*WORD + else: + n += WORD + self.regalloc_push(regalloc.loc(args[i])) #the actual call self.mc.BL(adr) @@ -330,8 +338,8 @@ if result is not None: # support floats here resloc = regalloc.after_call(result) + # XXX ugly and fragile if result.type == FLOAT: - # XXX ugly and fragile # move result to the allocated register self.mov_loc_loc(resloc, r.r0) diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -46,8 +46,9 @@ @staticmethod def frame_pos(loc, 
type): + assert type == INT # XXX for now we only have one word stack locs - return locations.StackLocation(loc) + return locations.StackLocation(loc, type=type) def void(self, op, fcond): return [] @@ -228,17 +229,10 @@ def force_spill_var(self, var): - self._sync_var(var) - try: - loc = self.reg_bindings[var] - del self.reg_bindings[var] - self.free_regs.append(loc) - except KeyError: - if not we_are_translated(): - import pdb; pdb.set_trace() - else: - raise ValueError - + if var.type == FLOAT: + self.vfprm.force_spill_var(var) + else: + self.rm.force_spill_var(var) def _ensure_value_is_boxed(self, thing, forbidden_vars=[]): box = None From commits-noreply at bitbucket.org Fri Apr 1 11:03:19 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:19 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: add missing bit to VMOV instructions Message-ID: <20110401090319.B36A2282BE9@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43073:7bf4fce6b7f1 Date: 2011-04-01 09:41 +0200 http://bitbucket.org/pypy/pypy/changeset/7bf4fce6b7f1/ Log: add missing bit to VMOV instructions diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -88,6 +88,7 @@ | (rt2 & 0xF) << 16 | (rt & 0xF) << 12 | 0xB << 8 + | 0x1 << 4 | (dm & 0xF)) self.write32(instr) @@ -107,6 +108,7 @@ | (rt2 & 0xF) << 16 | (rt & 0xF) << 12 | 0xB << 8 + | 0x1 << 4 | (dm & 0xF)) self.write32(instr) From commits-noreply at bitbucket.org Fri Apr 1 11:03:20 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:20 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: pass arguments correctly and restore a float result correctly Message-ID: <20110401090320.B57D6282BEC@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43074:678b40482cf8 Date: 2011-04-01 10:04 +0200 
http://bitbucket.org/pypy/pypy/changeset/678b40482cf8/ Log: pass arguments correctly and restore a float result correctly diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -261,20 +261,22 @@ def _emit_call(self, adr, args, regalloc, fcond=c.AL, result=None): n_args = len(args) reg_args = 0 + words = 0 for x in range(min(n_args, 4)): if args[x].type == FLOAT: - reg_args += 2 + words += 2 else: - reg_args += 1 - if reg_args > 4: - reg_args = x - 1 + words += 1 + reg_args += 1 + if words > 4: + reg_args = x break - + #spill all vars that are stored in caller saved registers #XXX good idea?? vars_to_spill = [] for v, reg in regalloc.rm.reg_bindings.iteritems(): - if reg in r.caller_resp: + if reg in r.caller_resp and regalloc.stays_alive(v): vars_to_spill.append(v) for v in vars_to_spill: @@ -289,13 +291,6 @@ # save caller saved registers if result: - # XXX hack if the call has a result force the value in r0 to be - # spilled - if reg_args == 0 or (isinstance(args[0], Box) and - regalloc.stays_alive(args[0])): - t = TempBox() - regalloc.force_allocate_reg(t, selected_reg=regalloc.call_result_location(t)) - regalloc.possibly_free_var(t) if result.type == FLOAT: saved_regs = r.caller_resp[2:] else: @@ -306,7 +301,6 @@ with saved_registers(self.mc, saved_regs, r.caller_vfp_resp, regalloc): # move variables to the argument registers num = 0 - import pdb; pdb.set_trace() for i in range(reg_args): arg = args[i] reg = r.caller_resp[num] @@ -341,7 +335,7 @@ # XXX ugly and fragile if result.type == FLOAT: # move result to the allocated register - self.mov_loc_loc(resloc, r.r0) + self.mov_loc_loc(r.r0, resloc) return fcond From commits-noreply at bitbucket.org Fri Apr 1 11:03:21 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:21 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: improve interface of VMOV Message-ID: 
<20110401090321.BF438282BE9@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43075:6e9a2057e911 Date: 2011-04-01 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/6e9a2057e911/ Log: improve interface of VMOV diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -641,11 +641,10 @@ assert 0, 'unsupported case' elif loc.is_reg() and prev_loc.is_reg(): self.mc.MOV_rr(loc.value, prev_loc.value, cond=cond) - #XXX these instructions do not work elif loc.is_reg() and prev_loc.is_vfp_reg(): - self.mc.VMOV_rc(loc.value, prev_loc.value, cond=cond) + self.mc.VMOV_rc(loc.value, loc.value+1, prev_loc.value, cond=cond) elif loc.is_vfp_reg() and prev_loc.is_reg(): - self.mc.VMOV_cr(loc.value, prev_loc.value, cond=cond) + self.mc.VMOV_cr(loc.value, prev_loc.value, prev_loc.value+1, cond=cond) else: assert 0, 'unsupported case' mov_loc_loc = regalloc_mov diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -73,13 +73,11 @@ | nregs) self.write32(instr) - def VMOV_rc(self, rt, dm, cond=cond.AL): + def VMOV_rc(self, rt, rt2, dm, cond=cond.AL): """This instruction copies two words from two ARM core registers into a doubleword extension register, or from a doubleword extension register to two ARM core registers. - This implementation is modified in way that it takes to consecutive - core registers (rt and rt+1)""" - rt2 = rt + 1 + """ op = 1 instr = (cond << 28 | 0xC << 24 @@ -93,13 +91,11 @@ self.write32(instr) # VMOV , , - def VMOV_cr(self, dm, rt, cond=cond.AL): + def VMOV_cr(self, dm, rt, rt2, cond=cond.AL): """This instruction copies two words from two ARM core registers into a doubleword extension register, or from a doubleword extension register to two ARM core registers. 
- This implementation is modified in way that it takes to consecutive - core registers (rt and rt+1)""" - rt2 = rt + 1 + """ op = 0 instr = (cond << 28 | 0xC << 24 From commits-noreply at bitbucket.org Fri Apr 1 11:03:22 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:22 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: here we also need to spill caller saved registers that are not used after the call Message-ID: <20110401090322.63898282C1A@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43076:331ea1f83386 Date: 2011-04-01 10:59 +0200 http://bitbucket.org/pypy/pypy/changeset/331ea1f83386/ Log: here we also need to spill caller saved registers that are not used after the call diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -276,7 +276,7 @@ #XXX good idea?? vars_to_spill = [] for v, reg in regalloc.rm.reg_bindings.iteritems(): - if reg in r.caller_resp and regalloc.stays_alive(v): + if reg in r.caller_resp: vars_to_spill.append(v) for v in vars_to_spill: From commits-noreply at bitbucket.org Fri Apr 1 11:03:23 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 11:03:23 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: Around calls save only registers that are going to be used afterwards Message-ID: <20110401090323.CE35F282C22@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43077:f8185b0f5341 Date: 2011-04-01 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/f8185b0f5341/ Log: Around calls save only registers that are going to be used afterwards diff --git a/pypy/jit/backend/arm/helper/assembler.py b/pypy/jit/backend/arm/helper/assembler.py --- a/pypy/jit/backend/arm/helper/assembler.py +++ b/pypy/jit/backend/arm/helper/assembler.py @@ -107,11 +107,11 @@ def _filter_regs(self, regs_to_save, vfp_regs_to_save): regs = [] 
for box, reg in self.regalloc.rm.reg_bindings.iteritems(): - if reg in regs_to_save or reg is r.ip: + if reg is r.ip or (reg in regs_to_save and self.regalloc.stays_alive(box)): regs.append(reg) self.regs = regs regs = [] for box, reg in self.regalloc.vfprm.reg_bindings.iteritems(): - if reg in vfp_regs_to_save: + if reg in vfp_regs_to_save and self.regalloc.stays_alive(box): regs.append(reg) self.vfp_regs = regs From commits-noreply at bitbucket.org Fri Apr 1 13:57:24 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 1 Apr 2011 13:57:24 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix the value according to the comment just below. Message-ID: <20110401115724.1F7F9282B9C@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43078:4d87baf21b10 Date: 2011-04-01 00:23 +0200 http://bitbucket.org/pypy/pypy/changeset/4d87baf21b10/ Log: Fix the value according to the comment just below. (although 1Gb of L2 cache would certainly be nice) diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. better than default of 131k for most cases # in case it didn't work From commits-noreply at bitbucket.org Fri Apr 1 14:01:52 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 1 Apr 2011 14:01:52 +0200 (CEST) Subject: [pypy-svn] pypy jit-shadowstack: Fix translation: 'count' was an unsigned number here. Message-ID: <20110401120152.1AB50282B9C@codespeak.net> Author: Armin Rigo Branch: jit-shadowstack Changeset: r43079:08ddd81eb627 Date: 2011-04-01 12:02 +0000 http://bitbucket.org/pypy/pypy/changeset/08ddd81eb627/ Log: Fix translation: 'count' was an unsigned number here. 
diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): From commits-noreply at bitbucket.org Fri Apr 1 15:17:59 2011 From: commits-noreply at bitbucket.org (l.diekmann) Date: Fri, 1 Apr 2011 15:17:59 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110401131759.D8F1A282B9E@codespeak.net> Author: Lukas Diekmann Branch: Changeset: r43081:f21b9f8c7bbc Date: 2011-04-01 15:17 +0200 http://bitbucket.org/pypy/pypy/changeset/f21b9f8c7bbc/ Log: merge diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. 
better than default of 131k for most cases # in case it didn't work From commits-noreply at bitbucket.org Fri Apr 1 15:17:59 2011 From: commits-noreply at bitbucket.org (l.diekmann) Date: Fri, 1 Apr 2011 15:17:59 +0200 (CEST) Subject: [pypy-svn] pypy default: (cfbolz, l.diekmann): fix dict.setdefault to only do one hash computation and dict lookup Message-ID: <20110401131759.51520282B9C@codespeak.net> Author: Lukas Diekmann Branch: Changeset: r43080:a0f33904910e Date: 2011-04-01 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/a0f33904910e/ Log: (cfbolz, l.diekmann): fix dict.setdefault to only do one hash computation and dict lookup diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -108,6 +108,11 @@ #return w_value or None return None + def impl_setdefault(self, w_key, w_default): + # here the dict is always empty + self._as_rdict().impl_fallback_setitem(w_key, w_default) + return w_default + def impl_setitem(self, w_key, w_value): self._as_rdict().impl_fallback_setitem(w_key, w_value) @@ -181,6 +186,9 @@ # _________________________________________________________________ # fallback implementation methods + def impl_fallback_setdefault(self, w_key, w_default): + return self.r_dict_content.setdefault(w_key, w_default) + def impl_fallback_setitem(self, w_key, w_value): self.r_dict_content[w_key] = w_value @@ -227,6 +235,7 @@ ("length", 0), ("setitem_str", 2), ("setitem", 2), + ("setdefault", 2), ("delitem", 1), ("iter", 0), ("items", 0), @@ -317,6 +326,14 @@ def impl_setitem_str(self, key, w_value): self.content[key] = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + return self.content.setdefault(space.str_w(w_key), w_default) + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + + def impl_delitem(self, w_key): space = self.space 
w_key_type = space.type(w_key) @@ -788,12 +805,7 @@ def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): # XXX should be more efficient, with only one dict lookup - w_value = w_dict.getitem(w_key) - if w_value is not None: - return w_value - else: - w_dict.setitem(w_key, w_default) - return w_default + return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): len_defaults = len(defaults_w) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -604,6 +604,18 @@ else: self._as_rdict().impl_fallback_setitem(w_key, w_value) + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + key = space.str_w(w_key) + w_result = self.impl_getitem_str(key) + if w_result is not None: + return w_result + self.impl_setitem_str(key, w_default) + return w_default + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import \ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ @@ -259,7 +260,22 @@ d[33] = 99 assert d == dd assert x == 99 - + + def test_setdefault_fast(self): + class Key(object): + calls = 0 + def __hash__(self): + self.calls += 1 + return object.__hash__(self) + + k = Key() + d = {} + d.setdefault(k, []) + assert k.calls == 1 + + d.setdefault(k, 1) + assert k.calls == 2 + def test_update(self): d = {1:2, 3:4} dd = d.copy() @@ -704,13 +720,20 @@ class FakeString(str): + hash_count = 0 def unwrap(self, 
space): self.unwrapped = True return str(self) + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: + hash_count = 0 def hash_w(self, obj): + self.hash_count += 1 return hash(obj) def unwrap(self, x): return x @@ -726,6 +749,8 @@ return [] DictObjectCls = W_DictMultiObject def type(self, w_obj): + if isinstance(w_obj, FakeString): + return str return type(w_obj) w_str = str def str_w(self, string): @@ -890,6 +915,19 @@ impl.setitem(x, x) assert impl.r_dict_content is not None + def test_setdefault_fast(self): + on_pypy = "__pypy__" in sys.builtin_module_names + impl = self.impl + key = FakeString(self.string) + x = impl.setdefault(key, 1) + assert x == 1 + if on_pypy: + assert key.hash_count == 1 + x = impl.setdefault(key, 2) + assert x == 1 + if on_pypy: + assert key.hash_count == 2 + class TestStrDictImplementation(BaseTestRDictImplementation): ImplementionClass = StrDictImplementation diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -34,11 +34,7 @@ @jit.purefunction def _getcell_makenew(self, key): - res = self.content.get(key, None) - if res is not None: - return res - result = self.content[key] = ModuleCell() - return result + return self.content.setdefault(key, ModuleCell()) def impl_setitem(self, w_key, w_value): space = self.space @@ -50,6 +46,16 @@ def impl_setitem_str(self, name, w_value): self.getcell(name, True).w_value = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + cell = self.getcell(space.str_w(w_key), True) + if cell.w_value is None: + cell.w_value = w_default + return cell.w_value + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) From commits-noreply at bitbucket.org 
Fri Apr 1 15:46:23 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 1 Apr 2011 15:46:23 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix this test when run with "-A" on top of CPython. Message-ID: <20110401134623.C0B1C282B9C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43082:92490211ef94 Date: 2011-04-01 15:45 +0200 http://bitbucket.org/pypy/pypy/changeset/92490211ef94/ Log: Fix this test when run with "-A" on top of CPython. Also fix it when run on top of PyPy to check that when setdefault() inserts the value in a *non-empty* dict, it is still done with only one call to __hash__(). diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -804,7 +804,6 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -152,6 +152,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -271,10 +273,17 @@ k = Key() d = {} d.setdefault(k, []) - assert k.calls == 1 + if self.on_pypy: + assert k.calls == 1 d.setdefault(k, 1) - assert k.calls == 2 + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 def test_update(self): d = {1:2, 3:4} From commits-noreply at bitbucket.org Fri Apr 1 16:05:07 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 16:05:07 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: loading of float value 
into core and vfp registers Message-ID: <20110401140507.EAB48282B9C@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43083:adef3a5e35f5 Date: 2011-04-01 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/adef3a5e35f5/ Log: loading of float value into core and vfp registers diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -620,7 +620,19 @@ prev_loc = new_loc if not loc.is_stack(): return - + if prev_loc.is_imm_float(): + temp = r.lr + self.mc.gen_load_int(temp.value, prev_loc.getint()) + if loc.is_reg(): + # we need to load one word to loc and one to loc+1 which are + # two 32-bit core registers + self.mc.LDR_ri(loc.value, temp.value) + self.mc.LDR_ri(loc.value+1, temp.value, imm=WORD) + elif loc.is_vfp_reg(): + # we need to load the thing into loc, which is a vfp reg + self.mc.VLDR(loc.value, temp.value) + assert not loc.is_stack() + return if loc.is_stack() or prev_loc.is_stack(): temp = r.lr if loc.is_stack() and prev_loc.is_reg(): From commits-noreply at bitbucket.org Fri Apr 1 16:05:09 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 16:05:09 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: start adding support for floats to call_assembler Message-ID: <20110401140509.12A5C282B9E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43084:9a2e9ccf4dee Date: 2011-04-01 12:15 +0200 http://bitbucket.org/pypy/pypy/changeset/9a2e9ccf4dee/ Log: start adding support for floats to call_assembler diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -366,19 +366,20 @@ return arglocs def gen_direct_bootstrap_code(self, arglocs, loop_head, looptoken): - #XXX fixme later, when float calling conv works - return self.gen_func_prolog() - if len(arglocs) > 4: - reg_args = 
4 - else: - reg_args = len(arglocs) + #import pdb; pdb.set_trace() + reg_args = self._count_reg_args(arglocs) stack_locs = len(arglocs) - reg_args + selected_reg = 0 for i in range(reg_args): loc = arglocs[i] - self.mov_loc_loc(r.all_regs[i], loc) + self.mov_loc_loc(r.all_regs[selected_reg], loc) + if arglocs[i].type == FLOAT: + selected_reg += 2 + else: + selected_reg += 1 for i in range(stack_locs): loc = arglocs[reg_args + i] diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -255,6 +255,19 @@ self._ensure_result_bit_extension(loc, size, signed) return cond + def _count_reg_args(self, args): + reg_args = 0 + words = 0 + for x in range(min(len(args), 4)): + if args[x].type == FLOAT: + words += 2 + else: + words += 1 + reg_args += 1 + if words > 4: + reg_args = x + break + return reg_args # XXX improve this interface # emit_op_call_may_force # XXX improve freeing of stuff here @@ -330,7 +343,6 @@ # restore the argumets stored on the stack if result is not None: - # support floats here resloc = regalloc.after_call(result) # XXX ugly and fragile if result.type == FLOAT: @@ -726,8 +738,13 @@ # resbox is allready in r0 self.mov_loc_loc(arglocs[1], r.r1) self.mc.BL(asm_helper_adr) - if op.result: - regalloc.after_call(op.result) + if op.result: + resloc = regalloc.after_call(op.result) + # XXX ugly and fragile + if op.result.type == FLOAT: + # move result to the allocated register + self.mov_loc_loc(r.r0, resloc) + # jump to merge point jmp_pos = self.mc.currpos() #jmp_location = self.mc.curraddr() @@ -746,6 +763,7 @@ fielddescr = jd.vable_token_descr assert isinstance(fielddescr, BaseFieldDescr) ofs = fielddescr.offset + import pdb; pdb.set_trace() resloc = regalloc.force_allocate_reg(resbox) self.mov_loc_loc(arglocs[1], r.ip) self.mc.MOV_ri(resloc.value, 0) @@ -759,12 +777,17 @@ adr = self.fail_boxes_int.get_addr_for_num(0) elif kind == REF: adr 
= self.fail_boxes_ptr.get_addr_for_num(0) + elif kind == FLOAT: + adr = self.fail_boxes_float.get_addr_for_num(0) else: raise AssertionError(kind) resloc = regalloc.force_allocate_reg(op.result) regalloc.possibly_free_var(resbox) self.mc.gen_load_int(r.ip.value, adr) - self.mc.LDR_ri(resloc.value, r.ip.value) + if op.result.type == FLOAT: + self.mc.VLDR(resloc.value, r.ip.value) + else: + self.mc.LDR_ri(resloc.value, r.ip.value) # merge point offset = self.mc.currpos() - jmp_pos diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -1,7 +1,8 @@ -from pypy.jit.metainterp.history import INT +from pypy.jit.metainterp.history import INT, FLOAT from pypy.jit.backend.arm.arch import WORD class AssemblerLocation(object): _immutable_ = True + type = INT def is_imm(self): return False @@ -38,6 +39,7 @@ class VFPRegisterLocation(RegisterLocation): _immutable_ = True + type = FLOAT def get_single_precision_regs(self): return [VFPRegisterLocation(i) for i in [self.value*2, self.value*2+1]] @@ -73,6 +75,7 @@ """This class represents an imm float value which is stored in memory at the address stored in the field value""" _immutable_ = True + type = FLOAT def __init__(self, value): self.value = value From commits-noreply at bitbucket.org Fri Apr 1 16:05:10 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 16:05:10 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: add float support for (get/set)field and (get/set)arrayitem operations Message-ID: <20110401140510.46635282B9E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43085:1fc6793fb908 Date: 2011-04-01 14:06 +0200 http://bitbucket.org/pypy/pypy/changeset/1fc6793fb908/ Log: add float support for (get/set)field and (get/set)arrayitem operations diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- 
a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -394,7 +394,14 @@ def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs, size = arglocs - if size.value == 4: + if size.value == 8: + assert value_loc.is_vfp_reg() + if ofs.is_reg(): + base_loc = r.ip + ofs = imm(0) + self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) + self.mc.VSTR(value_loc.value, base_loc.value, ofs.value) + elif size.value == 4: if ofs.is_imm(): self.mc.STR_ri(value_loc.value, base_loc.value, ofs.value) else: @@ -417,7 +424,14 @@ def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond): base_loc, ofs, res, size = arglocs - if size.value == 4: + if size.value == 8: + assert res.is_vfp_reg() + if ofs.is_reg(): + base_loc = r.ip + ofs = imm(0) + self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) + self.mc.VLDR(res.value, base_loc.value, ofs.value) + elif size.value == 4: if ofs.is_imm(): self.mc.LDR_ri(res.value, base_loc.value, ofs.value) else: @@ -470,7 +484,10 @@ self.mc.ADD_ri(r.ip.value, scale_loc.value, ofs.value) scale_loc = r.ip - if scale.value == 2: + if scale.value == 4: + assert value_loc.is_vfp_reg() + self.mc.VSTR(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) + elif scale.value == 2: self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) elif scale.value == 1: self.mc.STRH_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) @@ -493,7 +510,10 @@ self.mc.ADD_ri(r.ip.value, scale_loc.value, imm=ofs.value) scale_loc = r.ip - if scale.value == 2: + if scale.value == 4: + assert res.is_vfp_reg() + self.mc.VLDR(res.value, base_loc.value, scale_loc.value, cond=fcond) + elif scale.value == 2: self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) elif scale.value == 1: self.mc.LDRH_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) From commits-noreply at bitbucket.org Fri Apr 1 16:05:11 2011 From: commits-noreply at bitbucket.org (bivab) Date: 
Fri, 1 Apr 2011 16:05:11 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: support floats in guard_value Message-ID: <20110401140511.1EB89282B9E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43086:513b86cd49d4 Date: 2011-04-01 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/513b86cd49d4/ Log: support floats in guard_value diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -168,10 +168,15 @@ l1 = arglocs[1] failargs = arglocs[2:] - if l1.is_imm(): - self.mc.CMP_ri(l0.value, l1.getint()) - else: - self.mc.CMP_rr(l0.value, l1.value) + if l0.is_reg(): + if l1.is_imm(): + self.mc.CMP_ri(l0.value, l1.getint()) + else: + self.mc.CMP_rr(l0.value, l1.value) + elif l0.is_vfp_reg(): + assert l1.is_vfp_reg() + self.mc.VCMP(l0.value, l1.value) + self.mc.VMRS(cond=fcond) fcond = self._emit_guard(op, failargs, c.EQ) return fcond @@ -783,7 +788,6 @@ fielddescr = jd.vable_token_descr assert isinstance(fielddescr, BaseFieldDescr) ofs = fielddescr.offset - import pdb; pdb.set_trace() resloc = regalloc.force_allocate_reg(resbox) self.mov_loc_loc(arglocs[1], r.ip) self.mc.MOV_ri(resloc.value, 0) From commits-noreply at bitbucket.org Fri Apr 1 16:05:11 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 16:05:11 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: replace uses of TempBox with TempXXX which contain type information Message-ID: <20110401140511.D5F0B282B9E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43087:49ddd428caa5 Date: 2011-04-01 16:01 +0200 http://bitbucket.org/pypy/pypy/changeset/49ddd428caa5/ Log: replace uses of TempBox with TempXXX which contain type information diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -238,14 +238,12 @@ box = 
None loc = None if isinstance(thing, Const): - if isinstance(thing, ConstInt): - box = TempInt() - elif isinstance(thing, ConstPtr): + if isinstance(thing, ConstPtr): box = TempPtr() elif isinstance(thing, ConstFloat): box = TempFloat() else: - box = TempBox() + box = TempInt() loc = self.force_allocate_reg(box, forbidden_vars=forbidden_vars) if isinstance(thing, ConstFloat): @@ -449,7 +447,7 @@ arg0 = ConstInt(rffi.cast(lltype.Signed, op.getarg(0).getint())) loc, box = self._ensure_value_is_boxed(arg0) boxes.append(box) - box = TempBox() + box = TempInt() loc1 = self.force_allocate_reg(box, boxes) boxes.append(box) if op.result in self.longevity: @@ -484,7 +482,7 @@ x, x_box = self._ensure_value_is_boxed(boxes[0], boxes) boxes.append(x_box) - t = TempBox() + t = TempInt() y = self.force_allocate_reg(t, boxes) boxes.append(t) y_val = rffi.cast(lltype.Signed, op.getarg(1).getint()) @@ -833,7 +831,7 @@ arglocs = [] for i in range(len(args)): arg = args[i] - t = TempBox() + t = TempInt() l = self.force_allocate_reg(t, selected_reg=r.all_regs[i]) self.assembler.load(l, imm(arg)) arglocs.append(t) From commits-noreply at bitbucket.org Fri Apr 1 16:05:13 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 16:05:13 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: change the float comparisson operations a bit, because transfering the condition flags from the vfp to the core processor flags changes slighlty the meaning of the condition codes Message-ID: <20110401140513.11233282B9E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43088:32b4f23cc658 Date: 2011-04-01 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/32b4f23cc658/ Log: change the float comparisson operations a bit, because transfering the condition flags from the vfp to the core processor flags changes slighlty the meaning of the condition codes diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- 
a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -898,8 +898,8 @@ emit_op_float_neg = gen_emit_unary_float_op('VNEG') emit_op_float_abs = gen_emit_unary_float_op('VABS') - emit_op_float_lt = gen_emit_float_cmp_op(c.LT) - emit_op_float_le = gen_emit_float_cmp_op(c.LE) + emit_op_float_lt = gen_emit_float_cmp_op(c.VFP_LT) + emit_op_float_le = gen_emit_float_cmp_op(c.VFP_LE) emit_op_float_eq = gen_emit_float_cmp_op(c.EQ) emit_op_float_ne = gen_emit_float_cmp_op(c.NE) emit_op_float_gt = gen_emit_float_cmp_op(c.GT) diff --git a/pypy/jit/backend/arm/conditions.py b/pypy/jit/backend/arm/conditions.py --- a/pypy/jit/backend/arm/conditions.py +++ b/pypy/jit/backend/arm/conditions.py @@ -18,3 +18,7 @@ def get_opposite_of(operation): return opposites[operation] +# see mapping for floating poin according to +# http://blogs.arm.com/software-enablement/405-condition-codes-4-floating-point-comparisons-using-vfp/ +VFP_LT = CC +VFP_LE = LS From commits-noreply at bitbucket.org Fri Apr 1 17:12:34 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 1 Apr 2011 17:12:34 +0200 (CEST) Subject: [pypy-svn] pypy default: Merge jit-shadowstack, adding support for the shadowstack gc root Message-ID: <20110401151234.685E6282B9C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43089:d968d3b683fb Date: 2011-04-01 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d968d3b683fb/ Log: Merge jit-shadowstack, adding support for the shadowstack gc root tracker in the JIT too. diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that needs to go via its slow path. 
+ import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -854,6 +854,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -313,11 +313,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. 
Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). """ for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -325,9 +326,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,3 +1,4 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror @@ -212,10 +213,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -224,7 +227,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. 
# The addresses are actually grouped in pairs: @@ -237,6 +240,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... @@ -365,7 +375,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +398,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. + """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. + # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. 
+ + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= 
self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw') + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +567,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +576,9 @@ # where it can be fished and reused by the FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- 
a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -151,8 +152,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -500,6 +506,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = self.gcdata.gc.gcheaderbuilder.HDR @@ -786,6 +796,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1327,6 +1346,14 @@ 
return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1348,12 +1375,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1460,12 +1486,11 @@ # collect all valid stacks from the dict (the entry # corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr = end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, 
edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 @@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def __init__(self, assembler, translate_support_code=False): @@ -135,16 +165,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -738,8 +758,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None + self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 
2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -840,16 +864,26 @@ assert isinstance(descr, BaseSizeDescr) gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. - for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) + + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_fixedsize_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. 
+ for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) self.assembler.malloc_cond_fixedsize( gc_ll_descr.get_nursery_free_addr(), @@ -1132,7 +1166,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1200,18 +1234,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from 
pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = 
self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -127,7 +129,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" 
block @@ -147,7 +149,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -163,7 +164,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -487,7 +487,9 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime - + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) + # experimental operations in support of thread cloning, only # implemented by the Mark&Sweep GC 'gc_x_swap_pool': LLOp(canraise=(MemoryError,), canunwindgc=True), diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -746,6 +746,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -349,6 +349,8 @@ INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), '\x05', immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', 
orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +368,8 @@ INSN_bi32(mc, offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,13 +447,13 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -540,6 +543,9 @@ # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- 
a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -133,6 +132,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" + self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -145,6 +145,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -171,25 +172,46 @@ self.float_const_abs_addr = float_constants + 16 def _build_malloc_fixedsize_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. 
- mc.MOV_rr(edi.value, edx.value) - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. + mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_fixedsize_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -544,7 +566,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. 
for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -557,6 +579,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. not translated) @@ -578,12 +604,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -693,8 +739,8 @@ nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = 
eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -903,7 +949,7 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: return self._emit_call_64(x, arglocs, start) @@ -931,9 +977,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start=0): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -991,12 +1037,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. 
+ gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1790,6 +1851,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1803,8 +1868,8 @@ tmp = ecx else: tmp = eax - - self._emit_call(x, arglocs, 3, tmp=tmp) + + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1835,7 +1900,7 @@ faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') @@ -1849,8 +1914,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1875,7 +1940,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) 
if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -1902,7 +1967,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -1997,11 +2062,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2032,12 +2102,19 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. 
- slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_fixedsize_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) slowpath_addr2 = self.malloc_fixedsize_slowpath2 self.mc.CALL(imm(slowpath_addr2)) @@ -2045,6 +2122,7 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -6,7 +6,7 @@ """ import weakref, random -import py +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -72,6 +72,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport 
import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -87,8 +101,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts='') + patch = get_functions_to_patch() + old_value = {} + try: + for (obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts='') + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source() cbuilder.compile() @@ -127,7 +154,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark GC. 
def setup_class(cls): funcs = [] @@ -178,15 +205,21 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True) + gcrootfinder=cls.gcrootfinder, jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -576,3 +609,10 @@ def test_compile_framework_minimal_size_in_nursery(self): self.run('compile_framework_minimal_size_in_nursery') + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" From commits-noreply at bitbucket.org Fri Apr 1 17:12:34 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 1 Apr 2011 17:12:34 +0200 (CEST) Subject: [pypy-svn] pypy jit-shadowstack: Close branch. Message-ID: <20110401151234.C6FCF282B9E@codespeak.net> Author: Armin Rigo Branch: jit-shadowstack Changeset: r43090:356cd517771a Date: 2011-04-01 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/356cd517771a/ Log: Close branch. 
From commits-noreply at bitbucket.org Fri Apr 1 17:17:11 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 17:17:11 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: correctly support floats in (get/set)arrayitem Message-ID: <20110401151711.E8BE7282B9C@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43091:a5909f2a375f Date: 2011-04-01 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a5909f2a375f/ Log: correctly support floats in (get/set)arrayitem diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -489,8 +489,12 @@ self.mc.ADD_ri(r.ip.value, scale_loc.value, ofs.value) scale_loc = r.ip - if scale.value == 4: + if scale.value == 3: assert value_loc.is_vfp_reg() + if scale_loc.is_reg(): + self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) + base_loc = r.ip + scale_loc = locations.imm(0) self.mc.VSTR(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) elif scale.value == 2: self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) @@ -515,8 +519,12 @@ self.mc.ADD_ri(r.ip.value, scale_loc.value, imm=ofs.value) scale_loc = r.ip - if scale.value == 4: + if scale.value == 3: assert res.is_vfp_reg() + if scale_loc.is_reg(): + self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) + base_loc = r.ip + scale_loc = locations.imm(0) self.mc.VLDR(res.value, base_loc.value, scale_loc.value, cond=fcond) elif scale.value == 2: self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) From commits-noreply at bitbucket.org Fri Apr 1 17:24:09 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 1 Apr 2011 17:24:09 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: simplify the code a bit, here we now values are in register Message-ID: <20110401152409.9EA3636C202@codespeak.net> Author: David Schneider Branch: arm-backed-float 
Changeset: r43092:b083504273b0 Date: 2011-04-01 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/b083504273b0/ Log: simplify the code a bit, here we now values are in register diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -491,11 +491,9 @@ if scale.value == 3: assert value_loc.is_vfp_reg() - if scale_loc.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) - base_loc = r.ip - scale_loc = locations.imm(0) - self.mc.VSTR(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) + assert scale_loc.is_reg() + self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) + self.mc.VSTR(value_loc.value, r.ip.value, cond=fcond) elif scale.value == 2: self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond) elif scale.value == 1: @@ -521,11 +519,9 @@ if scale.value == 3: assert res.is_vfp_reg() - if scale_loc.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) - base_loc = r.ip - scale_loc = locations.imm(0) - self.mc.VLDR(res.value, base_loc.value, scale_loc.value, cond=fcond) + assert scale_loc.is_reg() + self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value) + self.mc.VLDR(res.value, r.ip.value, cond=fcond) elif scale.value == 2: self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value, cond=fcond) elif scale.value == 1: From commits-noreply at bitbucket.org Fri Apr 1 17:26:22 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 1 Apr 2011 17:26:22 +0200 (CEST) Subject: [pypy-svn] pypy default: Skip or fix some tests when run with -A Message-ID: <20110401152622.A5E8D282B9C@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43093:7293a2ae4f45 Date: 2011-03-31 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7293a2ae4f45/ Log: Skip or fix some tests when run with -A diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py 
--- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -35,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -1007,7 +1007,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): From commits-noreply at bitbucket.org Fri Apr 1 17:26:23 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 1 Apr 2011 17:26:23 +0200 (CEST) Subject: [pypy-svn] pypy default: Add __package__ support to the import machinery Message-ID: <20110401152623.7028C282B9C@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43094:e5ce4f03d51a Date: 2011-04-01 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/e5ce4f03d51a/ Log: Add __package__ support to the import machinery diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -438,6 +438,38 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg + def test__package__(self): + # Regression test for http://bugs.python.org/issue3221. + def check_absolute(): + exec "from os import path" in ns + def check_relative(): + exec "from . 
import a" in ns + + # Check both OK with __package__ and __name__ correct + ns = dict(__package__='pkg', __name__='pkg.notarealmodule') + check_absolute() + check_relative() + + # Check both OK with only __name__ wrong + ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') + check_absolute() + check_relative() + + # Check relative fails with only __package__ wrong + ns = dict(__package__='foo', __name__='pkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check relative fails with __package__ and __name__ wrong + ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check both fail with package set to a non-string + ns = dict(__package__=object()) + raises(ValueError, check_absolute) + raises(ValueError, check_relative) + def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -118,6 +118,105 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) +def _get_relative_name(space, modulename, level, w_globals): + w = space.wrap + ctxt_w_package = space.finditem(w_globals, w('__package__')) + + ctxt_package = None + if ctxt_w_package is not None and ctxt_w_package is not space.w_None: + try: + ctxt_package = space.str_w(ctxt_w_package) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_ValueError, space.wrap( + "__package__ set to non-string")) + + if ctxt_package is not None: + # __package__ is set, so use it + package_parts = ctxt_package.split('.') + while level > 1 and package_parts: + level -= 1 + if package_parts: + package_parts.pop() + if not package_parts: + if len(ctxt_package) == 0: + msg = "Attempted 
relative import in non-package" + else: + msg = "Attempted relative import beyond toplevel package" + raise OperationError(space.w_ValueError, w(msg)) + + # Try to import parent package + try: + w_parent = absolute_import(space, ctxt_package, 0, + None, tentative=False) + except OperationError, e: + if not e.match(space, space.w_ImportError): + raise + if level > 0: + raise OperationError(space.w_SystemError, space.wrap( + "Parent module '%s' not loaded, " + "cannot perform relative import" % ctxt_package)) + else: + space.warn("Parent module '%s' not found " + "while handling absolute import" % ctxt_package, + space.w_RuntimeWarning) + + if modulename: + package_parts.append(modulename) + rel_level = len(package_parts) + rel_modulename = '.'.join(package_parts) + else: + # __package__ not set, so figure it out and set it + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) + + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + + if not ctxt_name: + return None, 0 + + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + + if level > 0 and not ctxt_name_prefix_parts: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + + rel_modulename = '.'.join(ctxt_name_prefix_parts) + + if ctxt_w_path is not None: + # __path__ is set, so __name__ is already the package name + space.setitem(w_globals, w("__package__"), ctxt_w_name) + else: + # Normal module, so work out the package name if any + if '.' 
not in ctxt_name: + space.setitem(w_globals, w("__package__"), space.w_None) + elif rel_modulename: + space.setitem(w_globals, w("__package__"), w(rel_modulename)) + + if modulename: + if rel_modulename: + rel_modulename += '.' + modulename + else: + rel_modulename = modulename + + rel_level = len(ctxt_name_prefix_parts) + + return rel_modulename, rel_level + + @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -139,58 +238,30 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) + rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise + if rel_modulename: + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False - if ctxt_name is not None: - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - if ctxt_name_prefix_parts: - rel_modulename = '.'.join(ctxt_name_prefix_parts) - if modulename: - rel_modulename += '.' + modulename - baselevel = len(ctxt_name_prefix_parts) - if rel_modulename is not None: - # XXX What is this check about? 
There is no test for it - w_mod = check_sys_modules(space, w(rel_modulename)) + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod - if (w_mod is None or - not space.is_w(w_mod, space.w_None) or - level > 0): + ## if level > 0: + ## msg = "Attempted relative import in non-package" + ## raise OperationError(space.w_ValueError, w(msg)) - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. - if level == -1: - tentative = True - else: - tentative = False + ## if not modulename: + ## return None - w_mod = absolute_import(space, rel_modulename, - baselevel, fromlist_w, - tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod - else: - rel_modulename = None - - if level > 0: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) w_mod = absolute_import_try(space, modulename, 0, fromlist_w) if w_mod is None or space.is_w(w_mod, space.w_None): w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) From commits-noreply at bitbucket.org Fri Apr 1 17:43:59 2011 From: commits-noreply at bitbucket.org (l.diekmann) Date: Fri, 1 Apr 2011 17:43:59 +0200 (CEST) Subject: [pypy-svn] pypy new-dict-proxy: (cfbolz, l.diekmann): rewrite dict proxies as a new dict implementation. This Message-ID: <20110401154359.AE001282B9C@codespeak.net> Author: Lukas Diekmann Branch: new-dict-proxy Changeset: r43095:1a99cd95989a Date: 2011-04-01 17:43 +0200 http://bitbucket.org/pypy/pypy/changeset/1a99cd95989a/ Log: (cfbolz, l.diekmann): rewrite dict proxies as a new dict implementation. This is a lot simpler than the current hack. It changes behaviour vs. CPython slightly: the dictproxy looks and behaves (almost) like a normal dict. 
diff --git a/pypy/objspace/std/dictproxytype.py b/pypy/objspace/std/dictproxytype.py deleted file mode 100644 --- a/pypy/objspace/std/dictproxytype.py +++ /dev/null @@ -1,51 +0,0 @@ -from pypy.interpreter import gateway -from pypy.interpreter.typedef import GetSetProperty -from pypy.interpreter.error import OperationError -from pypy.objspace.std.stdtypedef import StdTypeDef - -# ____________________________________________________________ - -def _proxymethod(name): - def fget(space, w_obj): - from pypy.objspace.std.dictproxyobject import W_DictProxyObject - if not isinstance(w_obj, W_DictProxyObject): - raise OperationError(space.w_TypeError, - space.wrap("expected dictproxy")) - return space.getattr(w_obj.w_dict, space.wrap(name)) - return GetSetProperty(fget) - -def _compareproxymethod(opname): - def compare(space, w_obj1, w_obj2): - from pypy.objspace.std.dictproxyobject import W_DictProxyObject - if not isinstance(w_obj1, W_DictProxyObject): - raise OperationError(space.w_TypeError, - space.wrap("expected dictproxy")) - return getattr(space, opname)(w_obj1.w_dict, w_obj2) - compare.func_name = "dictproxy_compare_%s" % (opname, ) - return gateway.interp2app(compare) - -# ____________________________________________________________ - -dictproxy_typedef = StdTypeDef("dictproxy", - has_key = _proxymethod('has_key'), - get = _proxymethod('get'), - keys = _proxymethod('keys'), - values = _proxymethod('values'), - items = _proxymethod('items'), - iterkeys = _proxymethod('iterkeys'), - itervalues = _proxymethod('itervalues'), - iteritems = _proxymethod('iteritems'), - copy = _proxymethod('copy'), - __len__ = _proxymethod('__len__'), - __getitem__ = _proxymethod('__getitem__'), - __contains__ = _proxymethod('__contains__'), - __str__ = _proxymethod('__str__'), - __iter__ = _proxymethod('__iter__'), - __lt__ = _compareproxymethod('lt'), - __le__ = _compareproxymethod('le'), - __eq__ = _compareproxymethod('eq'), - __ne__ = _compareproxymethod('ne'), - __gt__ = 
_compareproxymethod('gt'), - __ge__ = _compareproxymethod('ge'), -) -dictproxy_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -1,15 +1,86 @@ from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation +from pypy.interpreter.error import OperationError -def descr_get_dictproxy(space, w_obj): - return W_DictProxyObject(w_obj.getdict(space)) +class W_DictProxyObject(W_DictMultiObject): + def __init__(w_self, space, w_type): + W_DictMultiObject.__init__(w_self, space) + w_self.w_type = w_type -class W_DictProxyObject(W_Object): - from pypy.objspace.std.dictproxytype import dictproxy_typedef as typedef + def impl_getitem(self, w_lookup): + space = self.space + w_lookup_type = space.type(w_lookup) + if space.is_w(w_lookup_type, space.w_str): + return self.impl_getitem_str(space.str_w(w_lookup)) + else: + return None - def __init__(w_self, w_dict): - w_self.w_dict = w_dict + def impl_getitem_str(self, lookup): + return self.w_type.getdictvalue(self.space, lookup) -registerimplementation(W_DictProxyObject) + def impl_setitem(self, w_key, w_value): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + self.impl_setitem_str(self.space.str_w(w_key), w_value) + else: + raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type")) -register_all(vars()) + def impl_setitem_str(self, name, w_value): + self.w_type.setdictvalue(self.space, name, w_value) + + def impl_setdefault(self, w_key, w_default): + space = self.space + w_result = self.impl_getitem(w_key) + if w_result is not None: + return w_result + self.impl_setitem(w_key, w_default) + return w_default + + def impl_delitem(self, w_key): + space = 
self.space + w_key_type = space.type(w_key) + if space.is_w(w_key_type, space.w_str): + if not self.w_type.deldictvalue(space, w_key): + raise KeyError + else: + raise KeyError + + def impl_length(self): + return len(self.w_type.dict_w) + + def impl_iter(self): + return DictProxyIteratorImplementation(self.space, self) + + def impl_keys(self): + space = self.space + return [space.wrap(key) for key in self.w_type.dict_w.iterkeys()] + + def impl_values(self): + return self.w_type.dict_w.values() + + def impl_items(self): + space = self.space + return [space.newtuple([space.wrap(key), w_value]) + for (key, w_value) in self.w_type.dict_w.iteritems()] + + def impl_clear(self): + self.w_type.dict_w.clear() + self.w_type.mutated() + + def _as_rdict(self): + assert 0, "should be unreachable" + + def _clear_fields(self): + assert 0, "should be unreachable" + +class DictProxyIteratorImplementation(IteratorImplementation): + def __init__(self, space, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + self.iterator = dictimplementation.w_type.dict_w.iteritems() + + def next_entry(self): + for key, w_value in self.iterator: + return (self.space.wrap(key), w_value) + else: + return (None, None) diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -4,18 +4,21 @@ def test_dictproxy(self): class NotEmpty(object): a = 1 - assert isinstance(NotEmpty.__dict__, dict) == False + #assert isinstance(NotEmpty.__dict__, dict) == False assert 'a' in NotEmpty.__dict__ assert 'a' in NotEmpty.__dict__.keys() assert 'b' not in NotEmpty.__dict__ - assert isinstance(NotEmpty.__dict__.copy(), dict) - assert NotEmpty.__dict__ == NotEmpty.__dict__.copy() - try: - NotEmpty.__dict__['b'] = 1 - except: - pass - else: - raise AssertionError, 'this should not have been writable' + #assert isinstance(NotEmpty.__dict__.copy(), 
dict) + NotEmpty.__dict__['b'] = 4 + assert NotEmpty.b == 4 + del NotEmpty.__dict__['b'] + assert NotEmpty.__dict__.get("b") is None + raises(TypeError, 'NotEmpty.__dict__[15] = "y"') + raises(KeyError, 'del NotEmpty.__dict__[15]') + assert NotEmpty.__dict__.setdefault("string", 1) == 1 + assert NotEmpty.__dict__.setdefault("string", 2) == 1 + assert NotEmpty.string == 1 + raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)') def test_dictproxyeq(self): class a(object): @@ -34,6 +37,6 @@ class a(object): pass s = repr(a.__dict__) - assert s.startswith('') + #assert s.startswith('') s = str(a.__dict__) assert s.startswith('{') and s.endswith('}') diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -54,7 +54,6 @@ from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.longtype import long_typedef from pypy.objspace.std.unicodetype import unicode_typedef - from pypy.objspace.std.dictproxytype import dictproxy_typedef from pypy.objspace.std.nonetype import none_typedef from pypy.objspace.std.itertype import iter_typedef self.pythontypes = [value for key, value in result.__dict__.items() @@ -123,7 +122,6 @@ iterobject.W_FastTupleIterObject: [], iterobject.W_ReverseSeqIterObject: [], unicodeobject.W_UnicodeObject: [], - dictproxyobject.W_DictProxyObject: [], dictmultiobject.W_DictViewKeysObject: [], dictmultiobject.W_DictViewItemsObject: [], dictmultiobject.W_DictViewValuesObject: [], diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -225,6 +225,34 @@ return w_value return w_value + def setdictvalue(w_self, space, name, w_value): + if (not space.config.objspace.std.mutable_builtintypes + and not w_self.is_heaptype()): + msg = "can't set attributes on type object '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_self.name) + if name == "__del__" and 
name not in w_self.dict_w: + msg = "a __del__ method added to an existing type will not be called" + space.warn(msg, space.w_RuntimeWarning) + w_self.mutated() + w_self.dict_w[name] = w_value + return True + + def deldictvalue(w_self, space, w_key): + if w_self.lazyloaders: + w_self._freeze_() # force un-lazification + key = space.str_w(w_key) + if (not space.config.objspace.std.mutable_builtintypes + and not w_self.is_heaptype()): + msg = "can't delete attributes on type object '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_self.name) + try: + del w_self.dict_w[key] + except KeyError: + return False + else: + w_self.mutated() + return True + def lookup(w_self, name): # note that this doesn't call __get__ on the result at all space = w_self.space @@ -359,8 +387,7 @@ def getdict(w_self, space): # returning a dict-proxy! if w_self.lazyloaders: w_self._freeze_() # force un-lazification - newdic = space.newdict(from_strdict_shared=w_self.dict_w) - return W_DictProxyObject(newdic) + return W_DictProxyObject(space, w_self) def unwrap(w_self, space): if w_self.instancetypedef.fakedcpytype is not None: @@ -799,52 +826,9 @@ "type object '%s' has no attribute '%s'", w_type.name, name) -def setattr__Type_ANY_ANY(space, w_type, w_name, w_value): - # Note. This is exactly the same thing as descroperation.descr__setattr__, - # but it is needed at bootstrap to avoid a call to w_type.getdict() which - # would un-lazify the whole type. 
- name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.set(w_descr, w_type, w_value) - return - - if (not space.config.objspace.std.mutable_builtintypes - and not w_type.is_heaptype()): - msg = "can't set attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) - if name == "__del__" and name not in w_type.dict_w: - msg = "a __del__ method added to an existing type will not be called" - space.warn(msg, space.w_RuntimeWarning) - w_type.mutated() - w_type.dict_w[name] = w_value - def eq__Type_Type(space, w_self, w_other): return space.is_(w_self, w_other) -def delattr__Type_ANY(space, w_type, w_name): - if w_type.lazyloaders: - w_type._freeze_() # force un-lazification - name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.delete(w_descr, w_type) - return - if (not space.config.objspace.std.mutable_builtintypes - and not w_type.is_heaptype()): - msg = "can't delete attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) - try: - del w_type.dict_w[name] - except KeyError: - raise OperationError(space.w_AttributeError, w_name) - else: - w_type.mutated() - return - - # ____________________________________________________________ From commits-noreply at bitbucket.org Fri Apr 1 18:24:58 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 1 Apr 2011 18:24:58 +0200 (CEST) Subject: [pypy-svn] pypy default: Update to account for the extra "-1" argument now sent to ll_split. Message-ID: <20110401162458.F034F282B9C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43096:54658554479b Date: 2011-04-01 18:24 +0200 http://bitbucket.org/pypy/pypy/changeset/54658554479b/ Log: Update to account for the extra "-1" argument now sent to ll_split. 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -685,7 +685,7 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) @@ -1068,4 +1068,4 @@ guard_no_overflow(descr=) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) - """) \ No newline at end of file + """) From commits-noreply at bitbucket.org Fri Apr 1 18:37:19 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Fri, 1 Apr 2011 18:37:19 +0200 (CEST) Subject: [pypy-svn] pypy default: Tests the right wait3 function Message-ID: <20110401163719.E6B9F282B9E@codespeak.net> Author: Guillebert Romain Branch: Changeset: r43097:1fff2f18d231 Date: 2011-04-01 17:36 +0100 http://bitbucket.org/pypy/pypy/changeset/1fff2f18d231/ Log: Tests the right wait3 function diff --git a/lib_pypy/pypy_test/test_os_wait3.py b/lib_pypy/pypy_test/test_os_wait3.py --- a/lib_pypy/pypy_test/test_os_wait3.py +++ b/lib_pypy/pypy_test/test_os_wait3.py @@ -1,5 +1,12 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + import os +from lib_pypy._pypy_wait import wait3 + if hasattr(os, 'wait3'): def test_os_wait3(): exit_status = 0x33 @@ -11,7 +18,7 @@ if child == 0: # in child os._exit(exit_status) else: - pid, status, rusage = os.wait3(0) + pid, status, rusage = wait3(0) assert child == pid assert os.WIFEXITED(status) assert os.WEXITSTATUS(status) == exit_status From commits-noreply at 
bitbucket.org Fri Apr 1 18:44:23 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 1 Apr 2011 18:44:23 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: small tweaks, add a citation, reword a paragraph Message-ID: <20110401164423.C03D9282B9E@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3450:548bf69a3313 Date: 2011-04-01 18:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/548bf69a3313/ Log: small tweaks, add a citation, reword a paragraph diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -104,7 +104,7 @@ One of the hardest parts of implementing a dynamic language efficiently is to optimize its object model. This is made harder by the fact that many recent languages such as Python, JavaScript or Ruby have rather complex core object -semantics. For them, implementing just an interpreter is already a complex +semantics. For them, even implementing just an interpreter is already a complex task. Implementing them efficiently with a just-in-time compiler (JIT) is extremely challenging, because of their many corner-cases. @@ -113,14 +113,14 @@ renaissance of this idea around the approach of tracing just-in-time compilers. A number of projects have attempted this approach. SPUR \cite{bebenita_spur:_2010} is a tracing JIT for .NET together with a JavaScript implementation in C\#. PyPy -\cite{armin_rigo_pypys_2006} contains a tracing JIT for RPython (a restricted +\cite{armin_rigo_pypys_2006} contains a tracing JIT for RPython \cite{davide_ancona_rpython:_2007} (a restricted subset of Python). This JIT is then used to trace a number of languages implementations written in RPython. A number of other experiments in this directions were done, such as an interpreter for Lua in JavaScript, which is run on and optimized with a tracing JIT for JavaScript \cite{yermolovich_optimization_2009}. 
-These projects have in common that they work one meta-level down, providing a tracing JIT for the implementation +These projects have in common that they work one meta-level down, providing a tracing JIT for the language used to implement the dynamic language, and not for the dynamic language itself. The tracing JIT then will trace through the object model of the dynamic language implementation. This makes the object model transparent to the tracer @@ -134,12 +134,23 @@ In this paper we present two of these hints that are extensively used in the PyPy project to improve the performance of its Python interpreter. -Conceptually the significant speed-ups that can be achieved with -dynamic compilation depend on feeding into compilation and exploiting -values observed at runtime that are slow-varying in practice. To exploit the -runtime feedback, the implementation code and data structures need to be -structured so that many such values are at hand. The hints that we present allow -exactly to implement such feedback and exploitation in a meta-tracing context. +% XXX: paragraph rephrased by anto; feel free to pick the one you like best + +Conceptually, it is possible to achieve significant speed-ups by feeding into +the compiler some information that is observed at runtime: in particular, if +there are values which vary very slowly, it is possible to compile multiple +specialized versions of the same code, one for each actual value. To exploit +the runtime feedback, the implementation code and data structures need to be +structured so that many such slow-varying values are at hand. The hints that +we present allow exactly to implement such feedback and exploitation in a +meta-tracing context. + +% Conceptually the significant speed-ups that can be achieved with +% dynamic compilation depend on feeding into compilation and exploiting +% values observed at runtime that are slow-varying in practice. 
To exploit the +% runtime feedback, the implementation code and data structures need to be +% structured so that many such values are at hand. The hints that we present allow +% exactly to implement such feedback and exploitation in a meta-tracing context. Concretely these hints are used to control how the optimizer of the tracing JIT can improve the traces of the object model. More @@ -195,6 +206,10 @@ implementation details. Another aspect of the final VM that is added semi-automatically to the generated VM is a tracing JIT compiler. +The advantage of this approach is that writing an interpreter is much easier +and less error prone than manually writing a JIT compiler. Similarly, writing +in a high level language such as RPython is easier than writing in C. + We call the code that runs on top of an interpreter implemented with PyPy the \emph{user code} or \emph{user program}. From commits-noreply at bitbucket.org Fri Apr 1 18:44:24 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 1 Apr 2011 18:44:24 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: merge heads Message-ID: <20110401164424.567A7282B9E@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3451:4edabb03d15d Date: 2011-04-01 18:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/4edabb03d15d/ Log: merge heads diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -117,7 +117,7 @@ One of the hardest parts of implementing a dynamic language efficiently is to optimize its object model. This is made harder by the fact that many recent languages such as Python, JavaScript or Ruby have rather complex core object -semantics. For them, implementing just an interpreter is already a complex +semantics. For them, even implementing just an interpreter is already a complex task. 
Implementing them efficiently with a just-in-time compiler (JIT) is extremely challenging, because of their many corner-cases. @@ -126,14 +126,14 @@ renaissance of this idea around the approach of tracing just-in-time compilers. A number of projects have attempted this approach. SPUR \cite{bebenita_spur:_2010} is a tracing JIT for .NET together with a JavaScript implementation in C\#. PyPy -\cite{armin_rigo_pypys_2006} contains a tracing JIT for Python (a restricted +\cite{armin_rigo_pypys_2006} contains a tracing JIT for RPython \cite{davide_ancona_rpython:_2007} (a restricted subset of Python). This JIT is then used to trace a number of languages implementations written in RPython. A number of other experiments in this directions were done, such as an interpreter for Lua in JavaScript, which is run on and optimized with a tracing JIT for JavaScript \cite{yermolovich_optimization_2009}. -These projects have in common that they work one meta-level down, providing a tracing JIT for the implementation +These projects have in common that they work one meta-level down, providing a tracing JIT for the language used to implement the dynamic language, and not for the dynamic language itself. The tracing JIT then will trace through the object model of the dynamic language implementation. This makes the object model transparent to the tracer @@ -147,12 +147,23 @@ In this paper we present two of these hints that are extensively used in the PyPy project to improve the performance of its Python interpreter. -Conceptually the significant speed-ups that can be achieved with -dynamic compilation depend on feeding into compilation and exploiting -values observed at runtime that are slow-varying in practice. To exploit the -runtime feedback, the implementation code and data structures need to be -structured so that many such values are at hand. The hints that we present allow -exactly to implement such feedback and exploitation in a meta-tracing context. 
+% XXX: paragraph rephrased by anto; feel free to pick the one you like best + +Conceptually, it is possible to achieve significant speed-ups by feeding into +the compiler some information that is observed at runtime: in particular, if +there are values which vary very slowly, it is possible to compile multiple +specialized versions of the same code, one for each actual value. To exploit +the runtime feedback, the implementation code and data structures need to be +structured so that many such slow-varying values are at hand. The hints that +we present allow exactly to implement such feedback and exploitation in a +meta-tracing context. + +% Conceptually the significant speed-ups that can be achieved with +% dynamic compilation depend on feeding into compilation and exploiting +% values observed at runtime that are slow-varying in practice. To exploit the +% runtime feedback, the implementation code and data structures need to be +% structured so that many such values are at hand. The hints that we present allow +% exactly to implement such feedback and exploitation in a meta-tracing context. Concretely these hints are used to control how the optimizer of the tracing JIT can improve the traces of the object model. More @@ -208,6 +219,10 @@ implementation details. Another aspect of the final VM that is added semi-automatically to the generated VM is a tracing JIT compiler. +The advantage of this approach is that writing an interpreter is much easier +and less error prone than manually writing a JIT compiler. Similarly, writing +in a high level language such as RPython is easier than writing in C. + We call the code that runs on top of an interpreter implemented with PyPy the \emph{user code} or \emph{user program}. 
From commits-noreply at bitbucket.org Fri Apr 1 18:44:25 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 1 Apr 2011 18:44:25 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: swap these two paragraphs; I think it is more readable this way Message-ID: <20110401164425.357A0282BE9@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3452:d9eba5a92d91 Date: 2011-04-01 18:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/d9eba5a92d91/ Log: swap these two paragraphs; I think it is more readable this way diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -243,6 +243,11 @@ when a function call is encountered the operations of the called functions are simply put into the trace too. +Because the traces always correspond to a concrete execution they cannot +contain any control flow splits. Therefore they encode the control flow +decisions needed to stay on the trace with the help of \emph{guards}. Those are +operations that check that the assumptions are still true when the compiled trace is later executed with different values. + To be able to do this recording, VMs with a tracing JIT typically contain an interpreter. After a user program is started the interpreter is used; only the most frequently executed paths through the user @@ -250,11 +255,6 @@ that correspond to loops in the traced program, but most tracing JITs now also have support for tracing non-loops \cite{andreas_gal_incremental_2006}. -Because the traces always correspond to a concrete execution they cannot -contain any control flow splits. Therefore they encode the control flow -decisions needed to stay on the trace with the help of \emph{guards}. Those are -operations that check that the assumptions are still true when the compiled trace is later executed with different values. 
- One disadvantage of (tracing) JITs which makes them not directly applicable to PyPy is that they need to encode the language semantics of the language they are tracing. Since PyPy wants to be a From commits-noreply at bitbucket.org Fri Apr 1 18:44:26 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 1 Apr 2011 18:44:26 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: another small tweak Message-ID: <20110401164426.E82B7282C18@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3453:bb796d54bc75 Date: 2011-04-01 18:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/bb796d54bc75/ Log: another small tweak diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -273,8 +273,8 @@ While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the user program. This means that the tracer stops tracing after one iteration of -the loop in the user function that is being considered. At this point, it can -have traced many iterations of the interpreter main loop. +the loop in the user function that is being considered. At this point, it probably +traced many iterations of the interpreter main loop. 
\begin{figure} \includegraphics[scale=0.5]{figures/trace-levels} From commits-noreply at bitbucket.org Fri Apr 1 21:56:11 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 1 Apr 2011 21:56:11 +0200 (CEST) Subject: [pypy-svn] pypy default: backout e5ce4f03d51a which broke too many things :-( Message-ID: <20110401195611.9D3A7282B9E@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43098:ece3c038a59e Date: 2011-04-01 21:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ece3c038a59e/ Log: backout e5ce4f03d51a which broke too many things :-( diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -438,38 +438,6 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg - def test__package__(self): - # Regression test for http://bugs.python.org/issue3221. - def check_absolute(): - exec "from os import path" in ns - def check_relative(): - exec "from . 
import a" in ns - - # Check both OK with __package__ and __name__ correct - ns = dict(__package__='pkg', __name__='pkg.notarealmodule') - check_absolute() - check_relative() - - # Check both OK with only __name__ wrong - ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') - check_absolute() - check_relative() - - # Check relative fails with only __package__ wrong - ns = dict(__package__='foo', __name__='pkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check relative fails with __package__ and __name__ wrong - ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check both fail with package set to a non-string - ns = dict(__package__=object()) - raises(ValueError, check_absolute) - raises(ValueError, check_relative) - def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -118,105 +118,6 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) -def _get_relative_name(space, modulename, level, w_globals): - w = space.wrap - ctxt_w_package = space.finditem(w_globals, w('__package__')) - - ctxt_package = None - if ctxt_w_package is not None and ctxt_w_package is not space.w_None: - try: - ctxt_package = space.str_w(ctxt_w_package) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_ValueError, space.wrap( - "__package__ set to non-string")) - - if ctxt_package is not None: - # __package__ is set, so use it - package_parts = ctxt_package.split('.') - while level > 1 and package_parts: - level -= 1 - if package_parts: - package_parts.pop() - if not package_parts: - if len(ctxt_package) == 0: - msg = "Attempted 
relative import in non-package" - else: - msg = "Attempted relative import beyond toplevel package" - raise OperationError(space.w_ValueError, w(msg)) - - # Try to import parent package - try: - w_parent = absolute_import(space, ctxt_package, 0, - None, tentative=False) - except OperationError, e: - if not e.match(space, space.w_ImportError): - raise - if level > 0: - raise OperationError(space.w_SystemError, space.wrap( - "Parent module '%s' not loaded, " - "cannot perform relative import" % ctxt_package)) - else: - space.warn("Parent module '%s' not found " - "while handling absolute import" % ctxt_package, - space.w_RuntimeWarning) - - if modulename: - package_parts.append(modulename) - rel_level = len(package_parts) - rel_modulename = '.'.join(package_parts) - else: - # __package__ not set, so figure it out and set it - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) - - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - - if not ctxt_name: - return None, 0 - - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - - if level > 0 and not ctxt_name_prefix_parts: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - - rel_modulename = '.'.join(ctxt_name_prefix_parts) - - if ctxt_w_path is not None: - # __path__ is set, so __name__ is already the package name - space.setitem(w_globals, w("__package__"), ctxt_w_name) - else: - # Normal module, so work out the package name if any - if '.' 
not in ctxt_name: - space.setitem(w_globals, w("__package__"), space.w_None) - elif rel_modulename: - space.setitem(w_globals, w("__package__"), w(rel_modulename)) - - if modulename: - if rel_modulename: - rel_modulename += '.' + modulename - else: - rel_modulename = modulename - - rel_level = len(ctxt_name_prefix_parts) - - return rel_modulename, rel_level - - @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -238,30 +139,58 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) - if rel_modulename: - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. - if level == -1: - tentative = True - else: - tentative = False + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise - w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod + if ctxt_name is not None: + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + if ctxt_name_prefix_parts: + rel_modulename = '.'.join(ctxt_name_prefix_parts) + if modulename: + rel_modulename += '.' + modulename + baselevel = len(ctxt_name_prefix_parts) + if rel_modulename is not None: + # XXX What is this check about? 
There is no test for it + w_mod = check_sys_modules(space, w(rel_modulename)) - ## if level > 0: - ## msg = "Attempted relative import in non-package" - ## raise OperationError(space.w_ValueError, w(msg)) + if (w_mod is None or + not space.is_w(w_mod, space.w_None) or + level > 0): - ## if not modulename: - ## return None + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False + w_mod = absolute_import(space, rel_modulename, + baselevel, fromlist_w, + tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod + else: + rel_modulename = None + + if level > 0: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) w_mod = absolute_import_try(space, modulename, 0, fromlist_w) if w_mod is None or space.is_w(w_mod, space.w_None): w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) From commits-noreply at bitbucket.org Fri Apr 1 23:23:34 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Fri, 1 Apr 2011 23:23:34 +0200 (CEST) Subject: [pypy-svn] pypy default: Implements wait4 in the standard library Message-ID: <20110401212334.530F8282B9E@codespeak.net> Author: Guillebert Romain Branch: Changeset: r43099:027cf2c9195d Date: 2011-04-01 22:21 +0100 http://bitbucket.org/pypy/pypy/changeset/027cf2c9195d/ Log: Implements wait4 in the standard library diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -2,34 +2,50 @@ from ctypes.util import find_library from resource import _struct_rusage, struct_rusage +__all__ = ["wait3", "wait4"] + libc = CDLL(find_library("c")) c_wait3 = libc.wait3 c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def 
create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + def wait3(options): status = c_int() _rusage = _struct_rusage() pid = c_wait3(byref(status), c_int(options), byref(_rusage)) - rusage = struct_rusage(( - float(_rusage.ru_utime), - float(_rusage.ru_stime), - _rusage.ru_maxrss, - _rusage.ru_ixrss, - _rusage.ru_idrss, - _rusage.ru_isrss, - _rusage.ru_minflt, - _rusage.ru_majflt, - _rusage.ru_nswap, - _rusage.ru_inblock, - _rusage.ru_oublock, - _rusage.ru_msgsnd, - _rusage.ru_msgrcv, - _rusage.ru_nsignals, - _rusage.ru_nvcsw, - _rusage.ru_nivcsw)) + rusage = create_struct_rusage(_rusage) return pid, status.value, rusage -__all__ = ["wait3"] +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == 
exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -203,6 +203,14 @@ from _pypy_wait import wait3 return wait3(options) + def wait4(pid, options) + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -33,6 +33,8 @@ appleveldefs['wait'] = 'app_posix.wait' if hasattr(os, 'wait3'): appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { 'open' : 'interp_posix.open', diff --git a/lib_pypy/pypy_test/test_os_wait3.py b/lib_pypy/pypy_test/test_os_wait3.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_os_wait3.py +++ /dev/null @@ -1,26 +0,0 @@ -# Generates the resource cache -from __future__ import absolute_import -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('resource.ctc.py') - -import os - -from lib_pypy._pypy_wait import wait3 - -if hasattr(os, 'wait3'): - def test_os_wait3(): - exit_status = 0x33 - - if not hasattr(os, "fork"): - skip("Need fork() to test wait3()") - - child = os.fork() 
- if child == 0: # in child - os._exit(exit_status) - else: - pid, status, rusage = wait3(0) - assert child == pid - assert os.WIFEXITED(status) - assert os.WEXITSTATUS(status) == exit_status - assert isinstance(rusage.ru_utime, float) - assert isinstance(rusage.ru_maxrss, int) From commits-noreply at bitbucket.org Fri Apr 1 23:23:36 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Fri, 1 Apr 2011 23:23:36 +0200 (CEST) Subject: [pypy-svn] pypy default: Fixes a mistake in wait3's docstring Message-ID: <20110401212336.03E3F282BD4@codespeak.net> Author: Guillebert Romain Branch: Changeset: r43100:6ee1c8f9c102 Date: 2011-04-01 22:21 +0100 http://bitbucket.org/pypy/pypy/changeset/6ee1c8f9c102/ Log: Fixes a mistake in wait3's docstring diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -196,7 +196,7 @@ return posix.waitpid(-1, 0) def wait3(options): - """ wait3() -> (pid, status, rusage) + """ wait3(options) -> (pid, status, rusage) Wait for completion of a child process and provides resource usage informations """ From commits-noreply at bitbucket.org Sat Apr 2 07:22:01 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 2 Apr 2011 07:22:01 +0200 (CEST) Subject: [pypy-svn] pypy default: Syntax error. Message-ID: <20110402052201.B07B8282BD4@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43101:ab35b5e3fdbc Date: 2011-04-02 01:21 -0400 http://bitbucket.org/pypy/pypy/changeset/ab35b5e3fdbc/ Log: Syntax error. diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,7 +190,7 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. 
""" return posix.waitpid(-1, 0) @@ -203,7 +203,7 @@ from _pypy_wait import wait3 return wait3(options) - def wait4(pid, options) + def wait4(pid, options): """ wait4(pid, options) -> (pid, status, rusage) Wait for completion of the child process "pid" and provides resource usage informations @@ -213,7 +213,7 @@ else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -301,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): From commits-noreply at bitbucket.org Sat Apr 2 11:09:42 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:09:42 +0200 (CEST) Subject: [pypy-svn] pypy default: Typo. Message-ID: <20110402090942.47AF6282BD4@codespeak.net> Author: Armin Rigo Branch: Changeset: r43102:ec5861fb221f Date: 2011-04-01 20:22 +0200 http://bitbucket.org/pypy/pypy/changeset/ec5861fb221f/ Log: Typo. 
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def _clone_if_mutable(self): res = ResumeGuardDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res From commits-noreply at bitbucket.org Sat Apr 2 11:09:45 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:09:45 +0200 (CEST) Subject: [pypy-svn] pypy default: Redo "jit-invariant-fields" as "quasi-immutable fields". Message-ID: <20110402090945.32777282BDD@codespeak.net> Author: Armin Rigo Branch: Changeset: r43103:d5252efe3dd9 Date: 2011-04-01 23:07 +0200 http://bitbucket.org/pypy/pypy/changeset/d5252efe3dd9/ Log: Redo "jit-invariant-fields" as "quasi-immutable fields". Reuse as much the existing code as possible. 
diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,6 +262,10 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() + for name, attrdef in selfattrs.iteritems(): + if not attrdef.readonly and self.is_quasi_immutable(name): + ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) + classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -476,11 +480,9 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,6 +525,9 @@ def op_jit_force_virtual(x): return x +def op_jit_force_quasiinvariant(x): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,7 +3,8 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor +from pypy.rpython.rmodel import Repr, getgcflavor, inputconst +from pypy.rpython.lltypesystem.lltype import Void class FieldListAccessor(object): @@ -155,7 +156,8 @@ self.classdef = 
classdef def _setup_repr(self): - pass + if self.classdef is None: + self.immutable_field_set = set() def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -167,13 +169,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_list = [] # unless overwritten below + self.immutable_field_set = set() # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - self.immutable_field_list = immutable_fields.value + self.immutable_field_set = set(immutable_fields.value) accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -201,20 +203,23 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = [] + immutable_fields = set() rbase = self while rbase.classdef is not None: - immutable_fields += rbase.immutable_field_list + immutable_fields.update(rbase.immutable_field_set) rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} for name in fields: - if name.endswith('[*]'): + if name.endswith('[*]'): # for virtualizables' lists name = name[:-3] suffix = '[*]' - else: + elif name.endswith('?'): # a quasi-immutable field + name = name[:-1] + suffix = '?' + else: # a regular immutable/green field suffix = '' try: mangled_name, r = self._get_field(name) @@ -227,7 +232,6 @@ def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable - from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -248,12 +252,30 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if fieldname in self.immutable_field_list: + if (fieldname in self.immutable_field_set or + (fieldname + '?') in self.immutable_field_set): raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + + def hook_setfield(self, vinst, fieldname, llops): + if self.is_quasi_immutable(fieldname): + c_fieldname = inputconst(Void, 'mutate_' + fieldname) + llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) + + def is_quasi_immutable(self, fieldname): + search = fieldname + '?' 
+ rbase = self + while rbase.classdef is not None: + if search in rbase.immutable_field_set: + return True + rbase = rbase.rbase + return False + def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,6 +433,7 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), + 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -895,6 +895,30 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] + def test_quasi_immutable(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['x', 'y?', 'z?'] + class B(A): + pass + def f(): + A().x = 42 + A().y = 43 + b = B() + b.y = 41 + b.z = 44 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_z" : "?"} or \ + accessor.fields == {'ox':'', 'oy':'?', 'oz':'?'} # for ootype + found = [] + for op in graph.startblock.operations: + if op.opname == 'jit_force_quasi_immutable': + found.append(op.args[1].value) + assert found == ['mutate_y', 'mutate_y', 'mutate_z'] + class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,6 +322,7 @@ # before they are fully built, to avoid strange bugs in case # of recursion 
where other code would uses these # partially-initialized dicts. + AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -370,6 +371,11 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True + + for name, attrdef in attrs: + if not attrdef.readonly and self.is_quasi_immutable(name): + llfields.append(('mutate_' + name, OBJECTPTR)) + object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -488,6 +494,7 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -495,9 +502,6 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor From commits-noreply at bitbucket.org Sat Apr 2 11:09:51 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:09:51 +0200 (CEST) Subject: [pypy-svn] pypy default: Instead of having even more strings on immutable fields, Message-ID: <20110402090951.810C9282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43104:c1b009ab62c5 Date: 2011-04-02 11:05 +0200 http://bitbucket.org/pypy/pypy/changeset/c1b009ab62c5/ Log: Instead of having even more strings on immutable fields, introduce the IR_XXX constants whose boolean value correspond to the standard definition of being immutable. 
diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,6 +54,7 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant + from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -85,38 +86,50 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) - assert not llop.getfield.is_pure([v_s3, Constant('y')]) + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() - S3 = 
lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') + # + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + # + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: + assert llop.getfield(lltype.Signed, s3, 'x') == 46 + assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + else: + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. 
diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,6 +5,7 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -116,8 +117,8 @@ TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1' : "", - self.prefix + 'v2': "[*]"} + assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, + self.prefix + 'v2': IR_ARRAY_IMMUTABLE} # def fn2(n): Base().base1 = 42 diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -13,6 +13,8 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields + for x in fields.itervalues(): + assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -20,6 +22,20 @@ def _freeze_(self): return True +class ImmutableRanking(object): + def __init__(self, name, is_immutable): + self.name = name + self.is_immutable = is_immutable + def __nonzero__(self): + return self.is_immutable + def __repr__(self): + return '<%s>' % self.name + +IR_MUTABLE = ImmutableRanking('mutable', False) +IR_IMMUTABLE = ImmutableRanking('immutable', True) +IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) +IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) + class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -211,23 +227,23 @@ self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - with_suffix = {} + ranking = {} for name in fields: if name.endswith('[*]'): # for virtualizables' lists 
name = name[:-3] - suffix = '[*]' + rank = IR_ARRAY_IMMUTABLE elif name.endswith('?'): # a quasi-immutable field name = name[:-1] - suffix = '?' + rank = IR_QUASI_IMMUTABLE else: # a regular immutable/green field - suffix = '' + rank = IR_IMMUTABLE try: mangled_name, r = self._get_field(name) except KeyError: continue - with_suffix[mangled_name] = suffix - accessor.initialize(self.object_type, with_suffix) - return with_suffix + ranking[mangled_name] = rank + accessor.initialize(self.object_type, ranking) + return ranking def _check_for_immutable_conflicts(self): # check for conflicts, i.e. a field that is defined normally as diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,13 +268,14 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class SpecializableType(OOType): diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if cname.value in self.my_redirected_fields: + if self.my_redirected_fields.get(cname.value): cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -309,13 +309,14 @@ return _struct(self, n, initialization='example') def 
_immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class RttiStruct(Struct): _runtime_type_info = None diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,6 +5,8 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -746,8 +748,10 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ - accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_ARRAY_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_ARRAY_IMMUTABLE} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -765,8 +769,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : ""} or \ - accessor.fields == {"ox" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -785,8 +789,10 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = 
deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ - accessor.fields == {"ox" : "", "oy" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -804,8 +810,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y" : ""} or \ - accessor.fields == {"oy" : ""} # for ootype + assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"oy": IR_IMMUTABLE} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -849,8 +855,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v" : ""} or \ - accessor.fields == {"ov" : ""} # for ootype + assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ + accessor.fields == {"ov": IR_IMMUTABLE} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -898,26 +904,33 @@ def test_quasi_immutable(self): from pypy.jit.metainterp.typesystem import deref class A(object): - _immutable_fields_ = ['x', 'y?', 'z?'] + _immutable_fields_ = ['x', 'y', 'a?', 'b?'] class B(A): pass def f(): - A().x = 42 - A().y = 43 + a = A() + a.x = 42 + a.a = 142 b = B() + b.x = 43 b.y = 41 - b.z = 44 + b.a = 44 + b.b = 45 return B() t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_z" : "?"} or \ - accessor.fields == {'ox':'', 'oy':'?', 'oz':'?'} # for ootype + assert accessor.fields 
== {"inst_y": IR_IMMUTABLE, + "inst_b": IR_QUASI_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE, + "oa": IR_QUASI_IMMUTABLE, + "ob": IR_QUASI_IMMUTABLE} # for ootype found = [] for op in graph.startblock.operations: if op.opname == 'jit_force_quasi_immutable': found.append(op.args[1].value) - assert found == ['mutate_y', 'mutate_y', 'mutate_z'] + assert found == ['mutate_a', 'mutate_a', 'mutate_b'] class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,15 +794,8 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':''})}) - assert S._immutable_field('x') == True - # - class FieldListAccessor(object): - def __init__(self, fields): - self.fields = fields - S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) - assert S._immutable_field('x') == '[*]' + hints={'immutable_fields': FieldListAccessor({'x': 1234})}) + assert S._immutable_field('x') == 1234 class TestTrackAllocation: From commits-noreply at bitbucket.org Sat Apr 2 11:13:59 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:13:59 +0200 (CEST) Subject: [pypy-svn] pypy default: Backed out changeset c1b009ab62c5 Message-ID: <20110402091359.DF567282BD4@codespeak.net> Author: Armin Rigo Branch: Changeset: r43105:72b237531599 Date: 2011-04-02 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/72b237531599/ Log: Backed out changeset c1b009ab62c5 diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,7 +54,6 @@ def test_is_pure(): 
from pypy.objspace.flow.model import Variable, Constant - from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -86,50 +85,38 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind - assert not llop.getfield.is_pure([v_s3, Constant('y')]) + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert 
llop.getfield(lltype.Signed, s2, 'x') == 45 + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + assert llop.getfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - # - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - else: - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') - py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'x') - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') - py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'y') + assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. 
diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,7 +5,6 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -117,8 +116,8 @@ TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, - self.prefix + 'v2': IR_ARRAY_IMMUTABLE} + assert accessor.fields == {self.prefix + 'v1' : "", + self.prefix + 'v2': "[*]"} # def fn2(n): Base().base1 = 42 diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -13,8 +13,6 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields - for x in fields.itervalues(): - assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -22,20 +20,6 @@ def _freeze_(self): return True -class ImmutableRanking(object): - def __init__(self, name, is_immutable): - self.name = name - self.is_immutable = is_immutable - def __nonzero__(self): - return self.is_immutable - def __repr__(self): - return '<%s>' % self.name - -IR_MUTABLE = ImmutableRanking('mutable', False) -IR_IMMUTABLE = ImmutableRanking('immutable', True) -IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) -IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) - class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -227,23 +211,23 @@ self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - ranking = {} + with_suffix = {} for name in fields: if name.endswith('[*]'): # for virtualizables' lists 
name = name[:-3] - rank = IR_ARRAY_IMMUTABLE + suffix = '[*]' elif name.endswith('?'): # a quasi-immutable field name = name[:-1] - rank = IR_QUASI_IMMUTABLE + suffix = '?' else: # a regular immutable/green field - rank = IR_IMMUTABLE + suffix = '' try: mangled_name, r = self._get_field(name) except KeyError: continue - ranking[mangled_name] = rank - accessor.initialize(self.object_type, ranking) - return ranking + with_suffix[mangled_name] = suffix + accessor.initialize(self.object_type, with_suffix) + return with_suffix def _check_for_immutable_conflicts(self): # check for conflicts, i.e. a field that is defined normally as diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,14 +268,13 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class SpecializableType(OOType): diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if self.my_redirected_fields.get(cname.value): + if cname.value in self.my_redirected_fields: cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -309,14 +309,13 @@ return _struct(self, n, initialization='example') def 
_immutable_field(self, field): - if self._hints.get('immutable'): - return True if 'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class RttiStruct(Struct): _runtime_type_info = None diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,8 +5,6 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -748,10 +746,8 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_ARRAY_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_ARRAY_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ + accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -769,8 +765,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : ""} or \ + accessor.fields == {"ox" : ""} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -789,10 +785,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = 
deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ + accessor.fields == {"ox" : "", "oy" : ""} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -810,8 +804,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_y" : ""} or \ + accessor.fields == {"oy" : ""} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -855,8 +849,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ - accessor.fields == {"ov": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -904,33 +898,26 @@ def test_quasi_immutable(self): from pypy.jit.metainterp.typesystem import deref class A(object): - _immutable_fields_ = ['x', 'y', 'a?', 'b?'] + _immutable_fields_ = ['x', 'y?', 'z?'] class B(A): pass def f(): - a = A() - a.x = 42 - a.a = 142 + A().x = 42 + A().y = 43 b = B() - b.x = 43 b.y = 41 - b.a = 44 - b.b = 45 + b.z = 44 return B() t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE, - "inst_b": IR_QUASI_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - 
"oy": IR_IMMUTABLE, - "oa": IR_QUASI_IMMUTABLE, - "ob": IR_QUASI_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_z" : "?"} or \ + accessor.fields == {'ox':'', 'oy':'?', 'oz':'?'} # for ootype found = [] for op in graph.startblock.operations: if op.opname == 'jit_force_quasi_immutable': found.append(op.args[1].value) - assert found == ['mutate_a', 'mutate_a', 'mutate_b'] + assert found == ['mutate_y', 'mutate_y', 'mutate_z'] class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,8 +794,15 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x': 1234})}) - assert S._immutable_field('x') == 1234 + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' class TestTrackAllocation: From commits-noreply at bitbucket.org Sat Apr 2 11:14:02 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:14:02 +0200 (CEST) Subject: [pypy-svn] pypy default: Backed out d5252efe3dd9 Message-ID: <20110402091402.A44DE282BE8@codespeak.net> Author: Armin Rigo Branch: Changeset: r43106:6f71208004cd Date: 2011-04-02 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/6f71208004cd/ Log: Backed out d5252efe3dd9 diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,10 +262,6 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() - for 
name, attrdef in selfattrs.iteritems(): - if not attrdef.readonly and self.is_quasi_immutable(name): - ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) - classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -480,9 +476,11 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,9 +525,6 @@ def op_jit_force_virtual(x): return x -def op_jit_force_quasiinvariant(x): - pass - def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,8 +3,7 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor, inputconst -from pypy.rpython.lltypesystem.lltype import Void +from pypy.rpython.rmodel import Repr, getgcflavor class FieldListAccessor(object): @@ -156,8 +155,7 @@ self.classdef = classdef def _setup_repr(self): - if self.classdef is None: - self.immutable_field_set = set() + pass def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -169,13 +167,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = 
True - self.immutable_field_set = set() # unless overwritten below + self.immutable_field_list = [] # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - self.immutable_field_set = set(immutable_fields.value) + self.immutable_field_list = immutable_fields.value accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -203,23 +201,20 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = set() + immutable_fields = [] rbase = self while rbase.classdef is not None: - immutable_fields.update(rbase.immutable_field_set) + immutable_fields += rbase.immutable_field_list rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} for name in fields: - if name.endswith('[*]'): # for virtualizables' lists + if name.endswith('[*]'): name = name[:-3] suffix = '[*]' - elif name.endswith('?'): # a quasi-immutable field - name = name[:-1] - suffix = '?' - else: # a regular immutable/green field + else: suffix = '' try: mangled_name, r = self._get_field(name) @@ -232,6 +227,7 @@ def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -252,30 +248,12 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if (fieldname in self.immutable_field_set or - (fieldname + '?') in self.immutable_field_set): + if fieldname in self.immutable_field_list: raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - - def hook_setfield(self, vinst, fieldname, llops): - if self.is_quasi_immutable(fieldname): - c_fieldname = inputconst(Void, 'mutate_' + fieldname) - llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) - - def is_quasi_immutable(self, fieldname): - search = fieldname + '?' 
- rbase = self - while rbase.classdef is not None: - if search in rbase.immutable_field_set: - return True - rbase = rbase.rbase - return False - def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,7 +433,6 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), - 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -895,30 +895,6 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] - def test_quasi_immutable(self): - from pypy.jit.metainterp.typesystem import deref - class A(object): - _immutable_fields_ = ['x', 'y?', 'z?'] - class B(A): - pass - def f(): - A().x = 42 - A().y = 43 - b = B() - b.y = 41 - b.z = 44 - return B() - t, typer, graph = self.gengraph(f, []) - B_TYPE = deref(graph.getreturnvar().concretetype) - accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_z" : "?"} or \ - accessor.fields == {'ox':'', 'oy':'?', 'oz':'?'} # for ootype - found = [] - for op in graph.startblock.operations: - if op.opname == 'jit_force_quasi_immutable': - found.append(op.args[1].value) - assert found == ['mutate_y', 'mutate_y', 'mutate_z'] - class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,7 +322,6 @@ # before they are fully built, to avoid strange bugs in case # of recursion 
where other code would uses these # partially-initialized dicts. - AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -371,11 +370,6 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True - - for name, attrdef in attrs: - if not attrdef.readonly and self.is_quasi_immutable(name): - llfields.append(('mutate_' + name, OBJECTPTR)) - object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -494,7 +488,6 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -502,6 +495,9 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor From commits-noreply at bitbucket.org Sat Apr 2 11:26:29 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:26:29 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: A restart of the out-of-line-guards branch. Message-ID: <20110402092629.E16E7282BD4@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43107:342e3344cff5 Date: 2011-04-02 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/342e3344cff5/ Log: A restart of the out-of-line-guards branch. I will pull selectively code and ideas from the old branch. From commits-noreply at bitbucket.org Sat Apr 2 11:26:31 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:26:31 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Redo "jit-invariant-fields" as "quasi-immutable fields". 
Reuse as Message-ID: <20110402092631.BF02F282BD4@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43108:87905b4483df Date: 2011-04-02 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/87905b4483df/ Log: Redo "jit-invariant-fields" as "quasi-immutable fields". Reuse as much the existing code as possible. diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,6 +262,10 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() + for name, attrdef in selfattrs.iteritems(): + if not attrdef.readonly and self.is_quasi_immutable(name): + ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) + classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -476,11 +480,9 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,6 +525,9 @@ def op_jit_force_virtual(x): return x +def op_jit_force_quasi_immutable(x): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,7 +3,8 @@ #from pypy.annotation.classdef 
import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor +from pypy.rpython.rmodel import Repr, getgcflavor, inputconst +from pypy.rpython.lltypesystem.lltype import Void class FieldListAccessor(object): @@ -155,7 +156,8 @@ self.classdef = classdef def _setup_repr(self): - pass + if self.classdef is None: + self.immutable_field_set = set() def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -167,13 +169,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_list = [] # unless overwritten below + self.immutable_field_set = set() # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - self.immutable_field_list = immutable_fields.value + self.immutable_field_set = set(immutable_fields.value) accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -201,20 +203,23 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = [] + immutable_fields = set() rbase = self while rbase.classdef is not None: - immutable_fields += rbase.immutable_field_list + immutable_fields.update(rbase.immutable_field_set) rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): with_suffix = {} for name in fields: - if name.endswith('[*]'): + if name.endswith('[*]'): # for virtualizables' lists name = name[:-3] suffix = '[*]' - else: + elif name.endswith('?'): # a quasi-immutable field + name = name[:-1] + suffix = '?' 
+ else: # a regular immutable/green field suffix = '' try: mangled_name, r = self._get_field(name) @@ -227,7 +232,6 @@ def _check_for_immutable_conflicts(self): # check for conflicts, i.e. a field that is defined normally as # mutable in some parent class but that is now declared immutable - from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -248,12 +252,30 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if fieldname in self.immutable_field_list: + if (fieldname in self.immutable_field_set or + (fieldname + '?') in self.immutable_field_set): raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + + def hook_setfield(self, vinst, fieldname, llops): + if self.is_quasi_immutable(fieldname): + c_fieldname = inputconst(Void, 'mutate_' + fieldname) + llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) + + def is_quasi_immutable(self, fieldname): + search = fieldname + '?' 
+ rbase = self + while rbase.classdef is not None: + if search in rbase.immutable_field_set: + return True + rbase = rbase.rbase + return False + def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,6 +433,7 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), + 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -895,6 +895,30 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] + def test_quasi_immutable(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['x', 'y?', 'z?'] + class B(A): + pass + def f(): + A().x = 42 + A().y = 43 + b = B() + b.y = 41 + b.z = 44 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_z" : "?"} or \ + accessor.fields == {'ox':'', 'oy':'?', 'oz':'?'} # for ootype + found = [] + for op in graph.startblock.operations: + if op.opname == 'jit_force_quasi_immutable': + found.append(op.args[1].value) + assert found == ['mutate_y', 'mutate_y', 'mutate_z'] + class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,6 +322,7 @@ # before they are fully built, to avoid strange bugs in case # of recursion 
where other code would uses these # partially-initialized dicts. + AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -370,6 +371,11 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True + + for name, attrdef in attrs: + if not attrdef.readonly and self.is_quasi_immutable(name): + llfields.append(('mutate_' + name, OBJECTPTR)) + object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -488,6 +494,7 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -495,9 +502,6 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor From commits-noreply at bitbucket.org Sat Apr 2 11:26:36 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 11:26:36 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Instead of having even more strings on immutable fields, introduce Message-ID: <20110402092636.6C7D6282BDD@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43109:4d5bc8f7b36c Date: 2011-04-02 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/4d5bc8f7b36c/ Log: Instead of having even more strings on immutable fields, introduce the IR_XXX constants whose boolean value correspond to the standard definition of being immutable. 
diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,6 +54,7 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant + from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -85,38 +86,50 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) - assert not llop.getfield.is_pure([v_s3, Constant('y')]) + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() - S3 = 
lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') + # + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + # + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: + assert llop.getfield(lltype.Signed, s3, 'x') == 46 + assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + else: + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. 
diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,6 +5,7 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -116,8 +117,8 @@ TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1' : "", - self.prefix + 'v2': "[*]"} + assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, + self.prefix + 'v2': IR_ARRAY_IMMUTABLE} # def fn2(n): Base().base1 = 42 diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -13,6 +13,8 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields + for x in fields.itervalues(): + assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -20,6 +22,20 @@ def _freeze_(self): return True +class ImmutableRanking(object): + def __init__(self, name, is_immutable): + self.name = name + self.is_immutable = is_immutable + def __nonzero__(self): + return self.is_immutable + def __repr__(self): + return '<%s>' % self.name + +IR_MUTABLE = ImmutableRanking('mutable', False) +IR_IMMUTABLE = ImmutableRanking('immutable', True) +IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) +IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) + class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -211,23 +227,23 @@ self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - with_suffix = {} + ranking = {} for name in fields: if name.endswith('[*]'): # for virtualizables' lists 
name = name[:-3] - suffix = '[*]' + rank = IR_ARRAY_IMMUTABLE elif name.endswith('?'): # a quasi-immutable field name = name[:-1] - suffix = '?' + rank = IR_QUASI_IMMUTABLE else: # a regular immutable/green field - suffix = '' + rank = IR_IMMUTABLE try: mangled_name, r = self._get_field(name) except KeyError: continue - with_suffix[mangled_name] = suffix - accessor.initialize(self.object_type, with_suffix) - return with_suffix + ranking[mangled_name] = rank + accessor.initialize(self.object_type, ranking) + return ranking def _check_for_immutable_conflicts(self): # check for conflicts, i.e. a field that is defined normally as diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,13 +268,14 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class SpecializableType(OOType): diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if cname.value in self.my_redirected_fields: + if self.my_redirected_fields.get(cname.value): cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -309,13 +309,14 @@ return _struct(self, n, initialization='example') def 
_immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class RttiStruct(Struct): _runtime_type_info = None diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,6 +5,8 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -746,8 +748,10 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ - accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_ARRAY_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_ARRAY_IMMUTABLE} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -765,8 +769,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : ""} or \ - accessor.fields == {"ox" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -785,8 +789,10 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = 
deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ - accessor.fields == {"ox" : "", "oy" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -804,8 +810,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y" : ""} or \ - accessor.fields == {"oy" : ""} # for ootype + assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"oy": IR_IMMUTABLE} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -849,8 +855,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v" : ""} or \ - accessor.fields == {"ov" : ""} # for ootype + assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ + accessor.fields == {"ov": IR_IMMUTABLE} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -898,26 +904,33 @@ def test_quasi_immutable(self): from pypy.jit.metainterp.typesystem import deref class A(object): - _immutable_fields_ = ['x', 'y?', 'z?'] + _immutable_fields_ = ['x', 'y', 'a?', 'b?'] class B(A): pass def f(): - A().x = 42 - A().y = 43 + a = A() + a.x = 42 + a.a = 142 b = B() + b.x = 43 b.y = 41 - b.z = 44 + b.a = 44 + b.b = 45 return B() t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_z" : "?"} or \ - accessor.fields == {'ox':'', 'oy':'?', 'oz':'?'} # for ootype + assert accessor.fields 
== {"inst_y": IR_IMMUTABLE, + "inst_b": IR_QUASI_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE, + "oa": IR_QUASI_IMMUTABLE, + "ob": IR_QUASI_IMMUTABLE} # for ootype found = [] for op in graph.startblock.operations: if op.opname == 'jit_force_quasi_immutable': found.append(op.args[1].value) - assert found == ['mutate_y', 'mutate_y', 'mutate_z'] + assert found == ['mutate_a', 'mutate_a', 'mutate_b'] class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,15 +794,8 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':''})}) - assert S._immutable_field('x') == True - # - class FieldListAccessor(object): - def __init__(self, fields): - self.fields = fields - S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) - assert S._immutable_field('x') == '[*]' + hints={'immutable_fields': FieldListAccessor({'x': 1234})}) + assert S._immutable_field('x') == 1234 class TestTrackAllocation: From commits-noreply at bitbucket.org Sat Apr 2 20:17:21 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:21 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Implement these two cast functions to "work" when run Message-ID: <20110402181721.E7054282BD7@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43110:5848fef711b0 Date: 2011-04-02 17:08 +0200 http://bitbucket.org/pypy/pypy/changeset/5848fef711b0/ Log: Implement these two cast functions to "work" when run untranslated too. 
This is equivalent to the ropaque module in the previous out- of-line-guards branch, and serves the same purpose: for tests, temporarily hiding an RPython instance into a field of a low-level Struct. diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -480,7 +480,23 @@ # ____________________________________________________________ def cast_object_to_ptr(PTR, object): - raise NotImplementedError("cast_object_to_ptr") + """NOT_RPYTHON: hack. The object may be disguised as a PTR now. + Limited to casting a given object to a single type. + """ + if isinstance(PTR, lltype.Ptr): + if not hasattr(object, '_TYPE'): + object._TYPE = PTR.TO + else: + assert object._TYPE == PTR.TO + return lltype._ptr(PTR, object, True) + elif isinstance(PTR, ootype.Instance): + if not hasattr(object, '_TYPE'): + object._TYPE = PTR + else: + assert object._TYPE == PTR + return object + else: + raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) def cast_instance_to_base_ptr(instance): return cast_object_to_ptr(base_ptr_lltype(), instance) @@ -535,7 +551,13 @@ # ____________________________________________________________ def cast_base_ptr_to_instance(Class, ptr): - raise NotImplementedError("cast_base_ptr_to_instance") + """NOT_RPYTHON: hack. 
Reverse the hacking done in cast_object_to_ptr().""" + if isinstance(lltype.typeOf(ptr), lltype.Ptr): + ptr = ptr._as_obj() + if not isinstance(ptr, Class): + raise NotImplementedError("cast_base_ptr_to_instance: casting %r to %r" + % (ptr, Class)) + return ptr class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry): _about_ = cast_base_ptr_to_instance diff --git a/pypy/rpython/test/test_annlowlevel.py b/pypy/rpython/test/test_annlowlevel.py --- a/pypy/rpython/test/test_annlowlevel.py +++ b/pypy/rpython/test/test_annlowlevel.py @@ -4,9 +4,12 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode +from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, oostr from pypy.rpython.annlowlevel import hlunicode, llunicode +from pypy.rpython import annlowlevel + class TestLLType(BaseRtypingTest, LLRtypeMixin): def test_hlstr(self): @@ -53,6 +56,15 @@ res = self.interpret(f, [self.unicode_to_ll(u"abc")]) assert res == 3 + def test_cast_instance_to_base_ptr(self): + class X(object): + pass + x = X() + ptr = annlowlevel.cast_instance_to_base_ptr(x) + assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() + y = annlowlevel.cast_base_ptr_to_instance(X, ptr) + assert y is x + class TestOOType(BaseRtypingTest, OORtypeMixin): def test_hlstr(self): @@ -71,3 +83,12 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 + + def test_cast_instance_to_base_obj(self): + class X(object): + pass + x = X() + obj = annlowlevel.cast_instance_to_base_obj(x) + assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() + y = annlowlevel.cast_base_ptr_to_instance(X, obj) + assert y is x From commits-noreply at bitbucket.org Sat Apr 2 20:17:23 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:23 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: On 
getfields of quasi-immutable fields, write in the jitcodes a Message-ID: <20110402181723.568E4282BD7@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43111:cdc159d28520 Date: 2011-04-02 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/cdc159d28520/ Log: On getfields of quasi-immutable fields, write in the jitcodes a 'record_quasiimmut_field' operation in addition to the getfield_gc_*. As for setfields of quasi-immutable fields, they are hidden from the JIT; for now this is done by making the whole function containing them as don't-look-inside. diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -7,8 +7,9 @@ from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.codewriter.policy import log +from pypy.jit.codewriter.policy import log, check_skip_operation from pypy.jit.metainterp.typesystem import deref, arrayItem +from pypy.jit.metainterp import quasiimmut from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj @@ -561,7 +562,8 @@ arraydescr) return [] # check for _immutable_fields_ hints - if v_inst.concretetype.TO._immutable_field(c_fieldname.value): + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): @@ -574,10 +576,20 @@ descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) kind = getkind(RESULT)[0] - return SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), - [v_inst, descr], op.result) + op1 = SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), + [v_inst, descr], op.result) + # + if immut is quasiimmut.IR_QUASI_IMMUTABLE: + 
descr1 = self.cpu.fielddescrof( + v_inst.concretetype.TO, + quasiimmut.get_mutate_field_name(c_fieldname.value)) + op1 = [SpaceOperation('record_quasiimmut_field', + [v_inst, descr1], None), + op1] + return op1 def rewrite_op_setfield(self, op): + check_skip_operation(op) # just to check it doesn't raise if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -171,7 +171,8 @@ class VirtualizableAnalyzer(BoolGraphAnalyzer): def analyze_simple_operation(self, op, graphinfo): return op.opname in ('jit_force_virtualizable', - 'jit_force_virtual') + 'jit_force_virtual', + 'jit_force_quasi_immutable') # ____________________________________________________________ diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,5 +1,5 @@ from pypy.translator.simplify import get_funcobj -from pypy.jit.metainterp import history +from pypy.jit.metainterp import history, quasiimmut from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir @@ -85,12 +85,20 @@ getkind(v.concretetype, supports_floats, supports_longlong) v = op.result getkind(v.concretetype, supports_floats, supports_longlong) + check_skip_operation(op) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) return True return False +def check_skip_operation(op): + if op.opname == 'setfield': + if quasiimmut.is_quasi_immutable(op.args[0].concretetype.TO, + op.args[1].value): + raise NotImplementedError("write to quasi-immutable field %r" + % (op.args[1].value,)) + # ____________________________________________________________ class StopAtXPolicy(JitPolicy): diff --git a/pypy/jit/codewriter/test/test_jtransform.py 
b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -947,3 +947,42 @@ assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) assert op1.args[3] == ListOfKind('ref', [v1, v2]) + +def test_quasi_immutable(): + from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + v2 = varoftype(lltype.Signed) + STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], + v2) + tr = Transformer(FakeCPU()) + [op1, op2] = tr.rewrite_operation(op) + assert op1.opname == 'record_quasiimmut_field' + assert len(op1.args) == 2 + assert op1.args[0] == v_x + assert op1.args[1] == ('fielddescr', STRUCT, 'mutate_x') + assert op1.result is None + assert op2.opname == 'getfield_gc_i' + assert len(op2.args) == 2 + assert op2.args[0] == v_x + assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') + assert op2.result is op.result + +def test_quasi_immutable_setfield(): + from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + v1 = varoftype(lltype.Signed) + STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('setfield', + [v_x, Constant('inst_x', lltype.Void), v1], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU()) + raises(NotImplementedError, tr.rewrite_operation, op) From commits-noreply at bitbucket.org Sat 
Apr 2 20:17:24 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:24 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Start a module containing all support code for the front-end Message-ID: <20110402181724.636C4282BE8@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43112:b5bf46d6ee96 Date: 2011-04-02 17:17 +0200 http://bitbucket.org/pypy/pypy/changeset/b5bf46d6ee96/ Log: Start a module containing all support code for the front-end for quasi-immutable fields. diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/quasiimmut.py @@ -0,0 +1,42 @@ +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE +from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + + +def is_quasi_immutable(STRUCT, fieldname): + imm_fields = STRUCT._hints.get('immutable_fields') + return (imm_fields is not None and + imm_fields.fields.get(fieldname) is IR_QUASI_IMMUTABLE) + +def get_mutate_field_name(fieldname): + if fieldname.startswith('inst_'): # lltype + return 'mutate_' + fieldname[5:] + elif fieldname.startswith('o'): # ootype + return 'mutate_' + fieldname[1:] + else: + raise AssertionError(fieldname) + +def get_current_mutate_instance(cpu, gcref, mutatefielddescr): + """Returns the current SlowMutate instance in the field, + possibly creating one. 
+ """ + mutate_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) + if mutate_gcref: + mutate = SlowMutate.show(cpu, mutate_gcref) + else: + mutate = SlowMutate() + cpu.bh_setfield_gc_r(gcref, mutatefielddescr, mutate.hide(cpu)) + return mutate + + +class SlowMutate(object): + def __init__(self): + pass + + def hide(self, cpu): + mutate_ptr = cpu.ts.cast_instance_to_base_ref(self) + return cpu.ts.cast_to_ref(mutate_ptr) + + @staticmethod + def show(cpu, mutate_gcref): + mutate_ptr = cpu.ts.cast_to_baseclass(mutate_gcref) + return cast_base_ptr_to_instance(SlowMutate, mutate_ptr) diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -0,0 +1,40 @@ +from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE +from pypy.jit.metainterp import typesystem +from pypy.jit.metainterp.quasiimmut import SlowMutate +from pypy.jit.metainterp.quasiimmut import get_current_mutate_instance + + +def test_get_current_mutate_instance(): + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + foo = lltype.malloc(STRUCT, zero=True) + foo.inst_x = 42 + assert not foo.mutate_x + + class FakeCPU: + ts = typesystem.llhelper + + def bh_getfield_gc_r(self, gcref, fielddescr): + assert fielddescr == mutatefielddescr + foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) + result = foo.mutate_x + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + + def bh_setfield_gc_r(self, gcref, fielddescr, newvalue_gcref): + assert fielddescr == mutatefielddescr + foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) + newvalue = lltype.cast_opaque_ptr(rclass.OBJECTPTR, newvalue_gcref) + foo.mutate_x = newvalue + + 
cpu = FakeCPU() + mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') + + foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) + sm1 = get_current_mutate_instance(cpu, foo_gcref, mutatefielddescr) + assert isinstance(sm1, SlowMutate) + sm2 = get_current_mutate_instance(cpu, foo_gcref, mutatefielddescr) + assert sm1 is sm2 From commits-noreply at bitbucket.org Sat Apr 2 20:17:25 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:25 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Fix. Message-ID: <20110402181725.E2FEC282BE8@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43113:bc77e493eb45 Date: 2011-04-02 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/bc77e493eb45/ Log: Fix. diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,7 +525,7 @@ def op_jit_force_virtual(x): return x -def op_jit_force_quasi_immutable(x): +def op_jit_force_quasi_immutable(*args): pass def op_get_group_member(TYPE, grpptr, memberoffset): From commits-noreply at bitbucket.org Sat Apr 2 20:17:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:28 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Fixes. Message-ID: <20110402181728.C7854282C19@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43114:7142ac33e6a5 Date: 2011-04-02 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/7142ac33e6a5/ Log: Fixes. 
diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -998,6 +998,8 @@ return None # null pointer if type(p._obj0) is int: return p # a pointer obtained by cast_int_to_ptr + if getattr(p._obj0, '_carry_around_for_tests', False): + return p # a pointer obtained by cast_instance_to_base_ptr container = obj._normalizedcontainer() if type(container) is int: # this must be an opaque ptr originating from an integer @@ -1850,8 +1852,8 @@ if self.__class__ is not other.__class__: return NotImplemented if hasattr(self, 'container') and hasattr(other, 'container'): - obj1 = self.container._normalizedcontainer() - obj2 = other.container._normalizedcontainer() + obj1 = self._normalizedcontainer() + obj2 = other._normalizedcontainer() return obj1 == obj2 else: return self is other @@ -1875,6 +1877,8 @@ # an integer, cast to a ptr, cast to an opaque if type(self.container) is int: return self.container + if getattr(self.container, '_carry_around_for_tests', False): + return self.container return self.container._normalizedcontainer() else: return _parentable._normalizedcontainer(self) diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -484,16 +484,19 @@ Limited to casting a given object to a single type. 
""" if isinstance(PTR, lltype.Ptr): - if not hasattr(object, '_TYPE'): - object._TYPE = PTR.TO - else: - assert object._TYPE == PTR.TO + TO = PTR.TO + else: + TO = PTR + if not hasattr(object, '_carry_around_for_tests'): + assert not hasattr(object, '_TYPE') + object._carry_around_for_tests = True + object._TYPE = TO + else: + assert object._TYPE == TO + # + if isinstance(PTR, lltype.Ptr): return lltype._ptr(PTR, object, True) elif isinstance(PTR, ootype.Instance): - if not hasattr(object, '_TYPE'): - object._TYPE = PTR - else: - assert object._TYPE == PTR return object else: raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) From commits-noreply at bitbucket.org Sat Apr 2 20:17:34 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:34 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: First test passing on the front-end. Message-ID: <20110402181734.EC5FE282BE9@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43115:78ddcf590d26 Date: 2011-04-02 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/78ddcf590d26/ Log: First test passing on the front-end. 
diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -1,5 +1,6 @@ from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.jit.metainterp.history import AbstractDescr def is_quasi_immutable(STRUCT, fieldname): @@ -40,3 +41,14 @@ def show(cpu, mutate_gcref): mutate_ptr = cpu.ts.cast_to_baseclass(mutate_gcref) return cast_base_ptr_to_instance(SlowMutate, mutate_ptr) + + +class SlowMutateDescr(AbstractDescr): + def __init__(self, cpu, gcref, + constantfieldbox, fielddescr, mutatefielddescr): + self.cpu = cpu + self.gcref = gcref + self.constantfieldbox = constantfieldbox + self.fielddescr = fielddescr + self.mutatefielddescr = mutatefielddescr + self.mutate = get_current_mutate_instance(cpu, gcref, mutatefielddescr) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,6 +555,19 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any + @arguments("box", "descr", "descr") + def opimpl_record_quasiimmut_field(self, box, fielddescr, + mutatefielddescr): + from pypy.jit.metainterp.quasiimmut import SlowMutateDescr + cpu = self.metainterp.cpu + fieldbox = executor.execute(cpu, self.metainterp, rop.GETFIELD_GC, + fielddescr, box) + fieldbox = fieldbox.constbox() + descr = SlowMutateDescr(cpu, box.getref_base(), fieldbox, + fielddescr, mutatefielddescr) + self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], + None, descr=descr) + def _nonstandard_virtualizable(self, pc, box): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- 
a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -962,9 +962,10 @@ tr = Transformer(FakeCPU()) [op1, op2] = tr.rewrite_operation(op) assert op1.opname == 'record_quasiimmut_field' - assert len(op1.args) == 2 + assert len(op1.args) == 3 assert op1.args[0] == v_x - assert op1.args[1] == ('fielddescr', STRUCT, 'mutate_x') + assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') + assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') assert op1.result is None assert op2.opname == 'getfield_gc_i' assert len(op2.args) == 2 diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -3,6 +3,8 @@ from pypy.jit.metainterp import typesystem from pypy.jit.metainterp.quasiimmut import SlowMutate from pypy.jit.metainterp.quasiimmut import get_current_mutate_instance +from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.rlib.jit import JitDriver def test_get_current_mutate_instance(): @@ -38,3 +40,30 @@ assert isinstance(sm1, SlowMutate) sm2 = get_current_mutate_instance(cpu, foo_gcref, mutatefielddescr) assert sm1 is sm2 + + +class QuasiImmutTests: + + def test_simple_1(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7]) + assert res == 700 + self.check_loops(getfield_gc=0, everywhere=True) + + +class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ 
b/pypy/jit/metainterp/optimizeopt/heap.py @@ -378,6 +378,23 @@ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) + def optimize_QUASIIMMUT_FIELD(self, op): + # Pattern: QUASIIMMUT_FIELD(s, descr=SlowMutateDescr) + # x = GETFIELD_GC(s, descr='inst_x') + # If 's' is a constant (after optimizations), then we make 's.inst_x' + # a constant too, and we rely on the rest of the optimizations to + # constant-fold the following getfield_gc. + structvalue = self.getvalue(op.getarg(0)) + if structvalue.is_constant(): + from pypy.jit.metainterp.quasiimmut import SlowMutateDescr + # XXX check that the value is still correct! + # XXX record as an out-of-line guard! + smdescr = op.getdescr() + assert isinstance(smdescr, SlowMutateDescr) + fieldvalue = self.getvalue(smdescr.constantfieldbox) + cf = self.field_cache(smdescr.fielddescr) + cf.remember_field_value(structvalue, fieldvalue) + def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -20,6 +20,9 @@ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) self.emit_operation(op) + def optimize_QUASIIMMUT_FIELD(self, op): + pass + def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,6 +312,7 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1166,6 +1166,11 @@ def 
bhimpl_setfield_raw_f(cpu, struct, fielddescr, newvalue): cpu.bh_setfield_raw_f(struct, fielddescr, newvalue) + @arguments("cpu", "r", "d", "d") + def bhimpl_record_quasiimmut_field(self, struct, fielddescr, + mutatefielddescr): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -584,7 +584,7 @@ v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) op1 = [SpaceOperation('record_quasiimmut_field', - [v_inst, descr1], None), + [v_inst, descr, descr1], None), op1] return op1 diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -41,7 +41,8 @@ # during preamble but to keep it during the loop optimizations.append(o) - if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: + if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts + or 'heap' not in enable_opts): optimizations.append(OptSimplify()) if inline_short_preamble: diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -475,6 +475,7 @@ 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', + 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', From commits-noreply at bitbucket.org Sat Apr 2 20:17:36 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:36 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Detect cases were the value stored in the quasi-immutable field Message-ID: 
<20110402181736.4C566282BE8@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43116:941dde3b5b4e Date: 2011-04-02 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/941dde3b5b4e/ Log: Detect cases where the value stored in the quasi-immutable field changed already between tracing and optimization. diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -44,11 +44,22 @@ class SlowMutateDescr(AbstractDescr): - def __init__(self, cpu, gcref, - constantfieldbox, fielddescr, mutatefielddescr): + def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): self.cpu = cpu - self.gcref = gcref - self.constantfieldbox = constantfieldbox + self.structbox = structbox self.fielddescr = fielddescr self.mutatefielddescr = mutatefielddescr + gcref = structbox.getref_base() self.mutate = get_current_mutate_instance(cpu, gcref, mutatefielddescr) + self.constantfieldbox = self.get_current_constant_fieldvalue() + + def get_current_constant_fieldvalue(self): + from pypy.jit.metainterp import executor + from pypy.jit.metainterp.resoperation import rop + fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC, + self.fielddescr, self.structbox) + return fieldbox.constbox() + + def is_still_valid(self): + currentbox = self.get_current_constant_fieldvalue() + return self.constantfieldbox.same_constant(currentbox) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -385,15 +385,22 @@ # a constant too, and we rely on the rest of the optimizations to # constant-fold the following getfield_gc. structvalue = self.getvalue(op.getarg(0)) - if structvalue.is_constant(): - from pypy.jit.metainterp.quasiimmut import SlowMutateDescr - # XXX check that the value is still correct! - # XXX record as an out-of-line guard!
- smdescr = op.getdescr() - assert isinstance(smdescr, SlowMutateDescr) - fieldvalue = self.getvalue(smdescr.constantfieldbox) - cf = self.field_cache(smdescr.fielddescr) - cf.remember_field_value(structvalue, fieldvalue) + if not structvalue.is_constant(): + return # not a constant at all; ignore QUASIIMMUT_FIELD + # + from pypy.jit.metainterp.quasiimmut import SlowMutateDescr + smdescr = op.getdescr() + assert isinstance(smdescr, SlowMutateDescr) + # check that the value is still correct; it could have changed + # already between the tracing and now. In this case, we are + # simply ignoring the QUASIIMMUT_FIELD hint and compiling it + # as a regular getfield. + if not smdescr.is_still_valid(): + return + # XXX record as an out-of-line guard! + fieldvalue = self.getvalue(smdescr.constantfieldbox) + cf = self.field_cache(smdescr.fielddescr) + cf.remember_field_value(structvalue, fieldvalue) def propagate_forward(self, op): opnum = op.getopnum() diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -4,7 +4,7 @@ from pypy.jit.metainterp.quasiimmut import SlowMutate from pypy.jit.metainterp.quasiimmut import get_current_mutate_instance from pypy.jit.metainterp.test.test_basic import LLJitMixin -from pypy.rlib.jit import JitDriver +from pypy.rlib.jit import JitDriver, dont_look_inside def test_get_current_mutate_instance(): @@ -64,6 +64,31 @@ assert res == 700 self.check_loops(getfield_gc=0, everywhere=True) + def test_change_during_tracing(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + @dont_look_inside + def residual_call(foo): + foo.a += 1 + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += 
foo.a + residual_call(foo) + x -= 1 + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7]) + assert res == 721 + self.check_loops(getfield_gc=1) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -560,11 +560,7 @@ mutatefielddescr): from pypy.jit.metainterp.quasiimmut import SlowMutateDescr cpu = self.metainterp.cpu - fieldbox = executor.execute(cpu, self.metainterp, rop.GETFIELD_GC, - fielddescr, box) - fieldbox = fieldbox.constbox() - descr = SlowMutateDescr(cpu, box.getref_base(), fieldbox, - fielddescr, mutatefielddescr) + descr = SlowMutateDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) From commits-noreply at bitbucket.org Sat Apr 2 20:17:37 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:37 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Add a test. Message-ID: <20110402181737.964DA282BD7@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43117:90342c0be849 Date: 2011-04-02 18:53 +0200 http://bitbucket.org/pypy/pypy/changeset/90342c0be849/ Log: Add a test. 
diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -64,6 +64,29 @@ assert res == 700 self.check_loops(getfield_gc=0, everywhere=True) + def test_nonopt_1(self): + myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def setup(x): + return [Foo(100 + i) for i in range(x)] + def f(a, x): + lst = setup(x) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(lst=lst, x=x, total=total) + # read a quasi-immutable field out of a variable + x -= 1 + total += lst[x].a + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7]) + assert res == 721 + self.check_loops(getfield_gc=1) + def test_change_during_tracing(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) class Foo: From commits-noreply at bitbucket.org Sat Apr 2 20:17:39 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:39 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Fix a mistake: we must not detect if the field changed -- because Message-ID: <20110402181739.E038C282C20@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43118:35338e02ae5d Date: 2011-04-02 19:18 +0200 http://bitbucket.org/pypy/pypy/changeset/35338e02ae5d/ Log: Fix a mistake: we must not detect if the field changed -- because it can have changed several times and be back to its original value. Instead, we must handle jit_force_quasi_immutable lloperations and detect if the SlowMutate was invalidated. 
diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -1,4 +1,5 @@ from pypy.rpython.rclass import IR_QUASI_IMMUTABLE +from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.annlowlevel import cast_base_ptr_to_instance from pypy.jit.metainterp.history import AbstractDescr @@ -28,6 +29,21 @@ cpu.bh_setfield_gc_r(gcref, mutatefielddescr, mutate.hide(cpu)) return mutate +def make_invalidation_function(STRUCT, mutatefieldname): + # + def _invalidate_now(p): + mutate_ptr = getattr(p, mutatefieldname) + setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) + mutate = cast_base_ptr_to_instance(SlowMutate, mutate_ptr) + mutate.invalidate() + _invalidate_now._dont_inline_ = True + # + def invalidation(p): + if getattr(p, mutatefieldname): + _invalidate_now(p) + # + return invalidation + class SlowMutate(object): def __init__(self): @@ -42,6 +58,9 @@ mutate_ptr = cpu.ts.cast_to_baseclass(mutate_gcref) return cast_base_ptr_to_instance(SlowMutate, mutate_ptr) + def invalidate(self): + pass # XXX + class SlowMutateDescr(AbstractDescr): def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): @@ -61,5 +80,12 @@ return fieldbox.constbox() def is_still_valid(self): - currentbox = self.get_current_constant_fieldvalue() - return self.constantfieldbox.same_constant(currentbox) + gcref = self.structbox.getref_base() + curmut = get_current_mutate_instance(self.cpu, gcref, + self.mutatefielddescr) + if curmut is not self.mutate: + return False + else: + currentbox = self.get_current_constant_fieldvalue() + assert self.constantfieldbox.same_constant(currentbox) + return True diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -87,7 +87,7 @@ assert res == 721 self.check_loops(getfield_gc=1) - def 
test_change_during_tracing(self): + def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) class Foo: _immutable_fields_ = ['a?'] @@ -112,6 +112,32 @@ assert res == 721 self.check_loops(getfield_gc=1) + def test_change_during_tracing_2(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + @dont_look_inside + def residual_call(foo, difference): + foo.a += difference + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + residual_call(foo, +1) + residual_call(foo, -1) + x -= 1 + return total + # + assert f(100, 7) == 700 + res = self.meta_interp(f, [100, 7]) + assert res == 700 + self.check_loops(getfield_gc=1) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -131,6 +131,16 @@ def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') +def find_force_quasi_immutable(graphs): + results = [] + for graph in graphs: + for block in graph.iterblocks(): + for i in range(len(block.operations)): + op = block.operations[i] + if op.opname == 'jit_force_quasi_immutable': + results.append((graph, block, i)) + return results + def get_stats(): return pyjitpl._warmrunnerdesc.stats @@ -187,6 +197,7 @@ self.rewrite_can_enter_jits() self.rewrite_set_param() self.rewrite_force_virtual(vrefinfo) + self.rewrite_force_quasi_immutable() self.add_finish() self.metainterp_sd.finish_setup(self.codewriter) @@ -842,6 +853,28 @@ all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) + def replace_force_quasiimmut_with_direct_call(self, op): + ARG = op.args[0].concretetype + mutatefieldname = 
op.args[1].value + key = (ARG, mutatefieldname) + if key in self._cache_force_quasiimmed_funcs: + cptr = self._cache_force_quasiimmed_funcs[key] + else: + from pypy.jit.metainterp import quasiimmut + func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) + FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) + llptr = self.helper_func(FUNC, func) + cptr = Constant(llptr, FUNC) + self._cache_force_quasiimmed_funcs[key] = cptr + op.opname = 'direct_call' + op.args = [cptr, op.args[0]] + + def rewrite_force_quasi_immutable(self): + self._cache_force_quasiimmed_funcs = {} + graphs = self.translator.graphs + for graph, block, i in find_force_quasi_immutable(graphs): + self.replace_force_quasiimmut_with_direct_call(block.operations[i]) + # ____________________________________________________________ def execute_token(self, loop_token): From commits-noreply at bitbucket.org Sat Apr 2 20:17:42 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 2 Apr 2011 20:17:42 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Build on the TreeLoop instance a dict 'quasi_immutable_deps' of all Message-ID: <20110402181742.569D4282BDD@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43119:836d0f8d527a Date: 2011-04-02 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/836d0f8d527a/ Log: Build on the TreeLoop instance a dict 'quasi_immutable_deps' of all SlowMutate instances. The next step will be to send that to the backend. 
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -267,6 +267,8 @@ virtual_state = modifier.get_virtual_state(jump_args) loop.preamble.operations = self.optimizer.newoperations + loop.preamble.quasi_immutable_deps = ( + self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.reconstruct_for_next_iteration() inputargs = self.inline(self.cloned_operations, loop.inputargs, jump_args) @@ -276,6 +278,7 @@ loop.preamble.operations.append(jmp) loop.operations = self.optimizer.newoperations + loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() assert isinstance(start_resumedescr, ResumeGuardDescr) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -788,6 +788,7 @@ operations = None token = None call_pure_results = None + quasi_immutable_deps = None def __init__(self, name): self.name = name diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -63,6 +63,12 @@ res = self.meta_interp(f, [100, 7]) assert res == 700 self.check_loops(getfield_gc=0, everywhere=True) + # + from pypy.jit.metainterp.warmspot import get_stats + loops = get_stats().loops + for loop in loops: + assert len(loop.quasi_immutable_deps) == 1 + assert isinstance(loop.quasi_immutable_deps.keys()[0], SlowMutate) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -86,6 +92,11 @@ res = self.meta_interp(f, [100, 7]) assert res == 721 self.check_loops(getfield_gc=1) + # + from pypy.jit.metainterp.warmspot import get_stats + loops = get_stats().loops + for loop in loops: + assert 
loop.quasi_immutable_deps is None def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -397,7 +397,11 @@ # as a regular getfield. if not smdescr.is_still_valid(): return - # XXX record as an out-of-line guard! + # record as an out-of-line guard + if self.optimizer.quasi_immutable_deps is None: + self.optimizer.quasi_immutable_deps = {} + self.optimizer.quasi_immutable_deps[smdescr.mutate] = None + # perform the replacement in the list of operations fieldvalue = self.getvalue(smdescr.constantfieldbox) cf = self.field_cache(smdescr.fielddescr) cf.remember_field_value(structvalue, fieldvalue) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -257,6 +257,7 @@ self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False + self.quasi_immutable_deps = None self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results @@ -309,6 +310,7 @@ new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None + new.quasi_immutable_deps = self.quasi_immutable_deps return new @@ -410,6 +412,7 @@ self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations + self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) From commits-noreply at bitbucket.org Sat Apr 2 20:33:46 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 2 Apr 2011 20:33:46 +0200 (CEST) Subject: [pypy-svn] pypy default: The default thread should also get an instance dict for thread 
locals. Message-ID: <20110402183346.1ED04282BD7@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43120:b43534d1019d Date: 2011-04-02 14:33 -0400 http://bitbucket.org/pypy/pypy/changeset/b43534d1019d/ Log: The default thread should also get an instance dict for thread locals. diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() From commits-noreply at bitbucket.org Sat Apr 2 22:08:23 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 2 Apr 2011 22:08:23 +0200 (CEST) Subject: [pypy-svn] pypy default: ll_thread.getident is a loop invariant. Message-ID: <20110402200823.7854636C206@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43121:188c4e729ee3 Date: 2011-04-02 16:07 -0400 http://bitbucket.org/pypy/pypy/changeset/188c4e729ee3/ Log: ll_thread.getident is a loop invariant. diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... 
+ at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) From commits-noreply at bitbucket.org Sat Apr 2 22:27:38 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 2 Apr 2011 22:27:38 +0200 (CEST) Subject: [pypy-svn] pypy default: kill some oldstyle code, less verbose now Message-ID: <20110402202738.7DF58282BD7@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43122:75ca95717dbc Date: 2011-04-02 16:27 -0400 http://bitbucket.org/pypy/pypy/changeset/75ca95717dbc/ Log: kill some oldstyle code, less verbose now diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -17,8 +17,8 @@ '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) 
See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file From commits-noreply at bitbucket.org Sun Apr 3 14:15:19 2011 From: commits-noreply at bitbucket.org (lac) Date: Sun, 3 Apr 2011 14:15:19 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Initial draft Message-ID: <20110403121519.95755282B8B@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3454:238bb3ea6426 Date: 2011-04-03 14:13 +0200 http://bitbucket.org/pypy/extradoc/changeset/238bb3ea6426/ Log: Initial draft diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/gothenburg-2011/people.txt @@ -0,0 +1,15 @@ +People coming to the Gothenburg sprint April 25 - May 1 2011 +============================================================ + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. + +==================== ============== ===================== ================== + Name Arrive/Depart Accomodation Food +==================== ============== ===================== ================== +Jacob Hallen lives there no peppers +Laura Creighton lives there +Carl Friedrich Bolz ? J+L's house Vegan +==================== ============== ===================== ================== + diff --git a/sprintinfo/gothenburg-2011/announce.txt b/sprintinfo/gothenburg-2011/announce.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/gothenburg-2011/announce.txt @@ -0,0 +1,103 @@ +PyPy Göteborg Post-Easter Sprint April 25 - May 1 2011 +====================================================== + +The next PyPy sprint will be in Gothenburg, Sweden. It will +focus on . It is a public sprint, suitable for +newcomers. 
+ +Topics and goals +---------------- + +Helping people get their code running with PyPy? + +Summer of Code stuff? + +1.5 release if it hasn't happened already? + +Location +-------- + +The sprint will be held in the apartment of Laura Creighton and Jacob Hallén +which is at Götabergsgatan 22 in Gothenburg, Sweden. Here is a map_. This is +in central Gothenburg. It is between the tram_ stops of Vasaplatsen and +Valand, (a distance of 4 blocks) where many lines call -- the 2, 3, 4, 5, +7, 10 and 13. + +.. _tram: http://www.vasttrafik.se/en/ + +.. _map: http://maps.google.se/maps?f=q&source=s_q&hl=sv&geocode=&q=G%C3%B6tabergsgatan+22,+G%C3%B6teborg&aq=&sll=57.698781,11.972952&sspn=0.009815,0.026565&ie=UTF8&hq=&hnear=G%C3%B6tabergsgatan+22,+411+34+G%C3%B6teborg,+V%C3%A4stra+G%C3%B6talands+L%C3%A4n&z=15 + +Probably cheapest and not too far away is to book accomodation at `SGS +Veckobostader`_. The `Elite Park Avenyn Hotel`_ is a luxury hotel just a +few blocks away. There are scores of hotels a short walk away from the +sprint location, suitable for every budget, desire for luxury, and desire +for the unusual. You could, for instance, stay on a `boat`_. Options are +too numerous to go into here. Just ask in the mailing list or on the blog. + +.. _`SGS Veckobostader`: http://www.sgsveckobostader.se/en +.. _`Elite Park Avenyn Hotel`: http://www.elite.se/hotell/goteborg/park/ +.. _`boat`: http://www.liseberg.se/en/home/Accommodation/Hotel/Hotel-Barken-Viking/ + +Hours will be +from 10:00 until people have had enough. It's a good idea to arrive a +day before the sprint starts and leave a day later. In the middle of +the sprint there usually is a break day and it's usually ok to take +half-days off if you feel like it. + + +Good to Know +------------ + +Sweden is not part of the Euro zone. One SEK (krona in singular, kronor +in plural) is roughly 1/10th of a Euro (9.36 SEK to 1 Euro). + +The venue is central in Gothenburg. 
 There is a large selection of +places to get food nearby, from edible-and-cheap to outstanding. We +often cook meals together, so let us know if you have any food allergies, +dislikes, or special requirements. + +Sweden uses the same kind of plugs as Germany. 230V AC. + +The Sprint will be held the week following Easter. This means, as always, +that Gothcon_ will be taking place the weekend before (Easter weekend). +Gothcon, now in its 35 year, is the largest European game players conference. +Some of you may be interested in arriving early for the board games. +The conference site is only in Swedish, alas. You don't need to register +in advance unless you are planning to host a tournament, (and it's too +late for that anyway). + +.. _Gothcon: http://www.gothcon.se/ + + +Getting Here +------------ +If are coming train, you will arrive at the `Central Station`_. It is +about 12 blocks to the site from there, or you can take a tram_. + +There are two airports which are local to Göteborg, `Landvetter`_ (the main +one) and `Gothenburg City Airport`_ (where some budget airlines fly). +If you arrive at `Landvetter`_ the airport bus stops right downtown at +`Elite Park Avenyn Hotel`_ which is the second stop, 4 blocks from the +Sprint site, as well as the end of the line, which is the `Central Station`_. +If you arrive at `Gothenburg City Airport`_ take the bus to the end of the +line. You will be at the `Central Station`_. + +You can also arrive by ferry_, from either Kiel in Germany or Frederikshavn +in Denmark. + +..`Central Station`_: http://maps.google.se/maps?f=q&source=s_q&hl=sv&geocode=&q=Centralstationen+G%C3%B6teborg&aq=&sll=57.698789,11.972946&sspn=0.009815,0.026565&g=G%C3%B6tabergsgatan+22,+G%C3%B6teborg&ie=UTF8&hq=Centralstationen&hnear=G%C3%B6teborg,+V%C3%A4stra+G%C3%B6talands+L%C3%A4n&ll=57.703551,11.979475&spn=0.018481,0.053129&z=14 + +.. _`Landvetter`: http://swedavia.se/en/Goteborg/Traveller-information/Traffic-information/ +.. 
_`Gothenburg City Airport`: http://www.goteborgairport.se/eng.asp + +.. _ferry: http://www.stenaline.nl/en/ferry/ + +Who's Coming? +-------------- + +If you'd like to come, please let us know when you will be arriving and +leaving, as well as letting us know your interests We'll keep a list +of `people`_ which we'll update (which you can do so yourself if you +have bitbucket pypy commit rights). + +.. _`people`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/gothenburg-2011/people.txt From commits-noreply at bitbucket.org Sun Apr 3 14:15:20 2011 From: commits-noreply at bitbucket.org (lac) Date: Sun, 3 Apr 2011 14:15:20 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: merge heads Message-ID: <20110403121520.5BE62282B8B@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3455:0fccd8e897dc Date: 2011-04-03 14:14 +0200 http://bitbucket.org/pypy/extradoc/changeset/0fccd8e897dc/ Log: merge heads diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -117,7 +117,7 @@ One of the hardest parts of implementing a dynamic language efficiently is to optimize its object model. This is made harder by the fact that many recent languages such as Python, JavaScript or Ruby have rather complex core object -semantics. For them, implementing just an interpreter is already a complex +semantics. For them, even implementing just an interpreter is already a complex task. Implementing them efficiently with a just-in-time compiler (JIT) is extremely challenging, because of their many corner-cases. @@ -126,14 +126,14 @@ renaissance of this idea around the approach of tracing just-in-time compilers. A number of projects have attempted this approach. SPUR \cite{bebenita_spur:_2010} is a tracing JIT for .NET together with a JavaScript implementation in C\#. 
PyPy -\cite{armin_rigo_pypys_2006} contains a tracing JIT for Python (a restricted +\cite{armin_rigo_pypys_2006} contains a tracing JIT for RPython \cite{davide_ancona_rpython:_2007} (a restricted subset of Python). This JIT is then used to trace a number of languages implementations written in RPython. A number of other experiments in this directions were done, such as an interpreter for Lua in JavaScript, which is run on and optimized with a tracing JIT for JavaScript \cite{yermolovich_optimization_2009}. -These projects have in common that they work one meta-level down, providing a tracing JIT for the implementation +These projects have in common that they work one meta-level down, providing a tracing JIT for the language used to implement the dynamic language, and not for the dynamic language itself. The tracing JIT then will trace through the object model of the dynamic language implementation. This makes the object model transparent to the tracer @@ -147,12 +147,23 @@ In this paper we present two of these hints that are extensively used in the PyPy project to improve the performance of its Python interpreter. -Conceptually the significant speed-ups that can be achieved with -dynamic compilation depend on feeding into compilation and exploiting -values observed at runtime that are slow-varying in practice. To exploit the -runtime feedback, the implementation code and data structures need to be -structured so that many such values are at hand. The hints that we present allow -exactly to implement such feedback and exploitation in a meta-tracing context. +% XXX: paragraph rephrased by anto; feel free to pick the one you like best + +Conceptually, it is possible to achieve significant speed-ups by feeding into +the compiler some information that is observed at runtime: in particular, if +there are values which vary very slowly, it is possible to compile multiple +specialized versions of the same code, one for each actual value. 
To exploit +the runtime feedback, the implementation code and data structures need to be +structured so that many such slow-varying values are at hand. The hints that +we present allow exactly to implement such feedback and exploitation in a +meta-tracing context. + +% Conceptually the significant speed-ups that can be achieved with +% dynamic compilation depend on feeding into compilation and exploiting +% values observed at runtime that are slow-varying in practice. To exploit the +% runtime feedback, the implementation code and data structures need to be +% structured so that many such values are at hand. The hints that we present allow +% exactly to implement such feedback and exploitation in a meta-tracing context. Concretely these hints are used to control how the optimizer of the tracing JIT can improve the traces of the object model. More @@ -208,6 +219,10 @@ implementation details. Another aspect of the final VM that is added semi-automatically to the generated VM is a tracing JIT compiler. +The advantage of this approach is that writing an interpreter is much easier +and less error prone than manually writing a JIT compiler. Similarly, writing +in a high level language such as RPython is easier than writing in C. + We call the code that runs on top of an interpreter implemented with PyPy the \emph{user code} or \emph{user program}. @@ -228,6 +243,11 @@ when a function call is encountered the operations of the called functions are simply put into the trace too. +Because the traces always correspond to a concrete execution they cannot +contain any control flow splits. Therefore they encode the control flow +decisions needed to stay on the trace with the help of \emph{guards}. Those are +operations that check that the assumptions are still true when the compiled trace is later executed with different values. + To be able to do this recording, VMs with a tracing JIT typically contain an interpreter. 
After a user program is started the interpreter is used; only the most frequently executed paths through the user @@ -235,11 +255,6 @@ that correspond to loops in the traced program, but most tracing JITs now also have support for tracing non-loops \cite{andreas_gal_incremental_2006}. -Because the traces always correspond to a concrete execution they cannot -contain any control flow splits. Therefore they encode the control flow -decisions needed to stay on the trace with the help of \emph{guards}. Those are -operations that check that the assumptions are still true when the compiled trace is later executed with different values. - One disadvantage of (tracing) JITs which makes them not directly applicable to PyPy is that they need to encode the language semantics of the language they are tracing. Since PyPy wants to be a @@ -258,8 +273,8 @@ While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the user program. This means that the tracer stops tracing after one iteration of -the loop in the user function that is being considered. At this point, it can -have traced many iterations of the interpreter main loop. +the loop in the user function that is being considered. At this point, it probably +traced many iterations of the interpreter main loop. 
\begin{figure} \includegraphics[scale=0.5]{figures/trace-levels} From commits-noreply at bitbucket.org Sun Apr 3 16:00:59 2011 From: commits-noreply at bitbucket.org (lac) Date: Sun, 3 Apr 2011 16:00:59 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: list some things we might want to do at the sprint Message-ID: <20110403140059.8989C282B8B@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3456:62cdbad827b2 Date: 2011-04-03 16:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/62cdbad827b2/ Log: list some things we might want to do at the sprint diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -8,7 +8,7 @@ ==================== ============== ===================== ================== Name Arrive/Depart Accomodation Food ==================== ============== ===================== ================== -Jacob Hallen lives there no peppers +Jacob Hallén lives there no peppers Laura Creighton lives there Carl Friedrich Bolz ? J+L's house Vegan ==================== ============== ===================== ================== diff --git a/sprintinfo/gothenburg-2011/announce.txt b/sprintinfo/gothenburg-2011/announce.txt --- a/sprintinfo/gothenburg-2011/announce.txt +++ b/sprintinfo/gothenburg-2011/announce.txt @@ -1,18 +1,26 @@ PyPy Göteborg Post-Easter Sprint April 25 - May 1 2011 ====================================================== -The next PyPy sprint will be in Gothenburg, Sweden. It will -focus on . It is a public sprint, suitable for -newcomers. +The next PyPy sprint will be in Gothenburg, Sweden. It is a public sprint, +very suitable for newcomers. We'll focus on making the 1.5 release (if +it hasn't already happened) and whatever interests the Sprint attendees. Topics and goals ---------------- -Helping people get their code running with PyPy? +- 1.5 release if it hasn't already happened -Summer of Code stuff? 
+- Going over our documentation, and classifying our docs in terms of + mouldiness. Deciding what needs writing, and maybe writing it. -1.5 release if it hasn't happened already? +- Helping people get their code running with PyPy + +- maybe work on EuroPython Training, and talks + +- Summer of Code preparation + +- speed.pypy.org + Location -------- From commits-noreply at bitbucket.org Sun Apr 3 16:12:34 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 3 Apr 2011 16:12:34 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Add a potential bug. Message-ID: <20110403141234.D2EA3282B8B@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3457:10ed94c02d83 Date: 2011-04-03 16:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/10ed94c02d83/ Log: Add a potential bug. diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -1,3 +1,9 @@ +BUGS +---- + +* a CALL that may release the GIL needs to have effectinfo=None, + because random other code can run at that point. + INVESTIGATIONS -------------- From commits-noreply at bitbucket.org Sun Apr 3 16:12:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 3 Apr 2011 16:12:35 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Expand on the "release 1.5" task. Add a general "other" programming task. Message-ID: <20110403141235.69A7C282B8B@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3458:b3513e9b8bb7 Date: 2011-04-03 16:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/b3513e9b8bb7/ Log: Expand on the "release 1.5" task. Add a general "other" programming task. 
diff --git a/sprintinfo/gothenburg-2011/announce.txt b/sprintinfo/gothenburg-2011/announce.txt --- a/sprintinfo/gothenburg-2011/announce.txt +++ b/sprintinfo/gothenburg-2011/announce.txt @@ -8,7 +8,9 @@ Topics and goals ---------------- -- 1.5 release if it hasn't already happened +The main goal is to polish and release PyPy 1.5, supporting Python 2.7 +as well as the last few months' improvements in the JIT (provided that +it hasn't already happened). Other topics: - Going over our documentation, and classifying our docs in terms of mouldiness. Deciding what needs writing, and maybe writing it. @@ -21,6 +23,9 @@ - speed.pypy.org +- any other programming task is welcome too and depend on everybody's + interest; e.g. tweaking the Python or JavaScript interpreter, Stackless + support, and so on. Location -------- From commits-noreply at bitbucket.org Sun Apr 3 16:24:01 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 3 Apr 2011 16:24:01 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Simplify sentence, killing a part that doesn't make sense in Message-ID: <20110403142401.3207F282B8B@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3459:603e47be6828 Date: 2011-04-03 16:21 +0200 http://bitbucket.org/pypy/extradoc/changeset/603e47be6828/ Log: Simplify sentence, killing a part that doesn't make sense in english and isn't really useful anyway. diff --git a/sprintinfo/gothenburg-2011/announce.txt b/sprintinfo/gothenburg-2011/announce.txt --- a/sprintinfo/gothenburg-2011/announce.txt +++ b/sprintinfo/gothenburg-2011/announce.txt @@ -23,9 +23,8 @@ - speed.pypy.org -- any other programming task is welcome too and depend on everybody's - interest; e.g. tweaking the Python or JavaScript interpreter, Stackless - support, and so on. +- any other programming task is welcome too -- e.g. tweaking the + Python or JavaScript interpreter, Stackless support, and so on. 
Location -------- From commits-noreply at bitbucket.org Sun Apr 3 18:37:07 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 3 Apr 2011 18:37:07 +0200 (CEST) Subject: [pypy-svn] pypy default: Forgot to rename this method here too. Message-ID: <20110403163707.BCBDE282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43124:def54d829641 Date: 2011-04-03 18:30 +0200 http://bitbucket.org/pypy/pypy/changeset/def54d829641/ Log: Forgot to rename this method here too. diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): From commits-noreply at bitbucket.org Sun Apr 3 18:37:08 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 3 Apr 2011 18:37:08 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix on 64-bits. Message-ID: <20110403163708.7E5BC282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43125:b5d5b0a4fac6 Date: 2011-04-03 18:33 +0200 http://bitbucket.org/pypy/pypy/changeset/b5d5b0a4fac6/ Log: Fix on 64-bits. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -951,7 +951,7 @@ def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, x, arglocs, start) p = 0 n = len(arglocs) @@ -979,7 +979,7 @@ self.mc.CALL(x) self.mark_gc_roots(force_index) - def _emit_call_64(self, force_index, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] From commits-noreply at bitbucket.org Sun Apr 3 18:37:10 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 3 Apr 2011 18:37:10 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110403163710.003B5282BE8@codespeak.net> Author: Armin Rigo Branch: Changeset: r43126:6d184371dd6b Date: 2011-04-03 18:36 +0200 http://bitbucket.org/pypy/pypy/changeset/6d184371dd6b/ Log: merge heads diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -17,8 +17,8 @@ '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/module/itertools/interp_itertools.py 
b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file From commits-noreply at bitbucket.org Mon Apr 4 01:37:29 2011 From: commits-noreply at bitbucket.org (gutworth) Date: Mon, 4 Apr 2011 01:37:29 +0200 (CEST) Subject: [pypy-svn] pypy default: update buildbot url Message-ID: <20110403233729.1D18336C20F@codespeak.net> Author: Benjamin Peterson Branch: Changeset: r43127:b302bbe9e6bd Date: 2011-04-03 17:05 -0500 http://bitbucket.org/pypy/pypy/changeset/b302bbe9e6bd/ Log: update buildbot url diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks From commits-noreply at bitbucket.org Mon Apr 4 01:37:30 2011 From: commits-noreply at bitbucket.org (gutworth) Date: Mon, 4 Apr 2011 01:37:30 +0200 (CEST) Subject: [pypy-svn] pypy default: these things are fixed Message-ID: <20110403233730.BF075282B8B@codespeak.net> Author: Benjamin Peterson Branch: Changeset: r43128:c42d7437c204 Date: 2011-04-03 18:38 -0500 http://bitbucket.org/pypy/pypy/changeset/c42d7437c204/ Log: these things are fixed diff --git a/lib-python/TODO 
b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. - - Longer tasks ------------ From commits-noreply at bitbucket.org Mon Apr 4 11:36:56 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 4 Apr 2011 11:36:56 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: two comments, a few rephrasing Message-ID: <20110404093656.04217282BDE@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3460:8b3947b57b54 Date: 2011-04-04 11:36 +0200 http://bitbucket.org/pypy/extradoc/changeset/8b3947b57b54/ Log: two comments, a few rephrasing diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -314,6 +314,8 @@ \subsection{Running Example} \label{sub:running} +\anto{this example is not referenced until section \ref{sec:fastobjmodel}: I + would put it just before it} As the running example of this paper we will use a very simple and bare-bones object model that just supports classes and instances, without any @@ -343,6 +345,8 @@ an interpreter, a huge amount of time will be spent doing lookups in these dictionaries. 
Let's assume we trace through code that sums three attributes, such as: +\anto{I still think it's a bit weird to call them ``methods'' and then use + them as attributes in the example} \begin{lstlisting}[mathescape,basicstyle=\ttfamily] inst.getattr("a") + inst.getattr("b") + inst.getattr("c") @@ -437,8 +441,9 @@ of PyPy's JIT. Promotion is a technique that only works well in JIT compilers; in static compilers it is significantly less applicable. -Promotion is essentially a tool for trace specialization. In some places in the -interpreter it would be very useful if a variable were constant, even though it +Promotion is essentially a tool for trace specialization. There are places in +the interpreter where knowing that a value if constant opens a lot of +optimization opportunities, even though it could have different values in practice. In such a place, promotion is used. The typical reason to do that is if there is a lot of computation depending on the value of that variable. @@ -638,7 +643,7 @@ side effects, because it changes the memoizing dictionary. However, because this side effect is not externally visible, the function from the outside is pure. This is a property that is not easily detectable by analysis. Therefore, the purity -of this function needs to be annotated. +of this function needs to be manually annotated. 
From commits-noreply at bitbucket.org Mon Apr 4 11:39:49 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:39:49 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add myself and lukas Message-ID: <20110404093949.92352282BE8@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3461:7a084fee5205 Date: 2011-04-04 11:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/7a084fee5205/ Log: add myself and lukas diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -10,6 +10,7 @@ ==================== ============== ===================== ================== Jacob Hallén lives there no peppers Laura Creighton lives there -Carl Friedrich Bolz ? J+L's house Vegan +Carl Friedrich Bolz 24-30 J+L's house Vegan +Lukas Diekmann 24-30 J+L's house ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Mon Apr 4 11:46:23 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:46:23 +0200 (CEST) Subject: [pypy-svn] pypy default: don't generate one huuuge long here and mask it at the end, but mask it Message-ID: <20110404094623.D8EB3282BE8@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43129:f76f5816b0e1 Date: 2011-04-01 21:31 +0200 http://bitbucket.org/pypy/pypy/changeset/f76f5816b0e1/ Log: don't generate one huuuge long here and mask it at the end, but mask it continuously diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) From commits-noreply at bitbucket.org Mon Apr 4 11:46:24 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:46:24 +0200 (CEST) 
Subject: [pypy-svn] pypy default: no reason to make new functions every iteration Message-ID: <20110404094624.7A081282BE8@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43130:d27b78a910eb Date: 2011-04-01 21:57 +0200 http://bitbucket.org/pypy/pypy/changeset/d27b78a910eb/ Log: no reason to make new functions every iteration diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -497,6 +497,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +519,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) From commits-noreply at bitbucket.org Mon Apr 4 11:46:25 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:46:25 +0200 (CEST) Subject: [pypy-svn] pypy default: kill some dead old functions Message-ID: <20110404094625.0BF14282BE8@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43131:36bc5f74e90d Date: 2011-04-01 21:58 +0200 http://bitbucket.org/pypy/pypy/changeset/36bc5f74e90d/ Log: kill some dead old functions diff 
--git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -115,46 +115,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. 
""" - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel From commits-noreply at bitbucket.org Mon Apr 4 11:46:25 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:46:25 +0200 (CEST) Subject: [pypy-svn] pypy default: replace uses of traverse with the proper iterators Message-ID: <20110404094625.BDFBE282BE8@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43132:f869177c8044 Date: 2011-04-01 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/f869177c8044/ Log: replace uses of traverse with the proper iterators diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for 
i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. 
When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). """ - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() From commits-noreply at bitbucket.org Mon Apr 4 11:46:32 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:46:32 +0200 (CEST) Subject: [pypy-svn] pypy default: kill flatten, which is useless nowadays Message-ID: <20110404094632.E00D0282C1D@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43133:d6dfe5acd0d4 Date: 2011-04-01 22:01 +0200 http://bitbucket.org/pypy/pypy/changeset/d6dfe5acd0d4/ Log: kill flatten, which is useless nowadays diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp 
import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/objspace/flow/test/test_objspace.py 
b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -2,7 +2,7 @@ import new import py from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -573,7 +573,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 #__________________________________________________________ def test_unfrozen_user_class1(self): diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -82,7 +82,6 @@ pieces.headerblock.exits[1], pieces.whileblock, pieces.whileblock.exits[0]] - assert flatten(graph) == lst def test_mkentrymap(): entrymap = mkentrymap(graph) diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/translator/backendopt/merge_if_blocks.py 
b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -395,11 +395,6 @@ stack += block.exits[::-1] -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -6,7 +6,7 @@ from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- 
a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int From commits-noreply at bitbucket.org Mon Apr 4 11:46:35 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 11:46:35 +0200 (CEST) Subject: [pypy-svn] pypy default: get rid of some more traverse calls Message-ID: <20110404094635.28295282BF2@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43134:0922b6a4d0f1 Date: 2011-04-01 23:30 +0200 http://bitbucket.org/pypy/pypy/changeset/0922b6a4d0f1/ Log: get rid of some more traverse calls diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def 
implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -590,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for 
link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -608,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -631,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -651,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -664,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -998,11 +972,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) 
== 2 excfound.sort() expected = [Exception, AttributeError] @@ -1020,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git 
a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def translate(func, argtypes, backend_optimize=True): diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter From commits-noreply at bitbucket.org Mon Apr 4 13:31:51 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:31:51 
+0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: add support for moving values between vfp registers and simplify the same_as operation Message-ID: <20110404113151.41DEC282BDA@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43135:f7de9f224f4a Date: 2011-04-01 20:02 +0200 http://bitbucket.org/pypy/pypy/changeset/f7de9f224f4a/ Log: add support for moving values between vfp registers and simplify the same_as operation diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -658,6 +658,8 @@ self.mc.VMOV_rc(loc.value, loc.value+1, prev_loc.value, cond=cond) elif loc.is_vfp_reg() and prev_loc.is_reg(): self.mc.VMOV_cr(loc.value, prev_loc.value, prev_loc.value+1, cond=cond) + elif loc.is_vfp_reg() and prev_loc.is_vfp_reg(): + self.mc.VMOV_cc(loc.value, prev_loc.value, cond=cond) else: assert 0, 'unsupported case' mov_loc_loc = regalloc_mov diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -358,10 +358,7 @@ def emit_op_same_as(self, op, arglocs, regalloc, fcond): argloc, resloc = arglocs - if argloc.is_imm(): - self.mc.MOV_ri(resloc.value, argloc.getint()) - else: - self.mc.MOV_rr(resloc.value, argloc.value) + self.mov_loc_loc(argloc, resloc) return fcond def emit_op_guard_no_exception(self, op, arglocs, regalloc, fcond): diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py --- a/pypy/jit/backend/arm/codebuilder.py +++ b/pypy/jit/backend/arm/codebuilder.py @@ -108,6 +108,17 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): + sz = 1 # for 64-bit mode + instr = (cond << 28 + | 0xEB << 20 + | (dd & 0xF) << 12 + | 0x5 << 9 + | (sz & 0x1) << 8 + | 0x1 << 6 + | (dm & 0xF)) + self.write32(instr) + def VCVT_float_to_int(self, target, source, 
cond=cond.AL): opc2 = 0x5 sz = 1 From commits-noreply at bitbucket.org Mon Apr 4 13:31:52 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:31:52 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: remove more uses of TempBox Message-ID: <20110404113152.6298A282BDA@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43136:bcd6bcbd6924 Date: 2011-04-01 20:07 +0200 http://bitbucket.org/pypy/pypy/changeset/bcd6bcbd6924/ Log: remove more uses of TempBox diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -15,10 +15,10 @@ gen_emit_unary_float_op, saved_registers) from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder from pypy.jit.backend.arm.jump import remap_frame_layout -from pypy.jit.backend.arm.regalloc import Regalloc, TempInt +from pypy.jit.backend.arm.regalloc import Regalloc, TempInt, TempPtr from pypy.jit.backend.llsupport import symbolic from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr -from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity, TempBox +from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity from pypy.jit.metainterp.history import (Const, ConstInt, BoxInt, Box, AbstractFailDescr, LoopToken, INT, FLOAT, REF) from pypy.jit.metainterp.resoperation import rop @@ -594,7 +594,7 @@ regalloc.possibly_free_var(args[0]) if args[3] is not args[2] is not args[4]: # MESS MESS MESS: don't free regalloc.possibly_free_var(args[2]) # it if ==args[3] or args[4] - srcaddr_box = TempBox() + srcaddr_box = TempPtr() forbidden_vars = [args[1], args[3], args[4], srcaddr_box] srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, selected_reg=r.r1) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc, @@ -602,7 +602,7 @@ # compute the destination address forbidden_vars = [args[4], args[3], srcaddr_box] - 
dstaddr_box = TempBox() + dstaddr_box = TempPtr() dstaddr_loc = regalloc.force_allocate_reg(dstaddr_box, selected_reg=r.r0) forbidden_vars.append(dstaddr_box) base_loc, box = regalloc._ensure_value_is_boxed(args[1], forbidden_vars) @@ -624,7 +624,7 @@ args.append(length_box) if is_unicode: forbidden_vars = [srcaddr_box, dstaddr_box] - bytes_box = TempBox() + bytes_box = TempPtr() bytes_loc = regalloc.force_allocate_reg(bytes_box, forbidden_vars) scale = self._get_unicode_item_scale() assert length_loc.is_reg() diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.history import INT, FLOAT +from pypy.jit.metainterp.history import INT, FLOAT, REF from pypy.jit.backend.arm.arch import WORD class AssemblerLocation(object): _immutable_ = True @@ -100,7 +100,7 @@ self.width = num_words * WORD # One of INT, REF, FLOAT assert num_words == 1 - assert type == INT + assert type == INT or type == REF self.type = type def frame_size(self): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -46,7 +46,7 @@ @staticmethod def frame_pos(loc, type): - assert type == INT + assert type == INT or type == REF # XXX for now we only have one word stack locs return locations.StackLocation(loc, type=type) From commits-noreply at bitbucket.org Mon Apr 4 13:31:54 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:31:54 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: support spilling of float vars Message-ID: <20110404113154.88850282BF7@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43137:dd948f833978 Date: 2011-04-02 14:55 +0200 http://bitbucket.org/pypy/pypy/changeset/dd948f833978/ Log: support spilling of float vars diff --git a/pypy/jit/backend/arm/assembler.py 
b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -149,13 +149,15 @@ i += 4 elif res == self.STACK_LOC: stack_loc = self.decode32(enc, i+1) - value = self.decode32(stack, frame_depth - stack_loc*WORD) + if group == self.FLOAT_TYPE: + value = self.decode64(stack, frame_depth - stack_loc*WORD) + else: + value = self.decode32(stack, frame_depth - stack_loc*WORD) i += 4 else: # REG_LOC reg = ord(enc[i]) if group == self.FLOAT_TYPE: - t = self.decode64(vfp_regs, reg*2*WORD) - value = longlong2float(t) + value = self.decode64(vfp_regs, reg*2*WORD) else: value = self.decode32(regs, reg*WORD) @@ -164,7 +166,7 @@ elif group == self.REF_TYPE: self.fail_boxes_ptr.setitem(fail_index, rffi.cast(llmemory.GCREF, value)) elif group == self.FLOAT_TYPE: - self.fail_boxes_float.setitem(fail_index, longlong.getfloatstorage(value)) + self.fail_boxes_float.setitem(fail_index, value) else: assert 0, 'unknown type' @@ -194,6 +196,8 @@ # XXX decode imm if necessary assert 0, 'Imm Locations are not supported' elif res == self.STACK_LOC: + if res_type == FLOAT: + assert 0, 'float on stack' stack_loc = self.decode32(enc, j+1) loc = regalloc.frame_manager.frame_pos(stack_loc, INT) j += 4 @@ -371,7 +375,6 @@ reg_args = self._count_reg_args(arglocs) stack_locs = len(arglocs) - reg_args - selected_reg = 0 for i in range(reg_args): loc = arglocs[i] @@ -650,6 +653,24 @@ self.mc.LDR_rr(loc.value, r.fp.value, temp.value, cond=cond) else: self.mc.LDR_ri(loc.value, r.fp.value, offset.value, cond=cond) + elif loc.is_stack() and prev_loc.is_vfp_reg(): + # spill vfp register + offset = ConstInt(loc.position*-WORD) + if not _check_imm_arg(offset): + self.mc.gen_load_int(temp.value, offset.value) + self.mc.ADD_rr(temp.value, r.fp.value, temp.value) + else: + self.mc.ADD_rr(temp.value, r.fp.value, offset) + self.mc.VSTR(prev_loc.value, temp.value, cond=cond) + elif loc.is_vfp_reg() and prev_loc.is_stack(): + # load spilled value into vfp 
reg + offset = ConstInt(prev_loc.position*-WORD) + if not _check_imm_arg(offset): + self.mc.gen_load_int(temp.value, offset.value) + self.mc.ADD_rr(temp.value, r.fp.value, temp.value) + else: + self.mc.ADD_rr(temp.value, r.fp.value, offset) + self.mc.VLDR(loc.value, temp.value, cond=cond) else: assert 0, 'unsupported case' elif loc.is_reg() and prev_loc.is_reg(): @@ -681,6 +702,8 @@ self.regalloc_mov(r.ip, loc) elif loc.is_reg(): self.mc.POP([loc.value]) + elif loc.is_vfp_reg(): + self.mc.VPOP([loc.value]) else: assert 0, 'ffuu' diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -53,6 +53,9 @@ def is_vfp_reg(self): return True + def as_key(self): + return self.value + 20 + class ImmLocation(AssemblerLocation): _immutable_ = True @@ -69,7 +72,7 @@ return True def as_key(self): - return self.value + 20 + return self.value + 40 class ConstFloatLoc(AssemblerLocation): """This class represents an imm float value which is stored in memory at @@ -98,9 +101,6 @@ def __init__(self, position, num_words=1, type=INT): self.position = position self.width = num_words * WORD - # One of INT, REF, FLOAT - assert num_words == 1 - assert type == INT or type == REF self.type = type def frame_size(self): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -46,9 +46,11 @@ @staticmethod def frame_pos(loc, type): - assert type == INT or type == REF - # XXX for now we only have one word stack locs - return locations.StackLocation(loc, type=type) + if type == INT or type == REF: + num_words = 1 + else: + num_words = 2 + return locations.StackLocation(loc, num_words=num_words, type=type) def void(self, op, fcond): return [] From commits-noreply at bitbucket.org Mon Apr 4 13:31:55 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:31:55 +0200 
(CEST) Subject: [pypy-svn] pypy arm-backed-float: correctly associate float registers when updating the bindings to compile a bridge Message-ID: <20110404113155.7580E282BF7@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43138:fa8ce72df7c9 Date: 2011-04-02 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/fa8ce72df7c9/ Log: correctly associate float registers when updating the bindings to compile a bridge diff --git a/pypy/jit/backend/arm/helper/regalloc.py b/pypy/jit/backend/arm/helper/regalloc.py --- a/pypy/jit/backend/arm/helper/regalloc.py +++ b/pypy/jit/backend/arm/helper/regalloc.py @@ -59,15 +59,13 @@ if base: loc2, box2 = self._ensure_value_is_boxed(op.getarg(1)) locs.append(loc2) - self.vfprm.possibly_free_var(box2) - self.vfprm.possibly_free_var(box1) + self.possibly_free_var(box2) + self.possibly_free_var(box1) if float_result: res = self.vfprm.force_allocate_reg(op.result) - self.vfprm.possibly_free_var(op.result) else: res = self.rm.force_allocate_reg(op.result) - self.rm.possibly_free_var(op.result) - self.vfprm.possibly_free_var(box1) + self.possibly_free_var(op.result) locs.append(res) return locs return f diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -207,12 +207,11 @@ arg = inputargs[i] i += 1 if loc.is_reg(): - if arg.type == FLOAT: - self.vfprm.reg_bindings[arg] = loc - else: - self.rm.reg_bindings[arg] = loc - #XXX add float + self.rm.reg_bindings[arg] = loc + elif loc.is_vfp_reg: + self.vfprm.reg_bindings[arg] = loc else: + assert loc.is_stack() self.frame_manager.frame_bindings[arg] = loc used[loc] = None From commits-noreply at bitbucket.org Mon Apr 4 13:31:57 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:31:57 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: support forcing in combination with floats Message-ID: <20110404113157.DB506282C22@codespeak.net> 
Author: David Schneider Branch: arm-backed-float Changeset: r43139:7c83ac5efe76 Date: 2011-04-02 15:31 +0200 http://bitbucket.org/pypy/pypy/changeset/7c83ac5efe76/ Log: support forcing in combination with floats diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py --- a/pypy/jit/backend/arm/runner.py +++ b/pypy/jit/backend/arm/runner.py @@ -1,6 +1,6 @@ from pypy.jit.backend.arm.assembler import AssemblerARM from pypy.jit.backend.arm.arch import WORD -from pypy.jit.backend.arm.registers import all_regs +from pypy.jit.backend.arm.registers import all_regs, all_vfp_regs from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, rffi, llmemory @@ -106,9 +106,11 @@ faildescr = self.get_fail_descr_from_number(fail_index) rffi.cast(TP, addr_of_force_index)[0] = -1 # start of "no gc operation!" block - frame_depth = faildescr._arm_frame_depth + frame_depth = faildescr._arm_frame_depth*WORD addr_end_of_frame = (addr_of_force_index - - (frame_depth+len(all_regs))*WORD) + (frame_depth + + len(all_regs)*WORD + + len(all_vfp_regs)*2*WORD)) fail_index_2 = self.assembler.failure_recovery_func( faildescr._failure_recovery_code, addr_of_force_index, diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -804,7 +804,7 @@ self.assembler._write_fail_index(fail_index) args = [imm(rffi.cast(lltype.Signed, op.getarg(0).getint()))] for v in guard_op.getfailargs(): - if v in self.reg_bindings: + if v in self.rm.reg_bindings or v in self.vfprm.reg_bindings: self.force_spill_var(v) self.assembler.emit_op_call(op, args, self, fcond) locs = self._prepare_guard(guard_op) From commits-noreply at bitbucket.org Mon Apr 4 13:31:59 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:31:59 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: add type 
based indirection to convert_to_imm Message-ID: <20110404113159.8CF32282C18@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43140:bccfb0d3680f Date: 2011-04-04 10:03 +0200 http://bitbucket.org/pypy/pypy/changeset/bccfb0d3680f/ Log: add type based indirection to convert_to_imm diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -198,6 +198,12 @@ return self.rm.make_sure_var_in_reg(var, forbidden_vars, selected_reg, need_lower_byte) + def convert_to_imm(self, value): + if isinstance(value, ConstInt): + return self.rm.convert_to_imm(value) + else: + assert isinstance(value, ConstFloat) + return self.vfprm.convert_to_imm(value) def update_bindings(self, locs, frame_depth, inputargs): used = {} From commits-noreply at bitbucket.org Mon Apr 4 13:32:00 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 13:32:00 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: support floats in the direct bootstrap code called when executing call_assembler Message-ID: <20110404113200.5613F282C18@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43141:8ba7cc275378 Date: 2011-04-04 13:27 +0200 http://bitbucket.org/pypy/pypy/changeset/8ba7cc275378/ Log: support floats in the direct bootstrap code called when executing call_assembler diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -384,16 +384,33 @@ else: selected_reg += 1 - for i in range(stack_locs): - loc = arglocs[reg_args + i] - stack_position = (len(r.callee_saved_registers) + 1 +i)*WORD + stack_position = len(r.callee_saved_registers)*WORD + \ + len(r.callee_saved_vfp_registers)*2*WORD + \ + WORD # for the FAIL INDEX + for i in range(reg_args, len(arglocs)): + loc = arglocs[i] if loc.is_reg(): self.mc.LDR_ri(loc.value, r.fp.value, 
stack_position) + elif loc.is_vfp_reg(): + self.mc.VLDR(loc.value, r.fp.value, stack_position) elif loc.is_stack(): - self.mc.LDR_ri(r.ip.value, r.fp.value, stack_position) - self.mov_loc_loc(r.ip, loc) + if loc.type == FLOAT: + with saved_registers(self.mc, [], [r.d0]): + self.mc.VLDR(r.d0.value, r.fp.value, stack_position) + self.mov_loc_loc(r.d0, loc) + elif loc.type == INT or loc.type == REF: + self.mc.LDR_ri(r.ip.value, r.fp.value, stack_position) + self.mov_loc_loc(r.ip, loc) + else: + assert 0, 'invalid location' else: assert 0, 'invalid location' + if loc.type == FLOAT: + size = 2 + else: + size = 1 + stack_position += size * WORD + sp_patch_location = self._prepare_sp_patch_position() self.mc.B_offs(loop_head) self._patch_sp_offset(sp_patch_location, looptoken._arm_frame_depth) From commits-noreply at bitbucket.org Mon Apr 4 14:17:47 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 14:17:47 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: shorten links Message-ID: <20110404121747.E117E36C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3462:03cd86af8a1b Date: 2011-04-04 14:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/03cd86af8a1b/ Log: shorten links diff --git a/sprintinfo/gothenburg-2011/announce.txt b/sprintinfo/gothenburg-2011/announce.txt --- a/sprintinfo/gothenburg-2011/announce.txt +++ b/sprintinfo/gothenburg-2011/announce.txt @@ -37,7 +37,7 @@ .. _tram: http://www.vasttrafik.se/en/ -.. _map: http://maps.google.se/maps?f=q&source=s_q&hl=sv&geocode=&q=G%C3%B6tabergsgatan+22,+G%C3%B6teborg&aq=&sll=57.698781,11.972952&sspn=0.009815,0.026565&ie=UTF8&hq=&hnear=G%C3%B6tabergsgatan+22,+411+34+G%C3%B6teborg,+V%C3%A4stra+G%C3%B6talands+L%C3%A4n&z=15 +.. _map: http://bit.ly/grRuQe Probably cheapest and not too far away is to book accomodation at `SGS Veckobostader`_. 
The `Elite Park Avenyn Hotel`_ is a luxury hotel just a @@ -97,7 +97,7 @@ You can also arrive by ferry_, from either Kiel in Germany or Frederikshavn in Denmark. -..`Central Station`_: http://maps.google.se/maps?f=q&source=s_q&hl=sv&geocode=&q=Centralstationen+G%C3%B6teborg&aq=&sll=57.698789,11.972946&sspn=0.009815,0.026565&g=G%C3%B6tabergsgatan+22,+G%C3%B6teborg&ie=UTF8&hq=Centralstationen&hnear=G%C3%B6teborg,+V%C3%A4stra+G%C3%B6talands+L%C3%A4n&ll=57.703551,11.979475&spn=0.018481,0.053129&z=14 +.. _`Central Station`: http://bit.ly/fON43p .. _`Landvetter`: http://swedavia.se/en/Goteborg/Traveller-information/Traffic-information/ .. _`Gothenburg City Airport`: http://www.goteborgairport.se/eng.asp From commits-noreply at bitbucket.org Mon Apr 4 15:19:36 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 15:19:36 +0200 (CEST) Subject: [pypy-svn] pypy default: kill the rest of the cases that used traverse Message-ID: <20110404131936.763FE282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43142:09e4a95b2969 Date: 2011-04-04 15:19 +0200 http://bitbucket.org/pypy/pypy/changeset/09e4a95b2969/ Log: kill the rest of the cases that used traverse diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,7 +1,7 @@ from 
__future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse +from pypy.objspace.flow.model import Constant, Block, Link, Variable from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,22 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - def flattenobj(*args): for arg in args: try: diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. 
- def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,18 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -149,8 +149,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], 
"except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +166,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +186,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - 
assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): From commits-noreply at bitbucket.org Mon Apr 4 15:21:52 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 4 Apr 2011 15:21:52 +0200 (CEST) Subject: [pypy-svn] pypy default: - add fast paths to init__List Message-ID: <20110404132152.168C0282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43143:9095a2b3aea7 Date: 2011-04-04 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/9095a2b3aea7/ Log: - add fast paths to init__List - make init__List more transparent to the JIT diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 
'wrappeditems', which defeats the W_FastSeqIterObject optimization. - # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. + if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): From commits-noreply at bitbucket.org Mon Apr 4 15:34:13 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 15:34:13 +0200 (CEST) Subject: [pypy-svn] pypy default: Add a comment. 
Message-ID: <20110404133413.24B11282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43144:0b7edfd84a51 Date: 2011-04-03 19:01 +0200 http://bitbucket.org/pypy/pypy/changeset/0b7edfd84a51/ Log: Add a comment. diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -378,6 +378,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) From commits-noreply at bitbucket.org Mon Apr 4 15:34:13 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 15:34:13 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix test: now FRAME_FIXED_SIZE may be larger than 10. Message-ID: <20110404133413.B64C9282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43145:a32e9197f736 Date: 2011-04-04 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/a32e9197f736/ Log: Fix test: now FRAME_FIXED_SIZE may be larger than 10. 
diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) From commits-noreply at bitbucket.org Mon Apr 4 15:34:14 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 15:34:14 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110404133414.4954F282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43146:5f250aad04f4 Date: 2011-04-04 15:33 +0200 http://bitbucket.org/pypy/pypy/changeset/5f250aad04f4/ Log: merge heads diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -378,6 +378,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... 
assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) From commits-noreply at bitbucket.org Mon Apr 4 15:56:53 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 15:56:53 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: fix tests Message-ID: <20110404135653.DE2F5282BEC@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43147:b54b162e8762 Date: 2011-04-04 15:52 +0200 http://bitbucket.org/pypy/pypy/changeset/b54b162e8762/ Log: fix tests diff --git a/pypy/jit/backend/arm/test/test_regalloc.py b/pypy/jit/backend/arm/test/test_regalloc.py --- a/pypy/jit/backend/arm/test/test_regalloc.py +++ b/pypy/jit/backend/arm/test/test_regalloc.py @@ -8,7 +8,7 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.jit.backend.llsupport.descr import GcCache from pypy.jit.backend.detect_cpu import getcpuclass -from pypy.jit.backend.arm.regalloc import RegAlloc +from pypy.jit.backend.arm.regalloc import Regalloc from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.annlowlevel import llhelper @@ -57,7 +57,7 @@ def load_effective_addr(self, *args): self.lea.append(args) -class RegAllocForTests(RegAlloc): +class RegAllocForTests(Regalloc): position = 0 def _compute_next_usage(self, v, _): return -1 diff --git a/pypy/jit/backend/arm/test/test_assembler.py b/pypy/jit/backend/arm/test/test_assembler.py --- a/pypy/jit/backend/arm/test/test_assembler.py +++ b/pypy/jit/backend/arm/test/test_assembler.py @@ -20,11 +20,11 @@ class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) - lp = LoopToken() - lp.compiled_loop_token = CompiledLoopToken(cpu, None) + #lp = LoopToken() + #lp.compiled_loop_token = CompiledLoopToken(cpu, None) self.a = AssemblerARM(cpu) self.a.setup_once() - self.a.setup(lp) + self.a.setup() def test_make_operation_list(self): i = rop.INT_ADD From 
commits-noreply at bitbucket.org Mon Apr 4 15:56:54 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 15:56:54 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: some register allocation related fixes Message-ID: <20110404135654.B43FD282BEC@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43148:c8a2911e2b9b Date: 2011-04-04 15:55 +0200 http://bitbucket.org/pypy/pypy/changeset/c8a2911e2b9b/ Log: some register allocation related fixes diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -710,6 +710,9 @@ self.mc.PUSH([loc.value]) elif loc.is_vfp_reg(): self.mc.VPUSH([loc.value]) + elif loc.is_imm(): + self.regalloc_mov(loc, r.ip) + self.mc.PUSH([r.ip.value]) else: assert 0, 'ffuu' diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -214,7 +214,7 @@ i += 1 if loc.is_reg(): self.rm.reg_bindings[arg] = loc - elif loc.is_vfp_reg: + elif loc.is_vfp_reg(): self.vfprm.reg_bindings[arg] = loc else: assert loc.is_stack() @@ -264,8 +264,11 @@ box = thing return loc, box - - + def _sync_var(self, v): + if v.type == FLOAT: + self.vfprm._sync_var(v) + else: + self.rm._sync_var(v) def prepare_op_int_add(self, op, fcond): boxes = list(op.getarglist()) From commits-noreply at bitbucket.org Mon Apr 4 18:19:54 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 18:19:54 +0200 (CEST) Subject: [pypy-svn] pypy default: Update the numbers. Message-ID: <20110404161954.4EDD8282BAA@codespeak.net> Author: Armin Rigo Branch: Changeset: r43153:563264d53fdd Date: 2011-04-04 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/563264d53fdd/ Log: Update the numbers. 
diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -79,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html From commits-noreply at bitbucket.org Mon Apr 4 18:20:07 2011 From: commits-noreply at bitbucket.org (bivab) Date: Mon, 4 Apr 2011 18:20:07 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Add myself Message-ID: <20110404162007.B6261282BEC@codespeak.net> Author: David Schneider Branch: extradoc Changeset: r3465:6500dfb21d28 Date: 2011-04-04 18:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/6500dfb21d28/ Log: Add myself diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -13,5 +13,6 @@ Carl Friedrich Bolz 24-30 J+L's house Vegan Lukas Diekmann 24-30 J+L's house Stephen Simmons 28-1 +David Schneider 26-01 SGS Veckobostader ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Mon Apr 4 18:24:30 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 18:24:30 +0200 (CEST) Subject: [pypy-svn] pypy default: Update the version; we are now close to 1.5. Message-ID: <20110404162430.0EB78282BAA@codespeak.net> Author: Armin Rigo Branch: Changeset: r43154:3020afb69cce Date: 2011-04-04 18:24 +0200 http://bitbucket.org/pypy/pypy/changeset/3020afb69cce/ Log: Update the version; we are now close to 1.5. 
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From commits-noreply at bitbucket.org Mon Apr 4 18:39:23 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 4 Apr 2011 18:39:23 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Stephen Simmons cannot eat nuts. And Jacob's last name keeps getting mangled. Message-ID: <20110404163923.671BA282BAA@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3466:71f5b3a63419 Date: 2011-04-04 18:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/71f5b3a63419/ Log: Stephen Simmons cannot eat nuts. And Jacob's last name keeps getting mangled. diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -8,11 +8,11 @@ ==================== ============== ===================== ================== Name Arrive/Depart Accomodation Food ==================== ============== ===================== ================== -Jacob Hall�n lives there no peppers +Jacob Hallen lives there no peppers Laura Creighton lives there Carl Friedrich Bolz 24-30 J+L's house Vegan Lukas Diekmann 24-30 J+L's house -Stephen Simmons 28-1 +Stephen Simmons 28-1 absolutely no nuts David Schneider 26-01 SGS Veckobostader ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Mon Apr 4 18:53:51 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 18:53:51 +0200 (CEST) Subject: [pypy-svn] pypy default: No clue what I'm doing there, but pytest_runtest_logreport() Message-ID: 
<20110404165351.2E4E2282BAA@codespeak.net> Author: Armin Rigo Branch: Changeset: r43155:765fd1aaa082 Date: 2011-04-04 18:53 +0200 http://bitbucket.org/pypy/pypy/changeset/765fd1aaa082/ Log: No clue what I'm doing there, but pytest_runtest_logreport() in the "resultlog" output tries to do an invalid operation on the result of (some?) skipped tests. Just use the str() of the whole thing for now. diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): From commits-noreply at bitbucket.org Mon Apr 4 19:07:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 19:07:35 +0200 (CEST) Subject: [pypy-svn] pypy default: Tentative: kill various pieces of code left and right handling Message-ID: <20110404170735.3010D282BAA@codespeak.net> Author: Armin Rigo Branch: Changeset: r43156:7373ba665cdc Date: 2011-04-04 18:00 +0200 http://bitbucket.org/pypy/pypy/changeset/7373ba665cdc/ Log: Tentative: kill various pieces of code left and right handling keepalives. Now the inliner no longer inserts tons of keepalives for you (which are almost always unneeded). 
diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -46,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -157,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -198,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, 
n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. 
pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ 
b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - 
if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git 
a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -53,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -556,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -769,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as 
here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. - # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): 
diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. 
but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. - self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -211,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. 
# Note that same_as and cast_pointer are not recorded in usepoints. self.accessed_substructs = {} @@ -331,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -482,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -498,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -544,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -614,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True 
# Consider the two pointers (input and result) as @@ -637,8 +616,6 @@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, 
spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? - m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split 
the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,5 +1,5 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, 
simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. 
""" assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in 
list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - From commits-noreply at bitbucket.org Mon Apr 4 19:07:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 19:07:35 +0200 (CEST) Subject: [pypy-svn] pypy default: Remove another test. Message-ID: <20110404170735.CE048282BAA@codespeak.net> Author: Armin Rigo Branch: Changeset: r43157:71ccf27c81c5 Date: 2011-04-04 19:03 +0200 http://bitbucket.org/pypy/pypy/changeset/71ccf27c81c5/ Log: Remove another test. diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 From commits-noreply at bitbucket.org Mon Apr 4 19:07:44 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 4 Apr 2011 19:07:44 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110404170744.8FE8B282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43158:7cfdee10b79d Date: 2011-04-04 17:07 +0000 http://bitbucket.org/pypy/pypy/changeset/7cfdee10b79d/ Log: merge heads diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ 
b/pypy/translator/backendopt/test/test_malloc.py @@ -46,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -157,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -198,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def 
test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. 
pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ 
b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo 
import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the 
blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -53,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -556,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', 
lltype.Signed), ('s', SMALL)) @@ -769,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. - # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) 
- def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = 
needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. 
- self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -211,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. self.accessed_substructs = {} @@ -331,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -482,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -498,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. 
- elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -544,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -614,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -637,8 +616,6 @@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block 
+from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? 
- m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - 
graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def 
split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,5 +1,5 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - 
remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - From commits-noreply at bitbucket.org Mon Apr 4 21:12:24 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 4 Apr 2011 21:12:24 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix translation in the _winreg module Message-ID: <20110404191224.3E12B282C18@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43160:f110138d39bf Date: 2011-04-04 20:56 +0200 http://bitbucket.org/pypy/pypy/changeset/f110138d39bf/ Log: Fix translation in the _winreg module diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, 
bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) From commits-noreply at bitbucket.org Mon Apr 4 21:12:02 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 4 Apr 2011 21:12:02 +0200 (CEST) Subject: [pypy-svn] pypy default: Redo e5ce4f03d51a Message-ID: <20110404191202.2630B282BEC@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43159:e3f11f236dd6 Date: 2011-04-04 21:07 +0200 http://bitbucket.org/pypy/pypy/changeset/e3f11f236dd6/ Log: Redo e5ce4f03d51a diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -438,6 +438,38 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg + def test__package__(self): + # Regression test for http://bugs.python.org/issue3221. + def check_absolute(): + exec "from os import path" in ns + def check_relative(): + exec "from . 
import a" in ns + + # Check both OK with __package__ and __name__ correct + ns = dict(__package__='pkg', __name__='pkg.notarealmodule') + check_absolute() + check_relative() + + # Check both OK with only __name__ wrong + ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') + check_absolute() + check_relative() + + # Check relative fails with only __package__ wrong + ns = dict(__package__='foo', __name__='pkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check relative fails with __package__ and __name__ wrong + ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check both fail with package set to a non-string + ns = dict(__package__=object()) + raises(ValueError, check_absolute) + raises(ValueError, check_relative) + def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -118,6 +118,105 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) +def _get_relative_name(space, modulename, level, w_globals): + w = space.wrap + ctxt_w_package = space.finditem(w_globals, w('__package__')) + + ctxt_package = None + if ctxt_w_package is not None and ctxt_w_package is not space.w_None: + try: + ctxt_package = space.str_w(ctxt_w_package) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_ValueError, space.wrap( + "__package__ set to non-string")) + + if ctxt_package is not None: + # __package__ is set, so use it + package_parts = ctxt_package.split('.') + while level > 1 and package_parts: + level -= 1 + if package_parts: + package_parts.pop() + if not package_parts: + if len(ctxt_package) == 0: + msg = "Attempted 
relative import in non-package" + else: + msg = "Attempted relative import beyond toplevel package" + raise OperationError(space.w_ValueError, w(msg)) + + # Try to import parent package + try: + w_parent = absolute_import(space, ctxt_package, 0, + None, tentative=False) + except OperationError, e: + if not e.match(space, space.w_ImportError): + raise + if level > 0: + raise OperationError(space.w_SystemError, space.wrap( + "Parent module '%s' not loaded, " + "cannot perform relative import" % ctxt_package)) + else: + space.warn("Parent module '%s' not found " + "while handling absolute import" % ctxt_package, + space.w_RuntimeWarning) + + if modulename: + package_parts.append(modulename) + rel_level = len(package_parts) + rel_modulename = '.'.join(package_parts) + else: + # __package__ not set, so figure it out and set it + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) + + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + + if not ctxt_name: + return None, 0 + + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + + if level > 0 and not ctxt_name_prefix_parts: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + + rel_modulename = '.'.join(ctxt_name_prefix_parts) + + if ctxt_w_path is not None: + # __path__ is set, so __name__ is already the package name + space.setitem(w_globals, w("__package__"), ctxt_w_name) + else: + # Normal module, so work out the package name if any + if '.' 
not in ctxt_name: + space.setitem(w_globals, w("__package__"), space.w_None) + elif rel_modulename: + space.setitem(w_globals, w("__package__"), w(rel_modulename)) + + if modulename: + if rel_modulename: + rel_modulename += '.' + modulename + else: + rel_modulename = modulename + + rel_level = len(ctxt_name_prefix_parts) + + return rel_modulename, rel_level + + @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -139,58 +238,30 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) + rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise + if rel_modulename: + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False - if ctxt_name is not None: - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - if ctxt_name_prefix_parts: - rel_modulename = '.'.join(ctxt_name_prefix_parts) - if modulename: - rel_modulename += '.' + modulename - baselevel = len(ctxt_name_prefix_parts) - if rel_modulename is not None: - # XXX What is this check about? 
There is no test for it - w_mod = check_sys_modules(space, w(rel_modulename)) + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod - if (w_mod is None or - not space.is_w(w_mod, space.w_None) or - level > 0): + ## if level > 0: + ## msg = "Attempted relative import in non-package" + ## raise OperationError(space.w_ValueError, w(msg)) - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. - if level == -1: - tentative = True - else: - tentative = False + ## if not modulename: + ## return None - w_mod = absolute_import(space, rel_modulename, - baselevel, fromlist_w, - tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod - else: - rel_modulename = None - - if level > 0: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) w_mod = absolute_import_try(space, modulename, 0, fromlist_w) if w_mod is None or space.is_w(w_mod, space.w_None): w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) From commits-noreply at bitbucket.org Mon Apr 4 21:12:28 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 4 Apr 2011 21:12:28 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix the imp module to better handle the __package__ attribute Message-ID: <20110404191228.1D810282C19@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43161:5cefa0984ff6 Date: 2011-04-04 20:56 +0200 http://bitbucket.org/pypy/pypy/changeset/5cefa0984ff6/ Log: Fix the imp module to better handle the __package__ attribute diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -135,7 +135,7 @@ return importing.check_sys_modules(space, w_modulename) def new_module(space, w_name): - 
return space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -134,11 +134,13 @@ if ctxt_package is not None: # __package__ is set, so use it + if ctxt_package == '' and level < 0: + return None, 0 + package_parts = ctxt_package.split('.') while level > 1 and package_parts: level -= 1 - if package_parts: - package_parts.pop() + package_parts.pop() if not package_parts: if len(ctxt_package) == 0: msg = "Attempted relative import in non-package" @@ -162,9 +164,9 @@ "while handling absolute import" % ctxt_package, space.w_RuntimeWarning) + rel_level = len(package_parts) if modulename: package_parts.append(modulename) - rel_level = len(package_parts) rel_modulename = '.'.join(package_parts) else: # __package__ not set, so figure it out and set it From commits-noreply at bitbucket.org Tue Apr 5 04:56:39 2011 From: commits-noreply at bitbucket.org (ademan) Date: Tue, 5 Apr 2011 04:56:39 +0200 (CEST) Subject: [pypy-svn] pypy fold_intadd: Added support for folding int_sub(ConstInt(*), i*) Message-ID: <20110405025639.4D8D1282BDC@codespeak.net> Author: Daniel Roberts Branch: fold_intadd Changeset: r43162:4a04352b5328 Date: 2011-04-04 19:54 -0700 http://bitbucket.org/pypy/pypy/changeset/4a04352b5328/ Log: Added support for folding int_sub(ConstInt(*), i*) diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5554,6 +5554,24 @@ """ self.optimize_loop(ops, expected) + def test_fold_backwards_sub(self): + ops = """ + [i0] + i1 = int_add(i0, 3) + i2 = int_sub(4, i1) + i3 = int_sub(i2, 14) + i4 = int_add(6, i2) + jump(i3) + """ + + expected = """ + [i0] + i1 = int_add(i0, 3) + i2 = 
int_sub(1, i0) + i3 = int_sub(i0, 13) + i4 = int_sub(7, i0) + jump(i3) + """ ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/addition.py b/pypy/jit/metainterp/optimizeopt/addition.py --- a/pypy/jit/metainterp/optimizeopt/addition.py +++ b/pypy/jit/metainterp/optimizeopt/addition.py @@ -1,15 +1,13 @@ from pypy.jit.metainterp.optimizeopt.optimizer import * -from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex from pypy.jit.metainterp.history import ConstInt from pypy.jit.metainterp.optimizeutil import _findall from pypy.jit.metainterp.resoperation import rop, ResOperation -from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.metainterp.optimizeopt.intutils import IntBound -from pypy.rlib.rarithmetic import highest_bit class OptAddition(Optimization): def __init__(self): - self.args = {} + self.loperands = {} + self.roperands = {} # roperands is only for int_sub(ConstInt(*), i*) + # and cases deriving from that def reconstruct_for_next_iteration(self, optimizer, valuemap): return OptAddition() @@ -31,18 +29,60 @@ constant = ConstInt(constant) return ResOperation(rop.INT_ADD, [variable, constant], result) - def _process_add(self, variable, constant, result): + def _process_add(self, constant, variable, result): + # int_add(ConstInt(*), int_sub(ConstInt(*), i*)) try: - root, stored_constant = self.args[variable] + stored_constant, root = self.roperands[variable] + constant = constant + stored_constant + + self.roperands[result] = constant, root + + boxed_constant = ConstInt(constant) + new_op = ResOperation(rop.INT_SUB, [boxed_constant, variable], result) + self.emit_operation(new_op) + return + except KeyError: + pass + + # int_add(ConstInt(*), int_add(ConstInt(*), i*)) + try: + root, stored_constant = self.loperands[variable] constant = constant + stored_constant except KeyError: root = variable - self.args[result] = root, constant + self.loperands[result] = root, constant new_op = 
self._int_operation(root, constant, result) self.emit_operation(new_op) + def _process_sub(self, constant, variable, result): + # int_sub(ConstInt(*), int_sub(ConstInt(*), i*)) + try: + stored_constant, root = self.roperands[variable] + constant = constant - stored_constant + + self.loperands[result] = root, constant + + new_op = self._int_operation(root, constant, result) + self.emit_operation(new_op) + return + except KeyError: + pass + + # int_sub(ConstInt(*), int_add(ConstInt(*), i*)) + try: + root, stored_constant = self.loperands[variable] + constant = constant - stored_constant + except KeyError: + root = variable + + self.roperands[result] = constant, root + + constant = ConstInt(constant) + new_op = ResOperation(rop.INT_SUB, [constant, root], result) + self.emit_operation(new_op) + def optimize_INT_ADD(self, op): lv = self.getvalue(op.getarg(0)) rv = self.getvalue(op.getarg(1)) @@ -51,10 +91,10 @@ self.emit_operation(op) # XXX: there's support for optimizing this elsewhere, right? elif lv.is_constant(): constant = lv.box.getint() - self._process_add(op.getarg(1), constant, result) + self._process_add(constant, op.getarg(1), result) elif rv.is_constant(): constant = rv.box.getint() - self._process_add(op.getarg(0), constant, result) + self._process_add(constant, op.getarg(0), result) else: self.emit_operation(op) @@ -65,11 +105,12 @@ if lv.is_constant() and rv.is_constant(): self.emit_operation(op) # XXX: there's support for optimizing this elsewhere, right? elif lv.is_constant(): - # TODO: implement? 
- self.emit_operation(op) + constant = lv.box.getint() + self._process_sub(constant, op.getarg(1), result) + #self.emit_operation(op) elif rv.is_constant(): constant = rv.box.getint() - self._process_add(op.getarg(0), -constant, result) + self._process_add(-constant, op.getarg(0), result) else: self.emit_operation(op) From commits-noreply at bitbucket.org Tue Apr 5 11:38:51 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Tue, 5 Apr 2011 11:38:51 +0200 (CEST) Subject: [pypy-svn] pypy default: Restore the fast path for imports: only read sys.modules when the module Message-ID: <20110405093851.3AE6D282BE3@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43164:11faf77ff2b1 Date: 2011-04-05 11:33 +0200 http://bitbucket.org/pypy/pypy/changeset/11faf77ff2b1/ Log: Restore the fast path for imports: only read sys.modules when the module has already been imported, and do not acquire the import lock. diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -61,3 +61,27 @@ assert not imp.lock_held() self.waitfor(lambda: done) assert done + +class TestImportLock: + def test_lock(self, space, monkeypatch): + from pypy.module.imp.importing import getimportlock, importhook + + # Monkeypatch the import lock and add a counter + importlock = getimportlock(space) + original_acquire = importlock.acquire_lock + def acquire_lock(): + importlock.count += 1 + original_acquire() + importlock.count = 0 + monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) + + # An already imported module + importhook(space, 'sys') + assert importlock.count == 0 + # A new module + importhook(space, 're') + assert importlock.count == 7 + # Import it again + previous_count = importlock.count + importhook(space, 're') + assert importlock.count == previous_count diff --git a/pypy/module/imp/importing.py 
b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -257,16 +257,7 @@ space.timer.stop_name("importhook", modulename) return w_mod - ## if level > 0: - ## msg = "Attempted relative import in non-package" - ## raise OperationError(space.w_ValueError, w(msg)) - - ## if not modulename: - ## return None - - w_mod = absolute_import_try(space, modulename, 0, fromlist_w) - if w_mod is None or space.is_w(w_mod, space.w_None): - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) space.timer.stop_name("importhook", modulename) @@ -274,6 +265,11 @@ @jit.dont_look_inside def absolute_import(space, modulename, baselevel, fromlist_w, tentative): + # Short path: check in sys.modules + w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) + if w_mod is not None and not space.is_w(w_mod, space.w_None): + return w_mod + lock = getimportlock(space) lock.acquire_lock() try: From commits-noreply at bitbucket.org Tue Apr 5 14:52:57 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 5 Apr 2011 14:52:57 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: draft for the training session Message-ID: <20110405125257.01782282BE7@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3467:8fd60941bd43 Date: 2011-04-05 14:19 +0200 http://bitbucket.org/pypy/extradoc/changeset/8fd60941bd43/ Log: draft for the training session diff --git a/talk/ep2011/training.txt b/talk/ep2011/training.txt new file mode 100644 --- /dev/null +++ b/talk/ep2011/training.txt @@ -0,0 +1,35 @@ +PyPy training session +===================== + +Part 1 (2 hours): Run your application under PyPy +-------------------------------------------------- + +- how to use pypy to run your app (easy: s/python/pypy :-)) + +- how to optimize it for the 
pypy jit: + + * general idea about how the jit works + + * looking at the traces + + * using the jitviewer + + * tweaking jit and gc params + +- how to compile C extensions for pypy, and fix them if necessary + + +Part 2 (2 hours): Write your own interpreter with PyPy +------------------------------------------------------- + +- give the students the source code of a toy language interpreter of written + in RPython + +- teach them how to translate it + +- teach them about the JIT hints + +- challenge: place hints (or maybe even refactor the interpreter) to get the + best results with the JIT + +- do we still have a PyPy T-shirt to give as a prize to the winner? :) From commits-noreply at bitbucket.org Tue Apr 5 14:53:07 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 5 Apr 2011 14:53:07 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: try to take the best of both versions Message-ID: <20110405125307.5E27F282C1E@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3468:a2cff55ff6da Date: 2011-04-04 16:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/a2cff55ff6da/ Log: try to take the best of both versions diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -147,10 +147,9 @@ In this paper we present two of these hints that are extensively used in the PyPy project to improve the performance of its Python interpreter. -% XXX: paragraph rephrased by anto; feel free to pick the one you like best - -Conceptually, it is possible to achieve significant speed-ups by feeding into -the compiler some information that is observed at runtime: in particular, if +Conceptually, the significant speed-ups that can be achieved with +dynamic compilation depend on feeding into compilation and exploiting +values observed at runtime. 
In particular, if there are values which vary very slowly, it is possible to compile multiple specialized versions of the same code, one for each actual value. To exploit the runtime feedback, the implementation code and data structures need to be @@ -158,13 +157,6 @@ we present allow exactly to implement such feedback and exploitation in a meta-tracing context. -% Conceptually the significant speed-ups that can be achieved with -% dynamic compilation depend on feeding into compilation and exploiting -% values observed at runtime that are slow-varying in practice. To exploit the -% runtime feedback, the implementation code and data structures need to be -% structured so that many such values are at hand. The hints that we present allow -% exactly to implement such feedback and exploitation in a meta-tracing context. - Concretely these hints are used to control how the optimizer of the tracing JIT can improve the traces of the object model. More specifically, these hints influence the constant folding @@ -442,7 +434,7 @@ in static compilers it is significantly less applicable. Promotion is essentially a tool for trace specialization. There are places in -the interpreter where knowing that a value if constant opens a lot of +the interpreter where knowing that a value is constant opens a lot of optimization opportunities, even though it could have different values in practice. In such a place, promotion is used. The typical reason to do that is if there is @@ -643,7 +635,7 @@ side effects, because it changes the memoizing dictionary. However, because this side effect is not externally visible, the function from the outside is pure. This is a property that is not easily detectable by analysis. Therefore, the purity -of this function needs to be manually annotated. +of this function needs to be annotated manually. 
From commits-noreply at bitbucket.org Tue Apr 5 14:53:07 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 5 Apr 2011 14:53:07 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: lukas is staying till the 2nd Message-ID: <20110405125307.DA91D282C1B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3469:01df4b40e67a Date: 2011-04-05 11:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/01df4b40e67a/ Log: lukas is staying till the 2nd diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -11,7 +11,7 @@ Jacob Hallen lives there no peppers Laura Creighton lives there Carl Friedrich Bolz 24-30 J+L's house Vegan -Lukas Diekmann 24-30 J+L's house +Lukas Diekmann 24-2 J+L's house Stephen Simmons 28-1 absolutely no nuts David Schneider 26-01 SGS Veckobostader ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Tue Apr 5 14:53:08 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 5 Apr 2011 14:53:08 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add a sentence that the trace is SSA and stress where guards come from Message-ID: <20110405125308.5339A282C1B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3470:5b83cfa07ca3 Date: 2011-04-05 14:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/5b83cfa07ca3/ Log: add a sentence that the trace is SSA and stress where guards come from diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -355,7 +355,8 @@ attributes \texttt{b} and \texttt{c} are found on the class. The line numbers in the trace correspond to the line numbers in Figure~\ref{fig:interpreter-slow} where the traced operations come from. The -trace indeed contains +trace is in SSA form. 
Note how all the guards in trace correspond to a +condition in the original code. The trace contains five calls to \texttt{dict.get}, which is slow. To make the language efficient using a tracing JIT, we need to find a way to get rid of these dictionary lookups somehow. How to achieve this will be topic of From commits-noreply at bitbucket.org Tue Apr 5 14:53:09 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 5 Apr 2011 14:53:09 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: implement Samuele's radical idea: just show the compounded speedups Message-ID: <20110405125309.270BE282C1B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3471:5c33349a70f0 Date: 2011-04-05 14:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/5c33349a70f0/ Log: implement Samuele's radical idea: just show the compounded speedups diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -846,13 +846,16 @@ \label{sec:evaluation} For space reasons we cannot perform a full evaluation here, but still want to -present some benchmark numbers. We chose to present two benchmarks, a port of -the classical Richards benchmark in Python and a Python version of the Telco -decimal benchmark\footnote{\texttt{http://speleotrove.com/decimal/telco.html}}, -using a pure Python decimal floating point implementation. The results we see in -these two benchmarks seem to repeat themselves in other benchmarks using -object-oriented code; for purely numerical algorithms the speedups are -significantly smaller. +present some benchmark numbers. 
We chose to present just some benchmarks: The +templating engine of the Django web +framework\footnote{\texttt{http://www.djangoproject.com/}}; a Monte-Carlo Go +AI\footnote{\texttt{http://shed-skin.blogspot.com/2009/07/ +disco-elegant-python-go-player.html}}; a BZ2 decoder; a port of the classical +Richards benchmark in Python; a Python version of the Telco decimal +benchmark\footnote{\texttt{http://speleotrove.com/decimal/telco.html}}, using a +pure Python decimal floating point implementation. The results we see in these +benchmarks seem to repeat themselves in other benchmarks using object-oriented +code; for purely numerical algorithms the speedups are significantly smaller. The benchmarks were run on an otherwise idle Intel Core2 Duo P8400 processor with 2.26 GHz and 3072 KB of cache on a machine with 3GB RAM running Linux @@ -868,44 +871,42 @@ produce machine code. The arithmetic mean of the times of the last 30 runs were used as the result. The errors were computed using a confidence interval with a 95\% confidence level \cite{georges_statistically_2007}. The results are -reported in Figure~\ref{fig:times}. +reported in Figure~\ref{fig:times}, together with the same numbers normed to +those of the full JIT. -Versioned types speed up both benchmarks by a significant factor of around 7. -The speed advantage of maps alone is a lot less clear. Maps also have a memory -advantage which we did not measure here. By themselves, maps improved the -Richards benchmark slightly, but made the Telco benchmark slower. Enabling both -maps and versioned types together yields a significant improvement over just -versioned types for Richards. XXX good explanation. For Telco, enabling both -has little effect over the gains for versioned types alone. 
- -\pedronis{XXX radical idea, given that there may be no space to discuss the subtle points and that this is really about showing that we can enable such mechanisms and there is already literature that shows that/how they work, to just consider the benchmarks with no maps and no versions and with both?} +The optimizations give a speedup between 80\% and almost 20 times. The Richards +is a particularly good case for the optimizations as it makes heavy uses of +object-oriented features. Pyflate uses mostly imperative code, so does not +benefit as much. Together with the optimization, PyPy outperforms CPython in +all benchmarks, which is not surprising because CPython is a simple +bytecode-based interpreter. \begin{figure} +\begin{center} {\footnotesize -\begin{center} -\begin{tabular}{|l|r|r|} +\begin{tabular}{|l|r|r|r|} \hline - &richards[ms] &telco[ms] \\ + &CPython &JIT baseline &JIT full\\ \hline -CPython &357.79 $\pm$ 1.32 &1209.67 $\pm$ 2.20\\ -speedup &1.00 $\times$ &1.00 $\times$\\ +django[ms] &988.67 $\pm$ 0.49 &405.62 $\pm$ 4.80 &149.31 $\pm$ 1.37\\ + &6.62 $\times$ &2.72 $\times$ &1.00 $\times$\\ \hline -JIT baseline &421.87 $\pm$ 0.48 &738.18 $\pm$ 3.29\\ -speedup &0.85 $\times$ &1.64 $\times$\\ +go[ms] &947.43 $\pm$ 1.30 &525.53 $\pm$ 7.67 &174.32 $\pm$ 7.78\\ + &5.44 $\times$ &3.01 $\times$ &1.00 $\times$\\ \hline -JIT map &382.88 $\pm$ 4.40 &834.19 $\pm$ 4.91\\ -speedup &0.93 $\times$ &1.45 $\times$\\ +pyflate[ms] &3209.20 $\pm$ 3.65 &2884.26 $\pm$ 21.11 &1585.48 $\pm$ 5.22\\ + &2.02 $\times$ &1.82 $\times$ &1.00 $\times$\\ \hline -JIT version &49.87 $\pm$ 0.29 &157.88 $\pm$ 1.79 \\ -speedup &7.17 $\times$ &7.66 $\times$\\ +richards[ms] &357.79 $\pm$ 1.32 &421.87 $\pm$ 0.48 &17.89 $\pm$ 1.15\\ + &20.00 $\times$ &23.58 $\times$ &1.00 $\times$\\ \hline -JIT full &17.89 $\pm$ 1.15 &153.48 $\pm$ 1.86 \\ -speedup &20.00 $\times$ &7.88 $\times$\\ +telco[ms] &1209.67 $\pm$ 2.20 &738.18 $\pm$ 3.29 &153.48 $\pm$ 1.86\\ + &7.88 $\times$ &4.81 $\times$ &1.00 
$\times$\\ \hline \end{tabular} +} \end{center} -} \caption{Benchmark Results} \label{fig:times} \end{figure} diff --git a/talk/icooolps2011/benchmarks/benchmarks.gnumeric b/talk/icooolps2011/benchmarks/benchmarks.gnumeric index a99159c250d8bc2c132073789433f2dcf36ed834..9f7f50b9aeb7343dca27cbb456cab785a8454541 GIT binary patch [cut] From commits-noreply at bitbucket.org Tue Apr 5 17:17:40 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 5 Apr 2011 17:17:40 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add myself Message-ID: <20110405151740.2F4D936C204@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3474:197e2d21cf51 Date: 2011-04-05 17:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/197e2d21cf51/ Log: add myself diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -14,5 +14,6 @@ Lukas Diekmann 24-2 J+L's house Stephen Simmons 28-1 absolutely no nuts David Schneider 26-01 SGS Veckobostader +Antonio Cuni 26-30 Hotel Poseidon his own diet :) ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Tue Apr 5 17:21:21 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 5 Apr 2011 17:21:21 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Add myself. Message-ID: <20110405152121.1C26E36C204@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3475:1d1e00d817d4 Date: 2011-04-05 17:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/1d1e00d817d4/ Log: Add myself. 
diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -15,5 +15,6 @@ Stephen Simmons 28-1 absolutely no nuts David Schneider 26-01 SGS Veckobostader Antonio Cuni 26-30 Hotel Poseidon his own diet :) +Armin Rigo 23-02 SGS Veckobostader ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Tue Apr 5 20:17:53 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 5 Apr 2011 20:17:53 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: signing up Message-ID: <20110405181753.97021282C1B@codespeak.net> Author: Hakan Ardo Branch: extradoc Changeset: r3476:cbc62d4b3736 Date: 2011-04-05 20:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/cbc62d4b3736/ Log: signing up diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -16,5 +16,6 @@ David Schneider 26-01 SGS Veckobostader Antonio Cuni 26-30 Hotel Poseidon his own diet :) Armin Rigo 23-02 SGS Veckobostader +Hakan Ardo 24-27 ??? 
==================== ============== ===================== ================== From commits-noreply at bitbucket.org Wed Apr 6 13:40:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 13:40:09 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: turn the draft into a real abstract Message-ID: <20110406114009.6BAD8282C30@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3477:582605076dd6 Date: 2011-04-06 13:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/582605076dd6/ Log: turn the draft into a real abstract diff --git a/talk/ep2011/training.txt b/talk/ep2011/training.txt --- a/talk/ep2011/training.txt +++ b/talk/ep2011/training.txt @@ -6,35 +6,44 @@ However, the first part is a prerequisite for the second one, thus people are not advised to join in the middle of the session +The session is meant to be highly interactive. People are invited to bring +their own laptop and try things by themselves. + + Part 1: Run your application under PyPy -------------------------------------------------- -- how to use pypy to run your app (easy: s/python/pypy :-)) +This tutorial is targeted to Python users who want to run their favorite +Python application under PyPy, and exploit the most of it. 
The following +topics will be covered: -- how to optimize it for the pypy jit: + - how to fix/avoid CPython implementation details (e.g., refcounting) - * general idea about how the jit works + - general overview of how the PyPy JIT works - * looking at the traces + - how to optimize your program for the PyPy JIT by looking at the produced + traces - * using the jitviewer + - how to tweak the parameters of the JIT and the GC - * tweaking jit and gc params - -- how to compile C extensions for pypy, and fix them if necessary + - how to use existing CPython C extensions on PyPy, and fix them if + necessary Part 2: Write your own interpreter with PyPy ------------------------------------------------------- -- give the students the source code of a toy language interpreter of written - in RPython +PyPy is not only a Python interpreter, but also a toolchain to implement +dynamic languages. This tutorial is targeted to people who want to implement +their own programming languages, or who simply want to know more about how the +PyPy JIT works internally. -- teach them how to translate it +The students will be given the source code for a toy language implemented in +RPython. They will learn: -- teach them about the JIT hints + - how to translate it to C using the PyPy translation toolchain -- challenge: place hints (or maybe even refactor the interpreter) to get the - best results with the JIT + - what are the "hints" needed by the JIT generator, and how to place them -- do we still have a PyPy T-shirt to give as a prize to the winner? :) +Then, they will be challenged to add the proper hints to the toy interpreter, +to get the best result with the JIT. 
From commits-noreply at bitbucket.org Wed Apr 6 14:22:35 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 14:22:35 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: clarify Message-ID: <20110406122235.43AF3282C31@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3478:ce3075d47faa Date: 2011-04-06 14:22 +0200 http://bitbucket.org/pypy/extradoc/changeset/ce3075d47faa/ Log: clarify diff --git a/talk/ep2011/training.txt b/talk/ep2011/training.txt --- a/talk/ep2011/training.txt +++ b/talk/ep2011/training.txt @@ -21,8 +21,9 @@ - general overview of how the PyPy JIT works - - how to optimize your program for the PyPy JIT by looking at the produced - traces + - how to optimize your program for the PyPy JIT + + - how to view and interpret the traces produced by the JIT - how to tweak the parameters of the JIT and the GC From commits-noreply at bitbucket.org Wed Apr 6 14:27:36 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 14:27:36 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix comment. Message-ID: <20110406122736.C43F1282C33@codespeak.net> Author: Armin Rigo Branch: Changeset: r43171:cdd41d69e9bf Date: 2011-04-06 14:27 +0200 http://bitbucket.org/pypy/pypy/changeset/cdd41d69e9bf/ Log: Fix comment. diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() From commits-noreply at bitbucket.org Wed Apr 6 14:36:53 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 14:36:53 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: First version. 
Message-ID: <20110406123653.11678282C31@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3479:15b48e65e683 Date: 2011-04-06 14:36 +0200 http://bitbucket.org/pypy/extradoc/changeset/15b48e65e683/ Log: First version. diff --git a/talk/ep2011/talk.txt b/talk/ep2011/talk.txt new file mode 100644 --- /dev/null +++ b/talk/ep2011/talk.txt @@ -0,0 +1,36 @@ +PyPy -- EuroPython 2011 +======================= + +The PyPy project has recently gathered a lot of attention for its +progress in speeding up the Python language -- it is the fastest, +most compatible and most stable 'alternative´ Python interpreter. No +longer merely a research curiosity, PyPy is now suitable for production +use. + +We will give an overview on how the tracing Just-in-Time compiler +works in PyPy. From there, we will then focus on what the PyPy +project has achieved, particularly in the past two years: + +* most Python benchmarks run much faster than with CPython or Psyco +* the real-world PyPy compiler toolchain itself (200 KLocs) runs twice as fast +* already supports 64bit and is in the process of supporting ARM +* full compatibility with CPython (more than Jython/IronPython) +* full (and JIT-ed) ctypes support to call C libraries from Python +* supports Stackless Python (in-progress) +* new "cpyext" layer which integrates existing CPython C extensions +* an experimental super-fast JIT-compilation of calls to C++ libraries + +We want to reserve time for discussing potential future work like SWIG +and/or Cython compatibility and other areas brought up by the audience. +There are many interesting details that can be explored further; +we will focus on the points the audience is most interested in. + +For more info: + +* http://pypy.org/ +* Our blog: http://morepypy.blogspot.com/ +* Eureka program: http://www.eurostars-eureka.eu/ [1] + +[1] Eurostars Eureka is our funding source since 2009. It is a +cross-European funding collaboration that targets small firms +which produce research. 
From commits-noreply at bitbucket.org Wed Apr 6 14:40:35 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 6 Apr 2011 14:40:35 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Minor clarification Message-ID: <20110406124035.A3D15282C31@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3480:3463c3ccd46c Date: 2011-04-06 14:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/3463c3ccd46c/ Log: Minor clarification diff --git a/talk/ep2011/talk.txt b/talk/ep2011/talk.txt --- a/talk/ep2011/talk.txt +++ b/talk/ep2011/talk.txt @@ -13,7 +13,8 @@ * most Python benchmarks run much faster than with CPython or Psyco * the real-world PyPy compiler toolchain itself (200 KLocs) runs twice as fast -* already supports 64bit and is in the process of supporting ARM +* already supports 32 and 64bit x86 atchitecture and is in the process + of supporting ARM * full compatibility with CPython (more than Jython/IronPython) * full (and JIT-ed) ctypes support to call C libraries from Python * supports Stackless Python (in-progress) From commits-noreply at bitbucket.org Wed Apr 6 14:43:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 14:43:28 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Typo, but I'm killing the word anyway Message-ID: <20110406124328.0B168282C31@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3481:80245136b9be Date: 2011-04-06 14:43 +0200 http://bitbucket.org/pypy/extradoc/changeset/80245136b9be/ Log: Typo, but I'm killing the word anyway diff --git a/talk/ep2011/talk.txt b/talk/ep2011/talk.txt --- a/talk/ep2011/talk.txt +++ b/talk/ep2011/talk.txt @@ -13,8 +13,7 @@ * most Python benchmarks run much faster than with CPython or Psyco * the real-world PyPy compiler toolchain itself (200 KLocs) runs twice as fast -* already supports 32 and 64bit x86 atchitecture and is in the process - of supporting ARM +* already supports 32 and 64bit x86 and is in the process of supporting ARM * full 
compatibility with CPython (more than Jython/IronPython) * full (and JIT-ed) ctypes support to call C libraries from Python * supports Stackless Python (in-progress) From commits-noreply at bitbucket.org Wed Apr 6 14:46:14 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 14:46:14 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Clarify, following fijal's suggestion. Message-ID: <20110406124614.4A757282C31@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3482:9663167ccdbd Date: 2011-04-06 14:46 +0200 http://bitbucket.org/pypy/extradoc/changeset/9663167ccdbd/ Log: Clarify, following fijal's suggestion. diff --git a/talk/ep2011/talk.txt b/talk/ep2011/talk.txt --- a/talk/ep2011/talk.txt +++ b/talk/ep2011/talk.txt @@ -2,10 +2,10 @@ ======================= The PyPy project has recently gathered a lot of attention for its -progress in speeding up the Python language -- it is the fastest, -most compatible and most stable 'alternative´ Python interpreter. No -longer merely a research curiosity, PyPy is now suitable for production -use. +progress in speeding up the Python language -- it is the fastest +Python interpreter, and the most compatible and most stable +'alternative´ one. No longer merely a research curiosity, PyPy +is now suitable for production use. We will give an overview on how the tracing Just-in-Time compiler works in PyPy. 
From there, we will then focus on what the PyPy From commits-noreply at bitbucket.org Wed Apr 6 15:12:53 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 15:12:53 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: - research curiosity => research project, which seems to be the Message-ID: <20110406131253.527A4282C31@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3483:9a6e79f255fb Date: 2011-04-06 15:12 +0200 http://bitbucket.org/pypy/extradoc/changeset/9a6e79f255fb/ Log: - research curiosity => research project, which seems to be the concensus with #pypy (it seems that "curiosity" evokes various more-or-less negative things in various cultures). - added a sentence in the introduction about calling C. diff --git a/talk/ep2011/talk.txt b/talk/ep2011/talk.txt --- a/talk/ep2011/talk.txt +++ b/talk/ep2011/talk.txt @@ -4,8 +4,10 @@ The PyPy project has recently gathered a lot of attention for its progress in speeding up the Python language -- it is the fastest Python interpreter, and the most compatible and most stable -'alternative´ one. No longer merely a research project, PyPy -is now suitable for production use. +'alternative´ one. No longer merely a research project, PyPy +is now suitable for production use. We are working on improvements +on calling into C libraries and generally integrating with +the existing Python extensions ecosystem. We will give an overview on how the tracing Just-in-Time compiler works in PyPy. 
From there, we will then focus on what the PyPy From commits-noreply at bitbucket.org Wed Apr 6 16:07:16 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 16:07:16 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix: after refactoring, the call to absolute_import_try() ended Message-ID: <20110406140716.0548F282C33@codespeak.net> Author: Armin Rigo Branch: Changeset: r43172:7fe259af19d2 Date: 2011-04-06 16:06 +0200 http://bitbucket.org/pypy/pypy/changeset/7fe259af19d2/ Log: Fix: after refactoring, the call to absolute_import_try() ended up being inside a @jit.dont_look_inside function. This made useless the optimization @jit.unroll_loops on the absolute_import_try() function. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -263,13 +263,17 @@ space.timer.stop_name("importhook", modulename) return w_mod - at jit.dont_look_inside def absolute_import(space, modulename, baselevel, fromlist_w, tentative): # Short path: check in sys.modules w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) if w_mod is not None and not space.is_w(w_mod, space.w_None): return w_mod + return absolute_import_with_lock(space, modulename, baselevel, + fromlist_w, tentative) + at jit.dont_look_inside +def absolute_import_with_lock(space, modulename, baselevel, + fromlist_w, tentative): lock = getimportlock(space) lock.acquire_lock() try: From commits-noreply at bitbucket.org Wed Apr 6 16:43:36 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 16:43:36 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: fix an obscure case which seems to be produced only with ootype translation. 
I didn't manage to write a test for it :-( Message-ID: <20110406144336.49AD0282C33@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43173:67fec99826fb Date: 2011-04-06 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/67fec99826fb/ Log: fix an obscure case which seems to be produced only with ootype translation. I didn't manage to write a test for it :-( diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: From commits-noreply at bitbucket.org Wed Apr 6 17:31:40 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 17:31:40 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: revert a4582c0ec4bb; signal is not essential, and it does not work on ootype Message-ID: <20110406153140.8C2DC282C33@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43174:cb00ac639ff7 Date: 2011-04-06 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/cb00ac639ff7/ Log: revert a4582c0ec4bb; signal is not essential, and it does not work on ootype diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ 
b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() From commits-noreply at bitbucket.org Wed Apr 6 17:56:37 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 6 Apr 2011 17:56:37 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: - rename the SlowMutate class into QuasiImmut. Message-ID: <20110406155637.011D0282C33@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43175:0734d2c106e9 Date: 2011-04-06 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0734d2c106e9/ Log: - rename the SlowMutate class into QuasiImmut. - add front-end support for invalidating loops; backend interface missing so far. diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -76,6 +76,10 @@ op.setdescr(None) # clear reference, mostly for tests if not we_are_translated(): op._jumptarget_number = descr.number + # record this looptoken on the QuasiImmut used in the code + if loop.quasi_immutable_deps is not None: + for qmut in loop.quasi_immutable_deps: + qmut.register_loop_token(wref) # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -1,3 +1,4 @@ +import weakref from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.annlowlevel import cast_base_ptr_to_instance @@ -17,25 +18,25 @@ else: raise AssertionError(fieldname) -def get_current_mutate_instance(cpu, gcref, mutatefielddescr): - """Returns the current SlowMutate 
instance in the field, +def get_current_qmut_instance(cpu, gcref, mutatefielddescr): + """Returns the current QuasiImmut instance in the field, possibly creating one. """ - mutate_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) - if mutate_gcref: - mutate = SlowMutate.show(cpu, mutate_gcref) + qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) + if qmut_gcref: + qmut = QuasiImmut.show(cpu, qmut_gcref) else: - mutate = SlowMutate() - cpu.bh_setfield_gc_r(gcref, mutatefielddescr, mutate.hide(cpu)) - return mutate + qmut = QuasiImmut(cpu) + cpu.bh_setfield_gc_r(gcref, mutatefielddescr, qmut.hide()) + return qmut def make_invalidation_function(STRUCT, mutatefieldname): # def _invalidate_now(p): - mutate_ptr = getattr(p, mutatefieldname) + qmut_ptr = getattr(p, mutatefieldname) setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) - mutate = cast_base_ptr_to_instance(SlowMutate, mutate_ptr) - mutate.invalidate() + qmut = cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) + qmut.invalidate() _invalidate_now._dont_inline_ = True # def invalidation(p): @@ -45,31 +46,54 @@ return invalidation -class SlowMutate(object): - def __init__(self): - pass +class QuasiImmut(object): + def __init__(self, cpu): + self.cpu = cpu + # list of weakrefs to the LoopTokens that must be invalidated if + # this value ever changes + self.looptokens_wrefs = [] + self.compress_limit = 30 - def hide(self, cpu): - mutate_ptr = cpu.ts.cast_instance_to_base_ref(self) - return cpu.ts.cast_to_ref(mutate_ptr) + def hide(self): + qmut_ptr = self.cpu.ts.cast_instance_to_base_ref(self) + return self.cpu.ts.cast_to_ref(qmut_ptr) @staticmethod - def show(cpu, mutate_gcref): - mutate_ptr = cpu.ts.cast_to_baseclass(mutate_gcref) - return cast_base_ptr_to_instance(SlowMutate, mutate_ptr) + def show(cpu, qmut_gcref): + qmut_ptr = cpu.ts.cast_to_baseclass(qmut_gcref) + return cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) + + def register_loop_token(self, wref_looptoken): + if 
len(self.looptokens_wrefs) > self.compress_limit: + self.compress_looptokens_list() + self.looptokens_wrefs.append(wref_looptoken) + + def compress_looptokens_list(self): + self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs + if wref() is not None] + self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 def invalidate(self): - pass # XXX + # When this is called, all the loops that we record become + # become invalid and must not be called again, nor returned to. + wrefs = self.looptokens_wrefs + self.looptokens_wrefs = [] + for wref in wrefs: + looptoken = wref() + if looptoken is not None: + pass + # + # XXX tell the backend to mark the loop as invalid -class SlowMutateDescr(AbstractDescr): +class QuasiImmutDescr(AbstractDescr): def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): self.cpu = cpu self.structbox = structbox self.fielddescr = fielddescr self.mutatefielddescr = mutatefielddescr gcref = structbox.getref_base() - self.mutate = get_current_mutate_instance(cpu, gcref, mutatefielddescr) + self.qmut = get_current_qmut_instance(cpu, gcref, mutatefielddescr) self.constantfieldbox = self.get_current_constant_fieldvalue() def get_current_constant_fieldvalue(self): @@ -80,10 +104,10 @@ return fieldbox.constbox() def is_still_valid(self): + cpu = self.cpu gcref = self.structbox.getref_base() - curmut = get_current_mutate_instance(self.cpu, gcref, - self.mutatefielddescr) - if curmut is not self.mutate: + qmut = get_current_qmut_instance(cpu, gcref, self.mutatefielddescr) + if qmut is not self.qmut: return False else: currentbox = self.get_current_constant_fieldvalue() diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -379,7 +379,7 @@ write=True) def optimize_QUASIIMMUT_FIELD(self, op): - # Pattern: QUASIIMMUT_FIELD(s, descr=SlowMutateDescr) + # Pattern: QUASIIMMUT_FIELD(s, 
descr=QuasiImmutDescr) # x = GETFIELD_GC(s, descr='inst_x') # If 's' is a constant (after optimizations), then we make 's.inst_x' # a constant too, and we rely on the rest of the optimizations to @@ -388,22 +388,22 @@ if not structvalue.is_constant(): return # not a constant at all; ignore QUASIIMMUT_FIELD # - from pypy.jit.metainterp.quasiimmut import SlowMutateDescr - smdescr = op.getdescr() - assert isinstance(smdescr, SlowMutateDescr) + from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr + qmutdescr = op.getdescr() + assert isinstance(qmutdescr, QuasiImmutDescr) # check that the value is still correct; it could have changed # already between the tracing and now. In this case, we are # simply ignoring the QUASIIMMUT_FIELD hint and compiling it # as a regular getfield. - if not smdescr.is_still_valid(): + if not qmutdescr.is_still_valid(): return # record as an out-of-line guard if self.optimizer.quasi_immutable_deps is None: self.optimizer.quasi_immutable_deps = {} - self.optimizer.quasi_immutable_deps[smdescr.mutate] = None + self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None # perform the replacement in the list of operations - fieldvalue = self.getvalue(smdescr.constantfieldbox) - cf = self.field_cache(smdescr.fielddescr) + fieldvalue = self.getvalue(qmutdescr.constantfieldbox) + cf = self.field_cache(qmutdescr.fielddescr) cf.remember_field_value(structvalue, fieldvalue) def propagate_forward(self, op): diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -1,13 +1,13 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.metainterp import typesystem -from pypy.jit.metainterp.quasiimmut import SlowMutate -from pypy.jit.metainterp.quasiimmut import get_current_mutate_instance +from 
pypy.jit.metainterp.quasiimmut import QuasiImmut +from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance from pypy.jit.metainterp.test.test_basic import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside -def test_get_current_mutate_instance(): +def test_get_current_qmut_instance(): accessor = FieldListAccessor() accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), @@ -36,10 +36,10 @@ mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) - sm1 = get_current_mutate_instance(cpu, foo_gcref, mutatefielddescr) - assert isinstance(sm1, SlowMutate) - sm2 = get_current_mutate_instance(cpu, foo_gcref, mutatefielddescr) - assert sm1 is sm2 + qmut1 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) + assert isinstance(qmut1, QuasiImmut) + qmut2 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) + assert qmut1 is qmut2 class QuasiImmutTests: @@ -68,7 +68,7 @@ loops = get_stats().loops for loop in loops: assert len(loop.quasi_immutable_deps) == 1 - assert isinstance(loop.quasi_immutable_deps.keys()[0], SlowMutate) + assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -149,6 +149,32 @@ assert res == 700 self.check_loops(getfield_gc=1) + def test_change_invalidate_reentering(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(foo, x): + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + x -= 1 + return total + def g(a, x): + foo = Foo(a) + res1 = f(foo, x) + foo.a += 1 # invalidation, while the jit is not running + res2 = f(foo, x) # should still mark the loop as invalid + return res1 * 1000 + res2 + # + 
assert g(100, 7) == 700707 + res = self.meta_interp(g, [100, 7]) + assert res == 700707 + self.check_loops(getfield_gc=0) + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -558,9 +558,9 @@ @arguments("box", "descr", "descr") def opimpl_record_quasiimmut_field(self, box, fielddescr, mutatefielddescr): - from pypy.jit.metainterp.quasiimmut import SlowMutateDescr + from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr cpu = self.metainterp.cpu - descr = SlowMutateDescr(cpu, box, fielddescr, mutatefielddescr) + descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) From commits-noreply at bitbucket.org Wed Apr 6 18:35:21 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 18:35:21 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement force_cast for the cli backend Message-ID: <20110406163521.4B00C282BD4@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43176:c7bd951a8df1 Date: 2011-04-06 18:34 +0200 http://bitbucket.org/pypy/pypy/changeset/c7bd951a8df1/ Log: implement force_cast for the cli backend diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ 
b/pypy/translator/cli/opcodes.py @@ -148,6 +148,7 @@ 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } From commits-noreply at bitbucket.org Wed Apr 6 18:54:38 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 18:54:38 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement ullong_and for the cli backend Message-ID: <20110406165438.DBF5A282BD4@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43177:6d73aef65edf Date: 2011-04-06 18:54 +0200 http://bitbucket.org/pypy/pypy/changeset/6d73aef65edf/ Log: implement ullong_and for the cli backend diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b): + return a&b + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF)]) + assert res == f(0x1234, 0x00FF) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -267,6 +267,7 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', 'oois': 'ceq', 'ooisnot': _not('ceq'), From commits-noreply at bitbucket.org Wed Apr 6 18:58:23 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 6 Apr 2011 18:58:23 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement ullong_or for the cli backend Message-ID: <20110406165823.3E4D8282BD4@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs 
Changeset: r43178:f10ff0abbcb8 Date: 2011-04-06 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/f10ff0abbcb8/ Log: implement ullong_or for the cli backend diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -215,13 +215,13 @@ assert res == f(inttype(0)) assert type(res) == inttype - def test_and(self): + def test_and_or(self): inttypes = [int, r_uint, r_int64, r_ulonglong] for inttype in inttypes: - def f(a, b): - return a&b - res = self.interpret(f, [inttype(0x1234), inttype(0x00FF)]) - assert res == f(0x1234, 0x00FF) + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) def test_neg_abs_ovf(self): for op in (operator.neg, abs): diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -268,6 +268,7 @@ 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), From commits-noreply at bitbucket.org Wed Apr 6 19:52:20 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 6 Apr 2011 19:52:20 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: fix some things Message-ID: <20110406175220.B4FE3282BD4@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3484:8d11018ccf0a Date: 2011-04-06 19:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/8d11018ccf0a/ Log: fix some things diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -35,7 +35,7 @@ } \newboolean{showcomments} -\setboolean{showcomments}{true} +\setboolean{showcomments}{false} \ifthenelse{\boolean{showcomments}} {\newcommand{\nb}[2]{ \fbox{\bfseries\sffamily\scriptsize#1} @@ 
-122,7 +122,7 @@ extremely challenging, because of their many corner-cases. It has long been an objective of the partial evaluation community to -automatically produce compilers from interpreters. There has been a recent +automatically produce compilers from interpreters. There has been a renaissance of this idea around the approach of tracing just-in-time compilers. A number of projects have attempted this approach. SPUR \cite{bebenita_spur:_2010} is a tracing JIT for .NET together with a JavaScript implementation in C\#. PyPy @@ -135,7 +135,7 @@ These projects have in common that they work one meta-level down, providing a tracing JIT for the language used to implement the dynamic language, and not for the dynamic language itself. -The tracing JIT then will trace through the object model of the dynamic +The tracing JIT will then trace through the object model of the dynamic language implementation. This makes the object model transparent to the tracer and its optimizations. Therefore the semantics of the dynamic language does not have to be replicated in a JIT. We call this approach \emph{meta-tracing}. @@ -158,8 +158,8 @@ meta-tracing context. Concretely these hints are used to control how the optimizer of the -tracing JIT can improve the traces of the object model. More -specifically, these hints influence the constant folding +tracing JIT can improve the traces of the object model. In particular the hints +influence the constant folding optimization. The first hint makes it possible to turn arbitrary variables in the trace into constant by feeding back runtime values. The second hint allows the definition of additional foldable operations. @@ -233,7 +233,9 @@ traces are therefore linear list of operations, which are optimized and then get turned into machine code. This recording automatically inlines functions: when a function call is encountered the operations of the called functions are -simply put into the trace too. 
+simply put into the trace of the caller too. The tracing JIT tries to produce traces +that correspond to loops in the traced program, but most tracing JITs now also +have support for tracing non-loops \cite{andreas_gal_incremental_2006}. Because the traces always correspond to a concrete execution they cannot contain any control flow splits. Therefore they encode the control flow @@ -243,15 +245,14 @@ To be able to do this recording, VMs with a tracing JIT typically contain an interpreter. After a user program is started the interpreter is used; only the most frequently executed paths through the user -program are turned into machine code. The tracing JIT tries to produce traces -that correspond to loops in the traced program, but most tracing JITs now also -have support for tracing non-loops \cite{andreas_gal_incremental_2006}. +program are turned into machine code. The interpreter is also used when a guard +fails to continue the execution from the failing guard. One disadvantage of (tracing) JITs which makes them not directly applicable to PyPy is that they need to encode the language semantics of the language they are tracing. Since PyPy wants to be a general framework, we want to reuse our tracer for different languages. -Therefore PyPy's JIT is a meta-tracer \cite{bolz_tracing_2009}. It does not +Therefore PyPy's JIT is a \emph{meta-tracer} \cite{bolz_tracing_2009}. It does not trace the execution of the user program, but instead traces the execution of the \emph{interpreter} that is running the program. This means that the traces it produces don't contain the bytecodes of the language in question, but @@ -264,7 +265,7 @@ While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the -user program. This means that the tracer stops tracing after one iteration of +user program. To achieve this the tracer stops tracing after one iteration of the loop in the user function that is being considered. 
At this point, it probably traced many iterations of the interpreter main loop. @@ -290,15 +291,10 @@ optimized. The optimizer applies a number of techniques to remove or simplify the operations in the trace. Most of these are well known compiler optimization techniques, with the difference that it is easier to apply them in a tracing -JIT because it only has to deal with linear traces. Among the techniques: -% -\begin{itemize} - \item constant folding - \item common subexpression elimination - \item allocation removal \cite{bolz_allocation_2011} - \item store/load propagation - \item loop invariant code motion -\end{itemize} +JIT because it only has to deal with linear traces. Among the techniques are +constant folding, common subexpression elimination, allocation removal +\cite{bolz_allocation_2011}, store/load propagation, loop invariant code +motion. In some places it turns out that if the interpreter author rewrites some parts of the interpreter with these optimizations in mind the traces that are produced @@ -350,6 +346,8 @@ \label{fig:trace1} \end{figure} +\cfbolz{should we show the code that would create the inst use in tracing?} + The trace would look like in Figure~\ref{fig:trace1}. In this example, the attribute \texttt{a} is found on the instance, but the attributes \texttt{b} and \texttt{c} are found on the class. The line @@ -422,8 +420,8 @@ $y_2$ = $y_1$ + $x_1$ \end{lstlisting} -In the trace above, the value of $x_1$ is statically known thanks to the -guard. Remember that a guard is a runtime check. The above trace will run to +In the trace above, the value of $x_1$ is statically known after the guard. +Remember that a guard is a runtime check. The above trace will run to completion when $x_1$ \texttt{== 4}. If the check fails, execution of the trace is stopped and the interpreter continues to run. @@ -431,13 +429,13 @@ into a constant value. 
This process is called \emph{promotion} and it is an old idea in partial evaluation (it's called ``The Trick'' \cite{jones_partial_1993} there). Promotion is also heavily used by Psyco \cite{rigo_representation-based_2004} and by all older versions -of PyPy's JIT. Promotion is a technique that only works well in JIT compilers; +of PyPy's JIT. It is a technique that only works well in JIT compilers; in static compilers it is significantly less applicable. Promotion is essentially a tool for trace specialization. There are places in the interpreter where knowing that a value is constant opens a lot of optimization opportunities, even though it -could have different values in practice. In such a place, promotion is used. The +could have different values in practice. In such a place, promotion can be used. The typical reason to do that is if there is a lot of computation depending on the value of that variable. @@ -481,11 +479,12 @@ $v_1$ = $x_1$ * 2 $z_1$ = $v_1$ + 1 $v_2$ = $z_1$ + $y_1$ -return(v2) +return($v_2$) \end{lstlisting} The promotion is turned into a \texttt{guard} operation in the trace. The guard -captures the value of $x_1$ as it was at runtime. From the point of view of the +captures the value of $x_1$ as it was during tracing. \cfbolz{drop the word runtime feedback here?} +From the point of view of the optimizer, this guard is not any different than the one produced by the \texttt{if} statement in the example above. After the guard, the rest of the trace can assume that $x_1$ is equal to \texttt{4}, meaning that the optimizer will turn this @@ -517,7 +516,7 @@ $x_1$ takes on even more values, a new trace will eventually be made for all of them, linking them into a chain. This is clearly not desirable, so we should promote only variables that don't vary much. However, adding a promotion hint will never produce wrong -results. It might just lead to too much assembler code. +results. It might just lead to too much assembler code being generated. 
Promoting integers, as in the examples above, is not used that often. However, the internals of dynamic language interpreters often @@ -525,7 +524,7 @@ program. An example would be the types of variables in a user function. Even though in principle the argument to a Python function could be any Python type, in practice the argument types tend not to vary often. Therefore it is possible to -promote the types. The next section will present a complete example of how +promote the types. Section~\ref{sec:} will present a complete example of how this works. @@ -592,6 +591,8 @@ return self.x * 2 + 1 \end{lstlisting} +\cfbolz{should we mention that pure functions are not actually called by the optimizer, but the values that are seen during tracing are used?} + Now the trace will look like this: % \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -621,7 +622,9 @@ However, the annotation also gives the interpreter author ample opportunity to mess things up. If a function is annotated to be pure, but is not really, the optimizer can produce subtly wrong code. Therefore, a lot of care has to be taken when using this -annotation. +annotation\footnote{The most common use case of the \texttt{purefunction} +annotation is indeed to declare the immutability of fields. Because it is so +common, we have special syntactic sugar for it.}. \subsubsection{Observably Pure Functions} @@ -640,17 +643,6 @@ -\subsubsection{Immutable Fields} - -One of the most common cases of pure functions is reading immutable -values out of objects. Since this is so common, we have special syntactic sugar -for it. A RPython class can have a class attribute \texttt{\_immutable\_fields\_} set to -a list of strings, listing the fields that cannot be changed. This is equivalent -to using getters and annotating them with \texttt{purefunction}. 
- - - - %___________________________________________________________________________ \section{Putting It All Together} @@ -694,11 +686,12 @@ In this implementation instances no longer use dictionaries to store their fields. Instead, they have a reference to a map, which maps field names to indexes into a storage list. The -storage list contains the actual field values. The maps are shared between -objects with the same layout. Therefore they have to be immutable, which means +storage list contains the actual field values. Therefore they have to be immutable, which means that their \texttt{getindex} method is a pure function. When a new attribute is added to an instance, a new map needs to be chosen, which is done with the -\texttt{add\_attribute} method on the previous map (which is also pure). Now that we have +\texttt{add\_attribute} method on the previous map. This function is also pure, +because it caches all new instances of \texttt{Map} that it creates, to make +sure that objects with the same layout have the same map. Now that we have introduced maps, it is safe to promote the map everywhere, because we assume that the number of different instance layouts is small. @@ -739,7 +732,7 @@ new value. Therefore, we give every class a version object, which is changed every time a -class gets changed (i.e., the content of the \texttt{methods} dictionary changes). +class gets changed (i.e., the \texttt{methods} dictionary changes). This means that the result of \texttt{methods.get()} for a given \texttt{(name, version)} pair will always be the same, i.e. it is a pure operation. To help the JIT to detect this case, we factor it out in a helper method which is @@ -855,23 +848,23 @@ benchmark\footnote{\texttt{http://speleotrove.com/decimal/telco.html}}, using a pure Python decimal floating point implementation. 
The results we see in these benchmarks seem to repeat themselves in other benchmarks using object-oriented -code; for purely numerical algorithms the speedups are significantly smaller. +code; for purely numerical algorithms the speedups introduced by the techniques +in this paper are much smaller because they are already fast. The benchmarks were run on an otherwise idle Intel Core2 Duo P8400 processor with 2.26 GHz and 3072 KB of cache on a machine with 3GB RAM running Linux -2.6.35. We compared the performance of various Python implementations on the +2.6.35. We compared the performance of two Python implementations on the benchmarks. As a baseline, we used the standard Python implementation in C, CPython 2.6.6\footnote{\texttt{http://python.org}}, which uses a bytecode-based -interpreter. We compare it against four versions of PyPy's Python interpreter, -all of them with JIT enabled. The PyPy baseline does not enable maps or type -versions. We then benchmarked PyPy, first using each technique separately, -and finally using both together. +interpreter. We compare it against two versions of PyPy's Python interpreter, +both of them with JIT enabled. The PyPy baseline does not enable maps or type +version, the full JIT enables both. All benchmarks were run 50 times in the same process, to give the JIT time to produce machine code. The arithmetic mean of the times of the last 30 runs were used as the result. The errors were computed using a confidence interval with a 95\% confidence level \cite{georges_statistically_2007}. The results are -reported in Figure~\ref{fig:times}, together with the same numbers normed to +reported in Figure~\ref{fig:times}, together with the same numbers normalized to those of the full JIT. The optimizations give a speedup between 80\% and almost 20 times. 
The Richards From commits-noreply at bitbucket.org Wed Apr 6 21:22:36 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 6 Apr 2011 21:22:36 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add a comment Message-ID: <20110406192236.6116836C213@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3485:af08c2f78822 Date: 2011-04-06 21:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/af08c2f78822/ Log: add a comment diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -215,6 +215,9 @@ and less error prone than manually writing a JIT compiler. Similarly, writing in a high level language such as RPython is easier than writing in C. +XXX [fijal] yet another advantage is that JIT is by design supporting the whole + language + We call the code that runs on top of an interpreter implemented with PyPy the \emph{user code} or \emph{user program}. diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 319fb7f0ad019fd3dc01575729a8ac0e2767baca..bba7e9b9246f9bee671aa631b5f8ed889869ba04 GIT binary patch [cut] From commits-noreply at bitbucket.org Wed Apr 6 21:35:44 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 6 Apr 2011 21:35:44 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: a bit experimental - unroll cases where we look up attrs. it should only Message-ID: <20110406193544.1AE9F36C214@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43179:2a3aa9abc6e5 Date: 2011-04-06 21:35 +0200 http://bitbucket.org/pypy/pypy/changeset/2a3aa9abc6e5/ Log: a bit experimental - unroll cases where we look up attrs. 
it should only be invoked in a mix of old and new style classes diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found From commits-noreply at bitbucket.org Wed Apr 6 21:37:23 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 6 Apr 2011 21:37:23 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Backed out changeset 2a3aa9abc6e5 Message-ID: <20110406193723.6E9B7282BD4@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43180:24e193e27a49 Date: 2011-04-06 21:37 +0200 http://bitbucket.org/pypy/pypy/changeset/24e193e27a49/ Log: Backed out changeset 2a3aa9abc6e5 diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe +from pypy.rlib.jit import purefunction, dont_look_inside from pypy.rlib.rarithmetic import intmask, r_uint # from 
compiler/misc.py @@ -253,7 +253,7 @@ return w_value return None - @unroll_safe + def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,7 +262,6 @@ return w_value return None - @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found From commits-noreply at bitbucket.org Wed Apr 6 21:39:21 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 6 Apr 2011 21:39:21 +0200 (CEST) Subject: [pypy-svn] pypy default: An experiment - unroll _lookup and _lookup_where. Should make a mix Message-ID: <20110406193921.C4439282BD4@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43181:54ef304d2fd8 Date: 2011-04-06 21:39 +0200 http://bitbucket.org/pypy/pypy/changeset/54ef304d2fd8/ Log: An experiment - unroll _lookup and _lookup_where. Should make a mix of old and new style classes much faster diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found From commits-noreply at bitbucket.org Thu Apr 7 02:38:27 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 02:38:27 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: 
s/fancy/advanced/ Message-ID: <20110407003827.4355E282B9E@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3486:568fe1f3f207 Date: 2011-04-07 02:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/568fe1f3f207/ Log: s/fancy/advanced/ diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -310,7 +310,7 @@ As the running example of this paper we will use a very simple and bare-bones object model that just supports classes and instances, without any -inheritance or other fancy features. The model has classes, which contain methods. +inheritance or other advanced features. The model has classes, which contain methods. Instances have a class. Instances have their own attributes (or fields). When looking up an attribute on an instance, the instances attributes are searched. If the attribute is not found there, the class' methods are searched. From commits-noreply at bitbucket.org Thu Apr 7 02:55:03 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 02:55:03 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: I think Python object model contains more hard features, but those are the Message-ID: <20110407005503.E6A81282B9E@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3487:05f912892730 Date: 2011-04-07 02:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/05f912892730/ Log: I think Python object model contains more hard features, but those are the major ones diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -327,7 +327,7 @@ In this straightforward implementation the methods and attributes are just stored in dictionaries (hash maps) on the classes and instances, respectively. While this object model is very -simple it already contains all the hard parts of Python's object model. 
Both +simple it already contains most hard parts of Python's object model. Both instances and classes can have arbitrary fields, and they are changeable at any time. Moreover, instances can change their class after they have been created. From commits-noreply at bitbucket.org Thu Apr 7 02:55:04 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 02:55:04 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add a note Message-ID: <20110407005504.5DCAA282B9E@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3488:474da55ac693 Date: 2011-04-07 02:49 +0200 http://bitbucket.org/pypy/extradoc/changeset/474da55ac693/ Log: add a note diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -783,6 +783,9 @@ class did not change since the trace was produced. It will fail if somebody calls the \texttt{write\_method} method on the class. +XXX [fijal] maybe it's worth noting that those guards are removed out of + the loop by loop-invariant-code motion, unless our is so special + we want to write a special paper about it %___________________________________________________________________________ From commits-noreply at bitbucket.org Thu Apr 7 02:55:04 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 02:55:04 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add another note Message-ID: <20110407005504.C2B6D282B9E@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3489:3c4440caaa60 Date: 2011-04-07 02:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/3c4440caaa60/ Log: add another note diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -880,6 +880,7 @@ all benchmarks, which is not surprising because CPython is a simple bytecode-based interpreter. +XXX [fijal] wouldn't a graph be better? 
\begin{figure} \begin{center} From commits-noreply at bitbucket.org Thu Apr 7 03:11:46 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 03:11:46 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: store long longs internally Message-ID: <20110407011146.3692E282B9E@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43182:ae4fe73e5090 Date: 2011-04-07 01:06 +0200 http://bitbucket.org/pypy/pypy/changeset/ae4fe73e5090/ Log: store long longs internally diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -9,6 +9,7 @@ from pypy.rlib import jit from pypy.rlib.rtimer import read_timestamp from pypy.rpython.lltypesystem import rffi +from pypy.rlib.rarithmetic import r_longlong import time, sys @@ -96,7 +97,8 @@ def stats(self, space, parent, factor): w_sse = W_StatsSubEntry(space, self.frame, self.callcount, self.recursivecallcount, - factor * self.tt, factor * self.it) + factor * float(self.tt), + factor * float(self.it)) return space.wrap(w_sse) def _stop(self, tt, it): @@ -121,7 +123,8 @@ w_sublist = space.w_None w_se = W_StatsEntry(space, self.frame, self.callcount, self.recursivecallcount, - factor * self.tt, factor * self.it, w_sublist) + factor * float(self.tt), + factor * float(self.it), w_sublist) return space.wrap(w_se) @jit.purefunction @@ -211,6 +214,7 @@ pass class W_Profiler(Wrappable): + def __init__(self, space, w_callable, time_unit, subcalls, builtins): self.subcalls = subcalls self.builtins = builtins @@ -222,15 +226,22 @@ self.space = space def timer(self): + # XXX ignore for now casting of float to long long and instead + # use float -> int -> long long if self.w_callable: space = self.space try: - return space.float_w(space.call_function(self.w_callable)) + if self.time_unit > 0.0: + return r_longlong( + space.int_w(space.call_function(self.w_callable))) + else: + return 
r_longlong(int(space.float_w( + space.call_function(self.w_callable)))) except OperationError, e: e.write_unraisable(space, "timer function ", self.w_callable) - return 0.0 - return float(read_timestamp()) + return r_longlong(0) + return read_timestamp() def enable(self, space, w_subcalls=NoneNotWrapped, w_builtins=NoneNotWrapped): @@ -311,6 +322,7 @@ def getstats(self, space): if self.w_callable is None: + # XXX find out a correct measurment freq factor = 1. # we measure time.time in floats elif self.time_unit > 0.0: factor = self.time_unit From commits-noreply at bitbucket.org Thu Apr 7 06:23:50 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 06:23:50 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Add CPU pinning support for lsprofiler. Do for one the correct thing - Message-ID: <20110407042350.E1AAA282C36@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43183:cb3113ad5062 Date: 2011-04-07 06:23 +0200 http://bitbucket.org/pypy/pypy/changeset/cb3113ad5062/ Log: Add CPU pinning support for lsprofiler. Do for one the correct thing - the C part is split into .h and .c file and always included in the resulting binary diff --git a/pypy/translator/c/src/debug_print.h b/pypy/translator/c/src/debug_print.h --- a/pypy/translator/c/src/debug_print.h +++ b/pypy/translator/c/src/debug_print.h @@ -20,7 +20,6 @@ Note that 'fname' can be '-' to send the logging data to stderr. */ - /* macros used by the generated code */ #define PYPY_HAVE_DEBUG_PRINTS (pypy_have_debug_prints & 1 ? 
\ (pypy_debug_ensure_opened(), 1) : 0) @@ -40,25 +39,11 @@ extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; - /* implementations */ #ifndef PYPY_NOT_MAIN_FILE #include -#if defined(__GNUC__) && defined(__linux__) -# include - static void pypy_setup_profiling() - { - cpu_set_t set; - CPU_ZERO(&set); - CPU_SET(0, &set); /* restrict to a single cpu */ - sched_setaffinity(0, sizeof(cpu_set_t), &set); - } -#else -static void pypy_setup_profiling() { } -#endif - long pypy_have_debug_prints = -1; FILE *pypy_debug_file = NULL; static bool_t debug_ready = 0; diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -44,6 +44,7 @@ /*** modules ***/ #ifdef HAVE_RTYPER /* only if we have an RTyper */ # include "src/rtyper.h" +# include "src/profiling.h" # include "src/debug_print.h" # include "src/debug_traceback.h" # include "src/debug_alloc.h" diff --git a/pypy/translator/c/src/profiling.h b/pypy/translator/c/src/profiling.h new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/profiling.h @@ -0,0 +1,8 @@ + +#ifndef PROFILING_H +#define PROFILING_H + +void pypy_setup_profiling(); +void pypy_teardown_profiling(); + +#endif diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -8,12 +8,25 @@ interp_attrproperty) from pypy.rlib import jit from pypy.rlib.rtimer import read_timestamp -from pypy.rpython.lltypesystem import rffi +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.tool.autopath import pypydir from pypy.rlib.rarithmetic import r_longlong import time, sys -# timer +# cpu affinity settings + +srcdir = py.path.local(pypydir).join('translator', 'c', 'src') +eci = ExternalCompilationInfo(separate_module_files= + [srcdir.join('profiling.c')]) + 
+c_setup_profiling = rffi.llexternal('pypy_setup_profiling', + [], lltype.Void, + compilation_info = eci) +c_teardown_profiling = rffi.llexternal('pypy_teardown_profiling', + [], lltype.Void, + compilation_info = eci) class W_StatsEntry(Wrappable): def __init__(self, space, frame, callcount, reccallcount, tt, it, @@ -250,6 +263,7 @@ if w_builtins is not None: self.builtins = space.bool_w(w_builtins) # set profiler hook + c_setup_profiling() space.getexecutioncontext().setllprofile(lsprof_call, space.wrap(self)) @jit.purefunction @@ -318,6 +332,7 @@ def disable(self, space): # unset profiler hook space.getexecutioncontext().setllprofile(None, None) + c_teardown_profiling() self._flush_unmatched() def getstats(self, space): diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -915,6 +915,11 @@ from pypy.rlib.rarithmetic import LONG_BIT defines['PYPY_LONG_BIT'] = LONG_BIT +def add_extra_files(eci): + srcdir = py.path.local(autopath.pypydir).join('translator', 'c', 'src') + return eci.merge(ExternalCompilationInfo( + separate_module_files=[srcdir.join('profiling.c')])) + def gen_source_standalone(database, modulename, targetdir, eci, entrypointname, defines={}): assert database.standalone @@ -964,6 +969,7 @@ print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n fi.close() + eci = add_extra_files(eci) eci = eci.convert_sources_to_files(being_main=True) files, eci = eci.get_module_files() return eci, filename, sg.getextrafiles() + list(files) @@ -1010,6 +1016,7 @@ gen_startupcode(f, database) f.close() + eci = add_extra_files(eci) eci = eci.convert_sources_to_files(being_main=True) files, eci = eci.get_module_files() return eci, filename, sg.getextrafiles() + list(files) diff --git a/pypy/translator/c/src/profiling.c b/pypy/translator/c/src/profiling.c new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/profiling.c @@ -0,0 +1,35 @@ + +#include +#if defined(__GNUC__) && 
defined(__linux__) + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#include +#endif + +cpu_set_t base_cpu_set; +int profiling_setup = 0; + +void pypy_setup_profiling() +{ + if (!profiling_setup) { + cpu_set_t set; + sched_getaffinity(0, sizeof(cpu_set_t), &base_cpu_set); + CPU_ZERO(&set); + CPU_SET(0, &set); /* restrict to a single cpu */ + sched_setaffinity(0, sizeof(cpu_set_t), &set); + profiling_setup = 1; + } +} + +void pypy_teardown_profiling() +{ + if (profiling_setup) { + sched_setaffinity(0, sizeof(cpu_set_t), &base_cpu_set); + profiling_setup = 0; + } +} +#else +void pypy_setup_profiling() { } +void pypy_teardown_profiling() { } +#endif From commits-noreply at bitbucket.org Thu Apr 7 07:46:50 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 7 Apr 2011 07:46:50 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: short preamble now constructed from pure_operations. briges curently broken Message-ID: <20110407054650.253FE282C37@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43184:90506c0e4091 Date: 2011-04-07 07:46 +0200 http://bitbucket.org/pypy/pypy/changeset/90506c0e4091/ Log: short preamble now constructed from pure_operations. 
briges curently broken diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -165,12 +165,12 @@ values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values) short_boxes = preamble_optimizer.produce_short_preamble_ops(inputargs) - print short_boxes + initial_inputargs_len = len(short_boxes) try: - inputargs = self.inline(self.cloned_operations, - loop.inputargs, jump_args, - virtual_state) + inputargs, short = self.inline(self.cloned_operations, + loop.inputargs, jump_args, + virtual_state, short_boxes) except KeyError: debug_print("Unrolling failed.") loop.preamble.operations = None @@ -197,7 +197,7 @@ snapshot.boxes = new_snapshot_args snapshot = snapshot.prev - short = self.create_short_preamble(loop.preamble, loop) + #short = self.create_short_preamble(loop.preamble, loop) if short: if False: # FIXME: This should save some memory but requires @@ -214,7 +214,7 @@ short[i] = op short_loop = TreeLoop('short preamble') - short_loop.inputargs = loop.preamble.inputargs[:] + short_loop.inputargs = loop.inputargs[:initial_inputargs_len] short_loop.operations = short # Clone ops and boxes to get private versions and @@ -224,6 +224,7 @@ ops = [inliner.inline_op(op) for op in short_loop.operations] short_loop.operations = ops descr = start_resumedescr.clone_if_mutable() + self.inliner.inline_descr_inplace(descr) inliner.inline_descr_inplace(descr) short_loop.start_resumedescr = descr @@ -241,7 +242,8 @@ if op.result: op.result.forget_value() - def inline(self, loop_operations, loop_args, jump_args, virtual_state): + def inline(self, loop_operations, loop_args, jump_args, virtual_state, + short_boxes): self.inliner = inliner = Inliner(loop_args, jump_args) values = [self.getvalue(arg) for arg in jump_args] @@ -267,6 +269,9 @@ boxes_created_this_iteration = {} jumpargs = jmp.getarglist() + 
short_inliner = Inliner(inputargs, jumpargs) + short = [] + # FIXME: Should also loop over operations added by forcing things in this loop for op in newoperations: boxes_created_this_iteration[op.result] = True @@ -277,15 +282,19 @@ for a in args: if not isinstance(a, Const) and not a in boxes_created_this_iteration: if a not in inputargs: + short_op = short_boxes[a] + short.append(short_op) + newop = short_inliner.inline_op(short_op) + self.optimizer.send_extra_operation(newop) inputargs.append(a) - box = inliner.inline_arg(a) + box = newop.result if box in self.optimizer.values: box = self.optimizer.values[box].force_box() jumpargs.append(box) jmp.initarglist(jumpargs) self.optimizer.newoperations.append(jmp) - return inputargs + return inputargs, short def sameop(self, op1, op2): if op1.getopnum() != op2.getopnum(): diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py --- a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_basic.py @@ -342,6 +342,23 @@ found += 1 assert found == 1 + def test_loop_variant_mul1(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += x * x + x += 1 + res += x * x + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 1323 + self.check_loop_count(1) + self.check_loops(int_mul=1) + def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): From commits-noreply at bitbucket.org Thu Apr 7 08:11:42 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 08:11:42 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: We want to include it even if we don't have rtyper Message-ID: <20110407061142.94AF6282C38@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43185:8070223587ed Date: 2011-04-07 08:11 +0200 
http://bitbucket.org/pypy/pypy/changeset/8070223587ed/ Log: We want to include it even if we don't have rtyper diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -40,11 +40,11 @@ #include "src/asm.h" #include "src/timer.h" +#include "src/profiling.h" /*** modules ***/ #ifdef HAVE_RTYPER /* only if we have an RTyper */ # include "src/rtyper.h" -# include "src/profiling.h" # include "src/debug_print.h" # include "src/debug_traceback.h" # include "src/debug_alloc.h" From commits-noreply at bitbucket.org Thu Apr 7 08:36:12 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 08:36:12 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: merge default Message-ID: <20110407063612.DFF62282C38@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43186:11ac8e1ed565 Date: 2011-04-07 08:34 +0200 http://bitbucket.org/pypy/pypy/changeset/11ac8e1ed565/ Log: merge default diff --git a/pypy/jit/metainterp/optimizeopt/string.py b/pypy/jit/metainterp/optimizeopt/string.py --- a/pypy/jit/metainterp/optimizeopt/string.py +++ b/pypy/jit/metainterp/optimizeopt/string.py @@ -47,7 +47,7 @@ class __extend__(optimizer.OptValue): """New methods added to the base class OptValue for this file.""" - def getstrlen(self, newoperations, mode): + def getstrlen(self, optimization, mode): if mode is mode_string: s = self.get_constant_string_spec(mode_string) if s is not None: @@ -56,12 +56,12 @@ s = self.get_constant_string_spec(mode_unicode) if s is not None: return ConstInt(len(s)) - if newoperations is None: + if optimization is None: return None self.ensure_nonnull() box = self.force_box() lengthbox = BoxInt() - newoperations.append(ResOperation(mode.STRLEN, [box], lengthbox)) + optimization.emit_operation(ResOperation(mode.STRLEN, [box], lengthbox)) return lengthbox @specialize.arg(1) @@ -72,13 +72,13 @@ else: return None - def 
string_copy_parts(self, newoperations, targetbox, offsetbox, mode): + def string_copy_parts(self, optimization, targetbox, offsetbox, mode): # Copies the pointer-to-string 'self' into the target string # given by 'targetbox', at the specified offset. Returns the offset # at the end of the copy. - lengthbox = self.getstrlen(newoperations, mode) + lengthbox = self.getstrlen(optimization, mode) srcbox = self.force_box() - return copy_str_content(newoperations, srcbox, targetbox, + return copy_str_content(optimization, srcbox, targetbox, CONST_0, offsetbox, lengthbox, mode) @@ -105,13 +105,12 @@ return assert self.source_op is not None self.box = box = self.source_op.result - newoperations = self.optimizer.newoperations - lengthbox = self.getstrlen(newoperations, self.mode) + lengthbox = self.getstrlen(self.optimizer, self.mode) op = ResOperation(self.mode.NEWSTR, [lengthbox], box) if not we_are_translated(): op.name = 'FORCE' - newoperations.append(op) - self.string_copy_parts(newoperations, box, CONST_0, self.mode) + self.optimizer.emit_operation(op) + self.string_copy_parts(self.optimizer, box, CONST_0, self.mode) class VStringPlainValue(VAbstractStringValue): @@ -145,14 +144,14 @@ return mode.emptystr.join([mode.chr(c.box.getint()) for c in self._chars]) - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): for i in range(len(self._chars)): charbox = self._chars[i].force_box() - newoperations.append(ResOperation(mode.STRSETITEM, [targetbox, + optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) + offsetbox = _int_add(optimizer, offsetbox, CONST_1) return offsetbox def get_args_for_fail(self, modifier): @@ -186,16 +185,16 @@ self.left = left self.right = right - def getstrlen(self, newoperations, mode): + def getstrlen(self, optimizer, mode): if self.lengthbox is None: - len1box 
= self.left.getstrlen(newoperations, mode) + len1box = self.left.getstrlen(optimizer, mode) if len1box is None: return None - len2box = self.right.getstrlen(newoperations, mode) + len2box = self.right.getstrlen(optimizer, mode) if len2box is None: return None - self.lengthbox = _int_add(newoperations, len1box, len2box) - # ^^^ may still be None, if newoperations is None + self.lengthbox = _int_add(optimizer, len1box, len2box) + # ^^^ may still be None, if optimizer is None return self.lengthbox @specialize.arg(1) @@ -208,10 +207,10 @@ return None return s1 + s2 - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): - offsetbox = self.left.string_copy_parts(newoperations, targetbox, + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + offsetbox = self.left.string_copy_parts(optimizer, targetbox, offsetbox, mode) - offsetbox = self.right.string_copy_parts(newoperations, targetbox, + offsetbox = self.right.string_copy_parts(optimizer, targetbox, offsetbox, mode) return offsetbox @@ -266,9 +265,9 @@ return s1[start : start + length] return None - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): - lengthbox = self.getstrlen(newoperations, mode) - return copy_str_content(newoperations, + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + lengthbox = self.getstrlen(optimizer, mode) + return copy_str_content(optimizer, self.vstr.force_box(), targetbox, self.vstart.force_box(), offsetbox, lengthbox, mode) @@ -299,7 +298,7 @@ return modifier.make_vstrslice(self.mode is mode_unicode) -def copy_str_content(newoperations, srcbox, targetbox, +def copy_str_content(optimizer, srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox, mode): if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): M = 5 @@ -309,23 +308,23 @@ # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM # instead of just a COPYSTRCONTENT. 
for i in range(lengthbox.value): - charbox = _strgetitem(newoperations, srcbox, srcoffsetbox, mode) - srcoffsetbox = _int_add(newoperations, srcoffsetbox, CONST_1) - newoperations.append(ResOperation(mode.STRSETITEM, [targetbox, + charbox = _strgetitem(optimizer, srcbox, srcoffsetbox, mode) + srcoffsetbox = _int_add(optimizer, srcoffsetbox, CONST_1) + optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) + offsetbox = _int_add(optimizer, offsetbox, CONST_1) else: - nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) + nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox) op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox], None) - newoperations.append(op) + optimizer.emit_operation(op) offsetbox = nextoffsetbox return offsetbox -def _int_add(newoperations, box1, box2): +def _int_add(optimizer, box1, box2): if isinstance(box1, ConstInt): if box1.value == 0: return box2 @@ -333,23 +332,23 @@ return ConstInt(box1.value + box2.value) elif isinstance(box2, ConstInt) and box2.value == 0: return box1 - if newoperations is None: + if optimizer is None: return None resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) + optimizer.emit_operation(ResOperation(rop.INT_ADD, [box1, box2], resbox)) return resbox -def _int_sub(newoperations, box1, box2): +def _int_sub(optimizer, box1, box2): if isinstance(box2, ConstInt): if box2.value == 0: return box1 if isinstance(box1, ConstInt): return ConstInt(box1.value - box2.value) resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) + optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox)) return resbox -def _strgetitem(newoperations, strbox, indexbox, mode): +def _strgetitem(optimizer, strbox, indexbox, mode): if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): if mode is 
mode_string: s = strbox.getref(lltype.Ptr(rstr.STR)) @@ -358,7 +357,7 @@ s = strbox.getref(lltype.Ptr(rstr.UNICODE)) return ConstInt(ord(s.chars[indexbox.getint()])) resbox = BoxInt() - newoperations.append(ResOperation(mode.STRGETITEM, [strbox, indexbox], + optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox], resbox)) return resbox @@ -370,7 +369,7 @@ def reconstruct_for_next_iteration(self, optimizer, valuemap): self.enabled = True return self - + def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(self.optimizer, box, source_op, mode) self.make_equal_to(box, vvalue) @@ -431,7 +430,7 @@ value.ensure_nonnull() # if value.is_virtual() and isinstance(value, VStringSliceValue): - fullindexbox = _int_add(self.optimizer.newoperations, + fullindexbox = _int_add(self.optimizer, value.vstart.force_box(), vindex.force_box()) value = value.vstr @@ -441,7 +440,7 @@ if vindex.is_constant(): return value.getitem(vindex.box.getint()) # - resbox = _strgetitem(self.optimizer.newoperations, + resbox = _strgetitem(self.optimizer, value.force_box(),vindex.force_box(), mode) return self.getvalue(resbox) @@ -452,7 +451,7 @@ def _optimize_STRLEN(self, op, mode): value = self.getvalue(op.getarg(0)) - lengthbox = value.getstrlen(self.optimizer.newoperations, mode) + lengthbox = value.getstrlen(self, mode) self.make_equal_to(op.result, self.getvalue(lengthbox)) def optimize_CALL(self, op): @@ -498,13 +497,11 @@ vright = self.getvalue(op.getarg(2)) vleft.ensure_nonnull() vright.ensure_nonnull() - newoperations = self.optimizer.newoperations value = self.make_vstring_concat(op.result, op, mode) value.setup(vleft, vright) return True def opt_call_stroruni_STR_SLICE(self, op, mode): - newoperations = self.optimizer.newoperations vstr = self.getvalue(op.getarg(1)) vstart = self.getvalue(op.getarg(2)) vstop = self.getvalue(op.getarg(3)) @@ -518,14 +515,14 @@ return True # vstr.ensure_nonnull() - lengthbox = _int_sub(newoperations, 
vstop.force_box(), + lengthbox = _int_sub(self.optimizer, vstop.force_box(), vstart.force_box()) # if isinstance(vstr, VStringSliceValue): # double slicing s[i:j][k:l] vintermediate = vstr vstr = vintermediate.vstr - startbox = _int_add(newoperations, + startbox = _int_add(self.optimizer, vintermediate.vstart.force_box(), vstart.force_box()) vstart = self.getvalue(startbox) @@ -574,7 +571,7 @@ l2box = v2.getstrlen(None, mode) if isinstance(l2box, ConstInt): if l2box.value == 0: - lengthbox = v1.getstrlen(self.optimizer.newoperations, mode) + lengthbox = v1.getstrlen(self.optimizer, mode) seo = self.optimizer.send_extra_operation seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox)) return True @@ -609,7 +606,7 @@ op = ResOperation(rop.PTR_EQ, [v1.force_box(), llhelper.CONST_NULL], resultbox) - self.optimizer.newoperations.append(op) + self.optimizer.emit_operation(op) return True # return False @@ -646,7 +643,7 @@ calldescr, func = cic.callinfo_for_oopspec(oopspecindex) op = ResOperation(rop.CALL, [ConstInt(func)] + args, result, descr=calldescr) - self.optimizer.newoperations.append(op) + self.optimizer.emit_operation(op) def propagate_forward(self, op): if not self.enabled: diff --git a/pypy/module/cpyext/include/abstract.h b/pypy/module/cpyext/include/abstract.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/abstract.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -108,6 +108,7 @@ Anders Qvist Alan McIntyre Bert Freudenberg + Tav Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -39,6 +39,10 @@ state = space.fromcache(State) state.clear_exception() + at cpython_api([PyObject], PyObject) +def PyExceptionInstance_Class(space, w_obj): + return space.type(w_obj) + @cpython_api([PyObjectP, PyObjectP, PyObjectP], 
lltype.Void) def PyErr_Fetch(space, ptype, pvalue, ptraceback): """Retrieve the error indicator into three variables whose addresses are passed. @@ -75,6 +79,9 @@ error indicator temporarily; use PyErr_Fetch() to save the current exception state.""" state = space.fromcache(State) + if w_type is None: + state.clear_exception() + return state.set_exception(OperationError(w_type, w_value)) Py_DecRef(space, w_type) Py_DecRef(space, w_value) @@ -300,3 +307,11 @@ operror = state.clear_exception() if operror: operror.write_unraisable(space, space.str_w(space.repr(w_where))) + + at cpython_api([], lltype.Void) +def PyErr_SetInterrupt(space): + """This function simulates the effect of a SIGINT signal arriving --- the + next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. + It may be called without holding the interpreter lock.""" + space.check_signal_action.set_interrupt() + diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,6 +103,7 @@ except KeyError: subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, weakrefable) + assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. 
In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. - - Longer tasks ------------ diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -6,6 +6,7 @@ from pypy.tool.udir import udir from pypy.rlib import streamio from pypy.conftest import gettestobjspace +import pytest import sys, os import tempfile, marshal @@ -109,6 +110,14 @@ p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), mode='wb') + # create a .pyw file + p = setuppkg("windows", x = "x = 78") + try: + p.join('x.pyw').remove() + except py.error.ENOENT: + pass + p.join('x.py').rename(p.join('x.pyw')) + return str(root) @@ -177,6 +186,14 @@ import a assert a == a0 + def test_trailing_slash(self): + import sys + try: + sys.path[0] += '/' + import a + finally: + sys.path[0] = sys.path[0].rstrip('/') + def test_import_pkg(self): import sys import pkg @@ -325,6 +342,11 @@ import compiled.x assert compiled.x == sys.modules.get('compiled.x') + @pytest.mark.skipif("sys.platform != 'win32'") + def test_pyw(self): + import windows.x + assert windows.x.__file__.endswith('x.pyw') + def test_cannot_write_pyc(self): import sys, os p = os.path.join(sys.path[-1], 'readonly') @@ -416,6 +438,38 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg + def test__package__(self): + # Regression test for http://bugs.python.org/issue3221. + def check_absolute(): + exec "from os import path" in ns + def check_relative(): + exec "from . 
import a" in ns + + # Check both OK with __package__ and __name__ correct + ns = dict(__package__='pkg', __name__='pkg.notarealmodule') + check_absolute() + check_relative() + + # Check both OK with only __name__ wrong + ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') + check_absolute() + check_relative() + + # Check relative fails with only __package__ wrong + ns = dict(__package__='foo', __name__='pkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check relative fails with __package__ and __name__ wrong + ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check both fail with package set to a non-string + ns = dict(__package__=object()) + raises(ValueError, check_absolute) + raises(ValueError, check_relative) + def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 @@ -985,7 +1039,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): diff --git a/lib-python/modified-2.7.0/distutils/command/build_ext.py b/lib-python/modified-2.7.0/distutils/command/build_ext.py --- a/lib-python/modified-2.7.0/distutils/command/build_ext.py +++ b/lib-python/modified-2.7.0/distutils/command/build_ext.py @@ -184,7 +184,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. 
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -192,8 +192,13 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) - if MSVC_VERSION == 9: + if 0: + # pypy has no PC directory + self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) + if 1: + # pypy has no PCBuild directory + pass + elif MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -695,24 +700,14 @@ shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. + # The python library is always needed on Windows. 
if sys.platform == "win32": - from distutils.msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - return ext.libraries + template = "python%d%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + # don't extend ext.libraries, it may be shared with other + # extensions, it is a reference to the original list + return ext.libraries + [pythonlib] elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,6 +1,7 @@ import py from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver +from pypy.rlib import objectmodel class DictTests: @@ -69,6 +70,66 @@ res = self.meta_interp(f, [10], listops=True) assert res == expected + def test_dict_trace_hash(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + if total not in dct: + dct[total] = [] + dct[total].append(total) + total -= 1 + return len(dct[0]) + + res1 = f(100) + res2 = self.meta_interp(f, [100], listops=True) + assert res1 == res2 + self.check_loops(int_mod=1) # the hash was traced + + def test_dict_setdefault(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def f(n): + dct = {} + 
total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct.setdefault(total % 2, []).append(total) + total -= 1 + return len(dct[0]) + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(new=0, new_with_vtable=0) + + def test_dict_as_counter(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = dct.get(total, 0) + 1 + total -= 1 + return dct[0] + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(int_mod=1) + class TestOOtype(DictTests, OOJitMixin): pass diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) 
-GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ 
b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -147,7 +147,6 @@ FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token - virtualrefindexdescr = vrefinfo.descr_virtualref_index virtualforceddescr = vrefinfo.descr_forced jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def 
cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 #__________________________________________________________ def 
test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 @@ simplify_graph(x) self.show(x) excfound = [] - def 
check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -79,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html @@ -232,6 +233,12 @@ ../../.. etc. +If the executable fails to find suitable libraries, it will report +``debug: WARNING: library path not found, using compiled-in sys.path`` +and then attempt to continue normally. If the default path is usable, +most code will be fine. However, the ``sys.prefix`` will be unset +and some existing libraries assume that this is never the case. + In order to use ``distutils`` or ``setuptools`` a directory ``PREFIX/site-packages`` needs to be created. 
Here's an example session setting up and using ``easy_install``:: $ cd PREFIX diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/module/cpyext/include/pyerrors.h b/pypy/module/cpyext/include/pyerrors.h --- a/pypy/module/cpyext/include/pyerrors.h +++ b/pypy/module/cpyext/include/pyerrors.h @@ -15,6 +15,20 @@ PyObject *PyErr_NewExceptionWithDoc(char *name, char *doc, PyObject *base, PyObject *dict); PyObject *PyErr_Format(PyObject *exception, const char *format, ...); +/* These APIs aren't really part of the error implementation, but + often needed to format error messages; the native C lib APIs are + not available on all platforms, which is why we provide emulations + for those platforms in Python/mysnprintf.c, + WARNING: The return value of snprintf varies across platforms; do + not rely on any particular behavior; eventually the C99 defn may + be reliable. 
+*/ +#if defined(MS_WIN32) && !defined(HAVE_SNPRINTF) +# define HAVE_SNPRINTF +# define snprintf _snprintf +# define vsnprintf _vsnprintf +#endif + #ifdef __cplusplus } #endif diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -127,12 +127,15 @@ checks[2], checks[3])) subclasses = {} for key, subcls in typedef._subclass_cache.items(): + if key[0] is not space.config: + continue cls = key[1] subclasses.setdefault(cls, {}) - subclasses[cls][subcls] = True + prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls) + assert subcls is prevsubcls for cls, set in subclasses.items(): assert len(set) <= 6, "%s has %d subclasses:\n%r" % ( - cls, len(set), [subcls.__name__ for subcls in set]) + cls, len(set), list(set)) def test_getsetproperty(self): class W_SomeType(Wrappable): diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ pypy/doc/*.html pypy/doc/config/*.html pypy/doc/discussion/*.html +pypy/module/cpyext/src/*.o +pypy/module/cpyext/test/*.o pypy/module/test_lib_pypy/ctypes_tests/*.o pypy/translator/c/src/dtoa.o pypy/translator/goal/pypy-c diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from 
pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/rlib/rdtoa.py b/pypy/rlib/rdtoa.py --- a/pypy/rlib/rdtoa.py +++ b/pypy/rlib/rdtoa.py @@ -5,16 +5,33 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder -import py +import py, sys cdir = py.path.local(pypydir) / 'translator' / 'c' include_dirs = [cdir] +# set the word endianness based on the host's endianness +# and 
the C double's endianness (which should be equal) +if hasattr(float, '__getformat__'): + assert float.__getformat__('double') == 'IEEE, %s-endian' % sys.byteorder +if sys.byteorder == 'little': + source_file = ['#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754'] +elif sys.byteorder == 'big': + source_file = ['#define WORDS_BIGENDIAN', + '#define DOUBLE_IS_BIG_ENDIAN_IEEE754'] +else: + raise AssertionError(sys.byteorder) + +source_file.append('#include "src/dtoa.c"') +source_file = '\n\n'.join(source_file) + +# ____________________________________________________________ + eci = ExternalCompilationInfo( include_dirs = [cdir], includes = ['src/dtoa.h'], libraries = [], - separate_module_files = [cdir / 'src' / 'dtoa.c'], + separate_module_sources = [source_file], export_symbols = ['_PyPy_dg_strtod', '_PyPy_dg_dtoa', '_PyPy_dg_freedtoa', diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_wait.py @@ -0,0 +1,51 @@ +from ctypes import CDLL, c_int, POINTER, byref +from ctypes.util import find_library +from resource import _struct_rusage, struct_rusage + +__all__ = ["wait3", "wait4"] + +libc = CDLL(find_library("c")) +c_wait3 = libc.wait3 + +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] + +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + +def wait3(options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, 
status.value, rusage + +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -34,11 +34,7 @@ @jit.purefunction def _getcell_makenew(self, key): - res = self.content.get(key, None) - if res is not None: - return res - result = self.content[key] = ModuleCell() - return result + return self.content.setdefault(key, ModuleCell()) def impl_setitem(self, w_key, w_value): space = self.space @@ -50,6 +46,16 @@ def impl_setitem_str(self, name, w_value): self.getcell(name, True).w_value = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + cell = self.getcell(space.str_w(w_key), True) + if cell.w_value is None: + cell.w_value = w_default + return cell.w_value + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def _clone_if_mutable(self): res = ResumeGuardDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - 
self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -27,7 +27,10 @@ from pyrepl.console import Console, Event from pyrepl import unix_eventqueue -_error = (termios.error, curses.error) +class InvalidTerminal(RuntimeError): + pass + +_error = (termios.error, curses.error, InvalidTerminal) # there are arguments for changing this to "refresh" SIGWINCH_EVENT = 'repaint' @@ -38,7 +41,7 @@ def _my_getstr(cap, optional=0): r = curses.tigetstr(cap) if not optional and r is None: - raise RuntimeError, \ + raise InvalidTerminal, \ "terminal doesn't have the required '%s' capability"%cap return r @@ -289,6 +292,12 @@ self.__write_code(self._el) self.__write(newline[x:]) self.__posxy = len(newline), y + + if '\x1b' in newline: + # ANSI escape characters are present, so we can't assume + # anything about the position of the cursor. Moving the cursor + # to the left margin should work to get to a known position. + self.move_cursor(0, y) def __write(self, text): self.__buffer.append((text, 0)) diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitely encoded as well, +# with immediate(argnum)). 
+ +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,7 +364,9 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +384,8 @@ INSN_bi32(mc, offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,23 +463,25 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, 
OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) - CMP_jr = insn(rex_w, '\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -508,7 +529,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -534,7 +555,7 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, 
'\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions @@ -543,6 +564,9 @@ # ------------------------------ Random mess ----------------------- RDTSC = insn('\x0F\x31') + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion @@ -642,7 +666,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -683,7 +707,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/metainterp/virtualref.py b/pypy/jit/metainterp/virtualref.py --- a/pypy/jit/metainterp/virtualref.py +++ b/pypy/jit/metainterp/virtualref.py @@ -13,7 +13,6 @@ self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef', ('super', rclass.OBJECT), ('virtual_token', lltype.Signed), - ('virtualref_index', lltype.Signed), ('forced', rclass.OBJECTPTR)) self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', @@ -27,8 +26,6 @@ fielddescrof = self.cpu.fielddescrof self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF, 'virtual_token') - self.descr_virtualref_index = fielddescrof(self.JIT_VIRTUAL_REF, - 'virtualref_index') self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced') # # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- 
a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold 
an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? 
- m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def 
translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git 
a/pypy/doc/config/confrest.py b/pypy/doc/config/confrest.py --- a/pypy/doc/config/confrest.py +++ b/pypy/doc/config/confrest.py @@ -7,7 +7,6 @@ all_optiondescrs = [pypyoption.pypy_optiondescription, translationoption.translation_optiondescription, ] - start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) class PyPyPage(PyPyPage): @@ -29,7 +28,7 @@ Page = PyPyPage def get_content(self, txtpath, encoding): - if txtpath.basename == "commandline.txt": + if txtpath.basename == "commandline.rst": result = [] for line in txtpath.read().splitlines(): if line.startswith('.. GENERATE:'): diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -12,12 +12,13 @@ 'get_ident': 'os_thread.get_ident', 'exit': 'os_thread.exit', 'exit_thread': 'os_thread.exit', # obsolete synonym + 'interrupt_main': 'os_thread.interrupt_main', 'stack_size': 'os_thread.stack_size', '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -7,6 +7,7 @@ CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject +from pypy.objspace.std.objectobject import W_ObjectObject from pypy.rlib.objectmodel import specialize, we_are_translated from pypy.rlib.rweakref import RWeakKeyDictionary from pypy.rpython.annlowlevel import llhelper @@ -370,6 +371,15 @@ @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): obj.c_ob_refcnt = 1 + w_type = from_ref(space, rffi.cast(PyObject, 
obj.c_ob_type)) + assert isinstance(w_type, W_TypeObject) + if w_type.is_cpytype(): + w_obj = space.allocate_instance(W_ObjectObject, w_type) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + else: + assert False, "Please add more cases in _Py_NewReference()" def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py --- a/pypy/rpython/lltypesystem/rdict.py +++ b/pypy/rpython/lltypesystem/rdict.py @@ -4,12 +4,15 @@ from pypy.rpython.rdict import AbstractDictRepr, AbstractDictIteratorRepr,\ rtype_newdict from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rarithmetic import r_uint, intmask +from pypy.rlib.rarithmetic import r_uint, intmask, LONG_BIT from pypy.rlib.objectmodel import hlinvoke from pypy.rpython import robject -from pypy.rlib import objectmodel +from pypy.rlib import objectmodel, jit from pypy.rpython import rmodel +HIGHEST_BIT = intmask(1 << (LONG_BIT - 1)) +MASK = intmask(HIGHEST_BIT - 1) + # ____________________________________________________________ # # generic implementation of RPython dictionary, with parametric DICTKEY and @@ -405,6 +408,10 @@ ENTRIES = lltype.typeOf(entries).TO return ENTRIES.fasthashfn(entries[i].key) + at jit.dont_look_inside +def ll_get_value(d, i): + return d.entries[i].value + def ll_keyhash_custom(d, key): DICT = lltype.typeOf(d).TO return hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key) @@ -422,18 +429,21 @@ def ll_dict_getitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - entries = d.entries - if entries.valid(i): - return entries[i].value - else: - raise KeyError -ll_dict_getitem.oopspec = 'dict.getitem(d, key)' + if not i & HIGHEST_BIT: + return ll_get_value(d, i) + else: + raise KeyError def ll_dict_setitem(d, key, value): hash = d.keyhash(key) i = ll_dict_lookup(d, key, hash) + return _ll_dict_setitem_lookup_done(d, key, 
value, hash, i) + + at jit.dont_look_inside +def _ll_dict_setitem_lookup_done(d, key, value, hash, i): + valid = (i & HIGHEST_BIT) == 0 + i = i & MASK everused = d.entries.everused(i) - valid = d.entries.valid(i) # set up the new entry ENTRY = lltype.typeOf(d.entries).TO.OF entry = d.entries[i] @@ -449,7 +459,6 @@ d.num_pristine_entries -= 1 if d.num_pristine_entries <= len(d.entries) / 3: ll_dict_resize(d) -ll_dict_setitem.oopspec = 'dict.setitem(d, key, value)' def ll_dict_insertclean(d, key, value, hash): # Internal routine used by ll_dict_resize() to insert an item which is @@ -470,7 +479,7 @@ def ll_dict_delitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - if not d.entries.valid(i): + if i & HIGHEST_BIT: raise KeyError _ll_dict_del(d, i) ll_dict_delitem.oopspec = 'dict.delitem(d, key)' @@ -543,7 +552,7 @@ elif entries.everused(i): freeslot = i else: - return i # pristine entry -- lookup failed + return i | HIGHEST_BIT # pristine entry -- lookup failed # In the loop, a deleted entry (everused and not valid) is by far # (factor of 100s) the least likely outcome, so test for that last. 
@@ -558,7 +567,7 @@ if not entries.everused(i): if freeslot == -1: freeslot = i - return freeslot + return freeslot | HIGHEST_BIT elif entries.valid(i): checkingkey = entries[i].key if direct_compare and checkingkey == key: @@ -712,22 +721,19 @@ def ll_get(dict, key, default): i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value - else: + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) + else: return default -ll_get.oopspec = 'dict.get(dict, key, default)' def ll_setdefault(dict, key, default): - i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value + hash = dict.keyhash(key) + i = ll_dict_lookup(dict, key, hash) + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) else: - ll_dict_setitem(dict, key, default) + _ll_dict_setitem_lookup_done(dict, key, default, hash, i) return default -ll_setdefault.oopspec = 'dict.setdefault(dict, key, default)' def ll_copy(dict): DICT = lltype.typeOf(dict).TO @@ -769,7 +775,10 @@ while i < d2len: if entries.valid(i): entry = entries[i] - ll_dict_setitem(dic1, entry.key, entry.value) + hash = entries.hash(i) + key = entry.key + j = ll_dict_lookup(dic1, key, hash) + _ll_dict_setitem_lookup_done(dic1, key, entry.value, hash, j) i += 1 ll_update.oopspec = 'dict.update(dic1, dic2)' @@ -819,8 +828,7 @@ def ll_contains(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - return d.entries.valid(i) -ll_contains.oopspec = 'dict.contains(d, key)' + return not i & HIGHEST_BIT POPITEMINDEX = lltype.Struct('PopItemIndex', ('nextindex', lltype.Signed)) global_popitem_index = lltype.malloc(POPITEMINDEX, zero=True, immortal=True) diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -15,7 +15,7 @@ ## The problem ## ----------- ## -## PyString_AsString() must returns a (non-movable) 
pointer to the underlying +## PyString_AsString() must return a (non-movable) pointer to the underlying ## buffer, whereas pypy strings are movable. C code may temporarily store ## this address and use it, as long as it owns a reference to the PyObject. ## There is no "release" function to specify that the pointer is not needed diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. 
but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -245,6 +245,11 @@ obj = foo.new() assert module.read_tp_dict(obj) == foo.fooType.copy + def test_custom_allocation(self): + foo = self.import_module("foo") + obj = foo.newCustom() + assert type(obj) is foo.Custom + assert type(foo.Custom) is foo.MetaType class TestTypes(BaseApiTest): def test_type_attributes(self, space, api): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -43,3 +43,16 @@ ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() clear_threadstate(space) + + def test_basic_threadstate_dance(self, space, api): + # Let 
extension modules call these functions, + # Not sure of the semantics in pypy though. + # (cpyext always acquires and releases the GIL around calls) + tstate = api.PyThreadState_Swap(None) + assert tstate is not None + assert not api.PyThreadState_Swap(tstate) + + api.PyEval_AcquireThread(tstate) + api.PyEval_ReleaseThread(tstate) + + clear_threadstate(space) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -399,12 +399,7 @@ return ll_rdict.ll_newdict(DICT) _ll_0_newdict.need_result_type = True - _ll_2_dict_getitem = ll_rdict.ll_dict_getitem - _ll_3_dict_setitem = ll_rdict.ll_dict_setitem _ll_2_dict_delitem = ll_rdict.ll_dict_delitem - _ll_3_dict_setdefault = ll_rdict.ll_setdefault - _ll_2_dict_contains = ll_rdict.ll_contains - _ll_3_dict_get = ll_rdict.ll_get _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -746,11 +746,13 @@ return str.substring(start, end); } - public static Object[] ll_split_chr(String str, char c) { + public static Object[] ll_split_chr(String str, char c, int max) { ArrayList list = new ArrayList(); int lastidx = 0, idx = 0; while ((idx = str.indexOf(c, lastidx)) != -1) { + if (max >= 0 && list.size() >= max) + break; String sub = str.substring(lastidx, idx); list.add(sub); lastidx = idx+1; @@ -759,6 +761,21 @@ return list.toArray(new String[list.size()]); } + public static Object[] ll_rsplit_chr(String str, char c, int max) { + ArrayList list = new ArrayList(); + int lastidx = str.length(), idx = 0; + while ((idx = str.lastIndexOf(c, lastidx - 1)) != -1) + { + if (max >= 0 && list.size() >= max) + break; + String sub = str.substring(idx + 1, lastidx); + list.add(0, sub); + lastidx = idx; + } + 
list.add(0, str.substring(0, lastidx)); + return list.toArray(new String[list.size()]); + } + public static String ll_substring(String str, int start, int cnt) { return str.substring(start,start+cnt); } diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that needs to go via its slow path. + import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. 
They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/translator/platform/posix.py b/pypy/translator/platform/posix.py --- a/pypy/translator/platform/posix.py +++ b/pypy/translator/platform/posix.py @@ -113,11 +113,16 @@ m.eci = eci def pypyrel(fpath): - rel = py.path.local(fpath).relto(pypypath) + lpath = py.path.local(fpath) + rel = lpath.relto(pypypath) if rel: return os.path.join('$(PYPYDIR)', rel) - else: - return fpath + m_dir = m.makefile_dir + if m_dir == lpath: + return '.' + if m_dir.dirpath() == lpath: + return '..' 
+ return fpath rel_cfiles = [m.pathrel(cfile) for cfile in cfiles] rel_ofiles = [rel_cfile[:-2]+'.o' for rel_cfile in rel_cfiles] diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import \ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ @@ -151,6 +152,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -259,7 +262,29 @@ d[33] = 99 assert d == dd assert x == 99 - + + def test_setdefault_fast(self): + class Key(object): + calls = 0 + def __hash__(self): + self.calls += 1 + return object.__hash__(self) + + k = Key() + d = {} + d.setdefault(k, []) + if self.on_pypy: + assert k.calls == 1 + + d.setdefault(k, 1) + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 + def test_update(self): d = {1:2, 3:4} dd = d.copy() @@ -704,13 +729,20 @@ class FakeString(str): + hash_count = 0 def unwrap(self, space): self.unwrapped = True return str(self) + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: + hash_count = 0 def hash_w(self, obj): + self.hash_count += 1 return hash(obj) def unwrap(self, x): return x @@ -726,6 +758,8 @@ return [] DictObjectCls = W_DictMultiObject def type(self, w_obj): + if isinstance(w_obj, FakeString): + return str return type(w_obj) w_str = str def str_w(self, string): @@ -890,6 +924,19 @@ impl.setitem(x, x) assert impl.r_dict_content is not None + def test_setdefault_fast(self): + on_pypy = "__pypy__" in sys.builtin_module_names + impl = self.impl + key = FakeString(self.string) + 
x = impl.setdefault(key, 1) + assert x == 1 + if on_pypy: + assert key.hash_count == 1 + x = impl.setdefault(key, 2) + assert x == 1 + if on_pypy: + assert key.hash_count == 2 + class TestStrDictImplementation(BaseTestRDictImplementation): ImplementionClass = StrDictImplementation diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -96,6 +96,10 @@ out, err = capfd.readouterr() assert "Exception ValueError: 'message' in 'location' ignored" == err.strip() + def test_ExceptionInstance_Class(self, space, api): + instance = space.call_function(space.w_ValueError) + assert api.PyExceptionInstance_Class(instance) is space.w_ValueError + class AppTestFetch(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -221,14 +221,33 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO except AttributeError: list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr) + return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + + def rtype_method_rsplit(self, hop): + rstr = hop.args_r[0].repr + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) + try: + list_type = hop.r_result.lowleveltype.TO + 
except AttributeError: + list_type = hop.r_result.lowleveltype + cLIST = hop.inputconst(Void, list_type) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -5,9 +5,16 @@ cpython_api, build_type_checkers, PyObject, CONST_STRING, CANNOT_FAIL, Py_ssize_t) from pypy.rlib.rarithmetic import r_uint +import sys PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") + at cpython_api([], lltype.Signed, error=CANNOT_FAIL) +def PyInt_GetMax(space): + """Return the system's idea of the largest integer it can handle (LONG_MAX, + as defined in the system header files).""" + return sys.maxint + @cpython_api([lltype.Signed], PyObject) def PyInt_FromLong(space, ival): """Create a new integer object with a value of ival. 
diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -816,6 +816,52 @@ """ self.optimize_loop(ops, expected, preamble) + def test_compare_with_itself(self): + ops = """ + [] + i0 = escape() + i1 = int_lt(i0, i0) + guard_false(i1) [] + i2 = int_le(i0, i0) + guard_true(i2) [] + i3 = int_eq(i0, i0) + guard_true(i3) [] + i4 = int_ne(i0, i0) + guard_false(i4) [] + i5 = int_gt(i0, i0) + guard_false(i5) [] + i6 = int_ge(i0, i0) + guard_true(i6) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) + + def test_compare_with_itself_uint(self): + py.test.skip("implement me") + ops = """ + [] + i0 = escape() + i7 = uint_lt(i0, i0) + guard_false(i7) [] + i8 = uint_le(i0, i0) + guard_true(i8) [] + i9 = uint_gt(i0, i0) + guard_false(i9) [] + i10 = uint_ge(i0, i0) + guard_true(i10) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) @@ -1791,7 +1837,7 @@ """ self.optimize_loop(ops, ops) - def test_duplicate_setfield_1(self): + def test_duplicate_setfield_0(self): ops = """ [p1, i1, i2] setfield_gc(p1, i1, descr=valuedescr) @@ -1800,8 +1846,27 @@ """ expected = """ [p1, i1, i2] + jump(p1, i1, i2) + """ + # in this case, all setfields are removed, because we can prove + # that in the loop it will always have the same value + self.optimize_loop(ops, expected) + + def test_duplicate_setfield_1(self): + ops = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i1, descr=valuedescr) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2) + jump(p1) + """ + 
expected = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i2, descr=valuedescr) + jump(p1) """ self.optimize_loop(ops, expected) @@ -1848,6 +1913,7 @@ setfield_gc(p1, i4, descr=nextdescr) # setfield_gc(p1, i2, descr=valuedescr) + escape() jump(p1, i1, i2, p3) """ preamble = """ @@ -1860,6 +1926,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ expected = """ @@ -1871,6 +1938,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ self.optimize_loop(ops, expected, preamble) @@ -1943,6 +2011,7 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1950,12 +2019,14 @@ guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected, preamble) @@ -1969,6 +2040,7 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1976,12 +2048,14 @@ guard_true(i3) [i2, p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [i2, p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected) @@ -2027,15 +2101,34 @@ guard_value(p1, ConstPtr(myptr)) [] setfield_gc(p1, i1, descr=valuedescr) setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(p1, i1, i2) """ expected = """ [i1, i2] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(i1, i2) """ self.optimize_loop(ops, expected) + def test_dont_force_setfield_around_copystrcontent(self): + ops = """ + [p0, i0, p1, i1, i2] + setfield_gc(p0, i1, descr=valuedescr) + copystrcontent(p0, i0, p1, i1, i2) + 
escape() + jump(p0, i0, p1, i1, i2) + """ + expected = """ + [p0, i0, p1, i1, i2] + copystrcontent(p0, i0, p1, i1, i2) + setfield_gc(p0, i1, descr=valuedescr) + escape() + jump(p0, i0, p1, i1, i2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getarrayitem_1(self): ops = """ [p1] @@ -2356,6 +2449,33 @@ """ self.optimize_loop(ops, expected, preamble) + def test_bug_5(self): + ops = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + i6 = getfield_gc(p0, descr=valuedescr) + i8 = int_sub(i6, 1) + setfield_gc(p0, i8, descr=valuedescr) + escape() + jump(p0) + """ + expected = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + setfield_gc(p0, i2, descr=valuedescr) + escape() + jump(p0) + """ + self.optimize_loop(ops, expected) + def test_invalid_loop_1(self): ops = """ [p1] @@ -2992,7 +3112,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3025,7 +3144,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3065,7 +3183,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3103,6 +3220,7 @@ guard_no_exception(descr=fdescr) [p2, p1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ preamble = """ @@ -3111,6 +3229,7 @@ 
call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ expected = """ @@ -3119,6 +3238,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr2) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ self.optimize_loop(ops, expected, preamble) @@ -3129,7 +3249,7 @@ #self.loop.inputargs[0].value = self.nodeobjvalue #self.check_expanded_fail_descr('''p2, p1 # p0.refdescr = p2 - # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 # where p1 is a node_vtable, nextdescr=p1b # where p1b is a node_vtable, valuedescr=i1 # ''', rop.GUARD_NO_EXCEPTION) @@ -3150,7 +3270,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3176,7 +3295,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3693,13 +3811,16 @@ guard_true(i1) [] jump(p0) """ - # The dead strlen will be eliminated be the backend. 
- expected = """ + preamble = """ [p0] i0 = strlen(p0) jump(p0) """ - self.optimize_strunicode_loop(ops, expected, expected) + expected = """ + [p0] + jump(p0) + """ + self.optimize_strunicode_loop(ops, expected, preamble) def test_addsub_const(self): ops = """ @@ -5150,7 +5271,21 @@ """ expected = """ [p0] + jump(p0) + """ + self.optimize_loop(ops, expected) + + def test_strlen_repeated(self): + ops = """ + [p0] i0 = strlen(p0) + i1 = strlen(p0) + i2 = int_eq(i0, i1) + guard_true(i2) [] + jump(p0) + """ + expected = """ + [p0] jump(p0) """ self.optimize_loop(ops, expected) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -174,7 +174,7 @@ def __init__(self): pass # make rpython happy - + def propagate_forward(self, op): raise NotImplementedError @@ -183,7 +183,7 @@ def test_emittable(self, op): return self.is_emittable(op) - + def is_emittable(self, op): return self.next_optimization.test_emittable(op) @@ -239,7 +239,7 @@ def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): #return self.__class__() raise NotImplementedError - + class Optimizer(Optimization): @@ -275,20 +275,20 @@ else: optimizations = [] self.first_optimization = self - - self.optimizations = optimizations + + self.optimizations = optimizations def force_at_end_of_preamble(self): self.resumedata_memo = resume.ResumeDataLoopMemo(self.metainterp_sd) for o in self.optimizations: o.force_at_end_of_preamble() - + def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): assert optimizer is None assert valuemap is None valuemap = {} new = Optimizer(self.metainterp_sd, self.loop) - optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in + optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in self.optimizations] new.set_optimizations(optimizations) @@ -305,7 +305,7 @@ for key, 
value in self.loop_invariant_results.items(): new.loop_invariant_results[key] = \ value.get_reconstructed(new, valuemap) - + new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None @@ -429,7 +429,7 @@ def test_emittable(self, op): return True - + def emit_operation(self, op): ###self.heap_op_optimizer.emitting_operation(op) self._emit_operation(op) @@ -507,7 +507,7 @@ canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW else: nextop = None - + if canfold: for i in range(op.numargs()): if self.get_constant_box(op.getarg(i)) is None: diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -205,7 +205,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_ge(v2.intbound): + elif v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -215,7 +215,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_le(v2.intbound): + elif v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -223,7 +223,7 @@ def optimize_INT_LE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_le(v2.intbound): + if v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) @@ -233,7 +233,7 @@ def optimize_INT_GE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_ge(v2.intbound): + if v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): 
self.make_constant_int(op.result, 0) diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -381,6 +381,9 @@ def _setup(): global _old_raw_input + if _old_raw_input is not None: + return # don't run _setup twice + try: f_in = sys.stdin.fileno() f_out = sys.stdout.fileno() @@ -401,4 +404,5 @@ _old_raw_input = __builtin__.raw_input __builtin__.raw_input = _wrapper.raw_input +_old_raw_input = None _setup() diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: 
- t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/rlib/_rweakvaldict.py b/pypy/rlib/_rweakvaldict.py --- a/pypy/rlib/_rweakvaldict.py +++ b/pypy/rlib/_rweakvaldict.py @@ -113,7 +113,7 @@ @jit.dont_look_inside def ll_get(self, d, llkey): hash = self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get') valueref = d.entries[i].value if valueref: @@ -132,7 +132,7 @@ def ll_set_nonnull(self, d, llkey, llvalue): hash 
= self.ll_keyhash(llkey) valueref = weakref_create(llvalue) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = llkey d.entries[i].value = valueref @@ -146,7 +146,7 @@ @jit.dont_look_inside def ll_set_null(self, d, llkey): hash = self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. # We don't store a NULL value, but a dead weakref, because diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -151,8 +152,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -500,6 +506,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + 
self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = self.gcdata.gc.gcheaderbuilder.HDR @@ -786,6 +796,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1327,6 +1346,14 @@ return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1336,10 +1363,7 @@ return top.address[0] def allocate_stack(self): - result = llmemory.raw_malloc(self.rootstacksize) - if result: - llmemory.raw_memclear(result, self.rootstacksize) - return result + return llmemory.raw_malloc(self.rootstacksize) def setup_root_walker(self): stackbase = self.allocate_stack() @@ -1351,12 +1375,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1463,12 +1486,11 @@ # collect all valid stacks from the dict (the entry # 
corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr = end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 @@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + 
xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def __init__(self, assembler, translate_support_code=False): @@ -135,16 +165,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -738,8 +758,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None + self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -840,16 +864,26 @@ assert isinstance(descr, BaseSizeDescr) gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. 
- for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) + + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_fixedsize_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. + for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) self.assembler.malloc_cond_fixedsize( gc_ll_descr.get_nursery_free_addr(), @@ -1132,7 +1166,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1223,18 +1257,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to 
ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -50,6 +50,7 @@ import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs +import pypy.module.cpyext.pyfile # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -102,6 +102,7 @@ #include "modsupport.h" #include "pythonrun.h" #include "pyerrors.h" +#include "sysmodule.h" #include "stringobject.h" #include "descrobject.h" #include "tupleobject.h" diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from 
pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -305,6 +305,15 @@ stackcounter = StackCounter() stackcounter._freeze_() +def llexternal_use_eci(compilation_info): + """Return a dummy function that, if called in a RPython program, + adds the given ExternalCompilationInfo to it.""" + eci = ExternalCompilationInfo(post_include_bits=['#define PYPY_NO_OP()']) + eci = eci.merge(compilation_info) + return llexternal('PYPY_NO_OP', [], lltype.Void, + compilation_info=eci, sandboxsafe=True, _nowrapper=True, + _callable=lambda: None) + # ____________________________________________________________ # Few helpers for keeping callback arguments alive # this makes passing opaque objects possible (they don't even pass @@ -737,6 +746,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py 
b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } diff --git a/pypy/module/cpyext/src/modsupport.c b/pypy/module/cpyext/src/modsupport.c --- a/pypy/module/cpyext/src/modsupport.c +++ b/pypy/module/cpyext/src/modsupport.c @@ -241,13 +241,12 @@ case 'I': { - Py_FatalError("I unsupported so far"); - //unsigned int n; - //n = va_arg(*p_va, unsigned int); - //if (n > (unsigned long)PyInt_GetMax()) - // return PyLong_FromUnsignedLong((unsigned long)n); - //else - // return PyInt_FromLong(n); + unsigned int n; + n = va_arg(*p_va, unsigned int); + if (n > (unsigned long)PyInt_GetMax()) + return PyLong_FromUnsignedLong((unsigned long)n); + else + return PyInt_FromLong(n); } case 'n': @@ -260,23 +259,20 @@ case 'k': { - Py_FatalError("Py_BuildValue k unsupported so far\n"); - /* unsigned long n; */ - /* n = va_arg(*p_va, unsigned long); */ - /* if (n > (unsigned long)PyInt_GetMax()) */ - /* return PyLong_FromUnsignedLong(n); */ - /* else */ - /* return PyInt_FromLong(n); */ + unsigned long n; + n = va_arg(*p_va, unsigned long); + if (n > (unsigned long)PyInt_GetMax()) + return PyLong_FromUnsignedLong(n); + else + return PyInt_FromLong(n); } #ifdef HAVE_LONG_LONG case 'L': - Py_FatalError("Py_BuildValue L unsupported for now\n"); - //return PyLong_FromLongLong((PY_LONG_LONG)va_arg(*p_va, PY_LONG_LONG)); + return PyLong_FromLongLong((PY_LONG_LONG)va_arg(*p_va, PY_LONG_LONG)); case 'K': - Py_FatalError("Py_BuildValue K unsupported for now\n"); - //return PyLong_FromUnsignedLongLong((PY_LONG_LONG)va_arg(*p_va, unsigned 
PY_LONG_LONG)); + return PyLong_FromUnsignedLongLong((PY_LONG_LONG)va_arg(*p_va, unsigned PY_LONG_LONG)); #endif #ifdef Py_USING_UNICODE case 'u': diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -1,9 +1,81 @@ # encoding: iso-8859-15 from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.unicodeobject import Py_UNICODE +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.module.cpyext.unicodeobject import ( + Py_UNICODE, PyUnicodeObject, new_empty_unicode) +from pypy.module.cpyext.api import PyObjectP, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef from pypy.rpython.lltypesystem import rffi, lltype import sys, py +class AppTestUnicodeObject(AppTestCpythonExtensionBase): + def test_unicodeobject(self): + module = self.import_extension('foo', [ + ("get_hello1", "METH_NOARGS", + """ + return PyUnicode_FromStringAndSize( + "Hello world", 11); + """), + ("test_GetSize", "METH_NOARGS", + """ + PyObject* s = PyUnicode_FromString("Hello world"); + int result = 0; + + if(PyUnicode_GetSize(s) == 11) { + result = 1; + } + if(s->ob_type->tp_basicsize != sizeof(void*)*4) + result = 0; + Py_DECREF(s); + return PyBool_FromLong(result); + """), + ("test_GetSize_exception", "METH_NOARGS", + """ + PyObject* f = PyFloat_FromDouble(1.0); + Py_ssize_t size = PyUnicode_GetSize(f); + + Py_DECREF(f); + return NULL; + """), + ("test_is_unicode", "METH_VARARGS", + """ + return PyBool_FromLong(PyUnicode_Check(PyTuple_GetItem(args, 0))); + """)]) + assert module.get_hello1() == u'Hello world' + assert module.test_GetSize() + raises(TypeError, module.test_GetSize_exception) + + assert module.test_is_unicode(u"") + assert not module.test_is_unicode(()) + + def test_unicode_buffer_init(self): + module = self.import_extension('foo', [ + ("getunicode", 
"METH_NOARGS", + """ + PyObject *s, *t; + Py_UNICODE* c; + Py_ssize_t len; + + s = PyUnicode_FromUnicode(NULL, 4); + if (s == NULL) + return NULL; + t = PyUnicode_FromUnicode(NULL, 3); + if (t == NULL) + return NULL; + Py_DECREF(t); + c = PyUnicode_AsUnicode(s); + c[0] = 'a'; + c[1] = 0xe9; + c[3] = 'c'; + return s; + """), + ]) + s = module.getunicode() + assert len(s) == 4 + assert s == u'a�\x00c' + + + class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): assert api.PyUnicode_GET_SIZE(space.wrap(u'sp�m')) == 4 @@ -77,6 +149,28 @@ assert space.unwrap(w_res) == u'sp�' rffi.free_charp(s) + def test_unicode_resize(self, space, api): + py_uni = new_empty_unicode(space, 10) + ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + py_uni.c_buffer[0] = u'a' + py_uni.c_buffer[1] = u'b' + py_uni.c_buffer[2] = u'c' + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 3) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 3 + assert py_uni.c_buffer[1] == u'b' + assert py_uni.c_buffer[3] == u'\x00' + # the same for growing + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 10) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 10 + assert py_uni.c_buffer[1] == 'b' + assert py_uni.c_buffer[10] == '\x00' + Py_DecRef(space, ar[0]) + lltype.free(ar, flavor='raw') + def test_AsUTF8String(self, space, api): w_u = space.wrap(u'sp�m') w_res = api.PyUnicode_AsUTF8String(w_u) @@ -235,13 +329,13 @@ x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) - w_y = api.PyUnicode_FromUnicode(target_chunk, 4) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, 4)) assert space.eq_w(w_y, space.wrap(u"abcd")) size = api.PyUnicode_GET_SIZE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, size) - w_y = api.PyUnicode_FromUnicode(target_chunk, size) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, size)) assert space.eq_w(w_y, w_x) diff --git 
a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -1,3 +1,4 @@ +from __future__ import with_statement MARKER = 42 class AppTestImpModule: @@ -34,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -46,4 +46,5 @@ return PyBuffer_New(150); """), ]) - module.buffer_new() + b = module.buffer_new() + raises(AttributeError, getattr, b, 'x') diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -32,6 +32,7 @@ else: SO = ".so" DEFAULT_SOABI = 'pypy-14' +CHECK_FOR_PYW = 
sys.platform == 'win32' @specialize.memo() def get_so_extension(space): @@ -58,6 +59,12 @@ if os.path.exists(pyfile) and case_ok(pyfile): return PY_SOURCE, ".py", "U" + # on Windows, also check for a .pyw file + if CHECK_FOR_PYW: + pyfile = filepart + ".pyw" + if os.path.exists(pyfile) and case_ok(pyfile): + return PY_SOURCE, ".pyw", "U" + # The .py file does not exist. By default on PyPy, lonepycfiles # is False: if a .py file does not exist, we don't even try to # look for a lone .pyc file. @@ -84,7 +91,9 @@ else: # XXX that's slow def case_ok(filename): - index = filename.rfind(os.sep) + index1 = filename.rfind(os.sep) + index2 = filename.rfind(os.altsep) + index = max(index1, index2) if index < 0: directory = os.curdir else: @@ -109,6 +118,107 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) +def _get_relative_name(space, modulename, level, w_globals): + w = space.wrap + ctxt_w_package = space.finditem(w_globals, w('__package__')) + + ctxt_package = None + if ctxt_w_package is not None and ctxt_w_package is not space.w_None: + try: + ctxt_package = space.str_w(ctxt_w_package) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_ValueError, space.wrap( + "__package__ set to non-string")) + + if ctxt_package is not None: + # __package__ is set, so use it + if ctxt_package == '' and level < 0: + return None, 0 + + package_parts = ctxt_package.split('.') + while level > 1 and package_parts: + level -= 1 + package_parts.pop() + if not package_parts: + if len(ctxt_package) == 0: + msg = "Attempted relative import in non-package" + else: + msg = "Attempted relative import beyond toplevel package" + raise OperationError(space.w_ValueError, w(msg)) + + # Try to import parent package + try: + w_parent = absolute_import(space, ctxt_package, 0, + None, tentative=False) + except OperationError, e: + if not e.match(space, space.w_ImportError): + raise + if 
level > 0: + raise OperationError(space.w_SystemError, space.wrap( + "Parent module '%s' not loaded, " + "cannot perform relative import" % ctxt_package)) + else: + space.warn("Parent module '%s' not found " + "while handling absolute import" % ctxt_package, + space.w_RuntimeWarning) + + rel_level = len(package_parts) + if modulename: + package_parts.append(modulename) + rel_modulename = '.'.join(package_parts) + else: + # __package__ not set, so figure it out and set it + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) + + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + + if not ctxt_name: + return None, 0 + + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + + if level > 0 and not ctxt_name_prefix_parts: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + + rel_modulename = '.'.join(ctxt_name_prefix_parts) + + if ctxt_w_path is not None: + # __path__ is set, so __name__ is already the package name + space.setitem(w_globals, w("__package__"), ctxt_w_name) + else: + # Normal module, so work out the package name if any + if '.' not in ctxt_name: + space.setitem(w_globals, w("__package__"), space.w_None) + elif rel_modulename: + space.setitem(w_globals, w("__package__"), w(rel_modulename)) + + if modulename: + if rel_modulename: + rel_modulename += '.' 
+ modulename + else: + rel_modulename = modulename + + rel_level = len(ctxt_name_prefix_parts) + + return rel_modulename, rel_level + + @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -130,68 +240,40 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) + rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise + if rel_modulename: + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False - if ctxt_name is not None: - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - if ctxt_name_prefix_parts: - rel_modulename = '.'.join(ctxt_name_prefix_parts) - if modulename: - rel_modulename += '.' + modulename - baselevel = len(ctxt_name_prefix_parts) - if rel_modulename is not None: - # XXX What is this check about? There is no test for it - w_mod = check_sys_modules(space, w(rel_modulename)) + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod - if (w_mod is None or - not space.is_w(w_mod, space.w_None) or - level > 0): - - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. 
- if level == -1: - tentative = True - else: - tentative = False - - w_mod = absolute_import(space, rel_modulename, - baselevel, fromlist_w, - tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod - else: - rel_modulename = None - - if level > 0: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - w_mod = absolute_import_try(space, modulename, 0, fromlist_w) - if w_mod is None or space.is_w(w_mod, space.w_None): - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) space.timer.stop_name("importhook", modulename) return w_mod +def absolute_import(space, modulename, baselevel, fromlist_w, tentative): + # Short path: check in sys.modules + w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) + if w_mod is not None and not space.is_w(w_mod, space.w_None): + return w_mod + return absolute_import_with_lock(space, modulename, baselevel, + fromlist_w, tentative) + @jit.dont_look_inside -def absolute_import(space, modulename, baselevel, fromlist_w, tentative): +def absolute_import_with_lock(space, modulename, baselevel, + fromlist_w, tentative): lock = getimportlock(space) lock.acquire_lock() try: diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py 
b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. - def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -722,31 +722,75 @@ newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) - def ll_split_chr(LIST, s, c): + def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) count = 1 i = 0 + if max == 0: + i = strlen while i < strlen: if chars[i] == c: count += 1 + if max >= 0 and count > max: + break i += 1 res = LIST.ll_newlist(count) items = res.ll_items() i = 0 j = 0 resindex = 0 + if max == 0: + j = strlen while j < strlen: if chars[j] == c: item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) resindex += 1 i = j + 1 + if max >= 0 and resindex >= max: + j = strlen + break j += 1 item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) return res + def ll_rsplit_chr(LIST, s, c, max): + chars = s.chars + strlen = len(chars) + count = 1 + i = 0 
+ if max == 0: + i = strlen + while i < strlen: + if chars[i] == c: + count += 1 + if max >= 0 and count > max: + break + i += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + i = strlen + j = strlen + resindex = count - 1 + assert resindex >= 0 + if max == 0: + j = 0 + while j > 0: + j -= 1 + if chars[j] == c: + item = items[resindex] = s.malloc(i - j - 1) + item.copy_contents(s, item, j + 1, 0, i - j - 1) + resindex -= 1 + i = j + if resindex == 0: + j = 0 + break + item = items[resindex] = s.malloc(i - j) + item.copy_contents(s, item, j, 0, i - j) + return res + @purefunction def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + 
checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. # Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... 
assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -3,9 +3,8 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import Py_LT, Py_LE, Py_NE, Py_EQ,\ - Py_GE, Py_GT, fopen, fclose, fwrite -from pypy.tool.udir import udir +from pypy.module.cpyext.api import ( + Py_LT, Py_LE, Py_NE, Py_EQ, Py_GE, Py_GT) class TestObject(BaseApiTest): def test_IsTrue(self, space, api): @@ -181,52 +180,6 @@ assert api.PyObject_Unicode(space.wrap("\xe9")) is None api.PyErr_Clear() - def test_file_fromstring(self, space, api): - filename = rffi.str2charp(str(udir / "_test_file")) - mode = rffi.str2charp("wb") - w_file = api.PyFile_FromString(filename, mode) - rffi.free_charp(filename) - rffi.free_charp(mode) - - assert api.PyFile_Check(w_file) - assert api.PyFile_CheckExact(w_file) - assert not api.PyFile_Check(space.wrap("text")) - - space.call_method(w_file, "write", space.wrap("text")) - space.call_method(w_file, "close") - assert (udir / "_test_file").read() == "text" - - def test_file_getline(self, space, api): - filename = rffi.str2charp(str(udir / "_test_file")) - - mode = rffi.str2charp("w") - w_file = api.PyFile_FromString(filename, mode) - space.call_method(w_file, "write", - space.wrap("line1\nline2\nline3\nline4")) - space.call_method(w_file, "close") - - rffi.free_charp(mode) - mode = rffi.str2charp("r") - w_file = api.PyFile_FromString(filename, mode) - rffi.free_charp(filename) - rffi.free_charp(mode) - - w_line = api.PyFile_GetLine(w_file, 0) - assert space.str_w(w_line) == "line1\n" - - w_line = api.PyFile_GetLine(w_file, 4) - assert space.str_w(w_line) == "line" - - w_line = 
api.PyFile_GetLine(w_file, 0) - assert space.str_w(w_line) == "2\n" - - # XXX We ought to raise an EOFError here, but don't - w_line = api.PyFile_GetLine(w_file, -1) - # assert api.PyErr_Occurred() is space.w_EOFError - assert space.str_w(w_line) == "line3\n" - - space.call_method(w_file, "close") - class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,7 +17,6 @@ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ -^pypy/translator/c/src/dtoa.o$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ @@ -64,4 +63,4 @@ ^pypy/doc/image/parsing_example.+\.png$ ^compiled ^.git/ -^release/ \ No newline at end of file +^release/ diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,3 +1,4 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror @@ -212,10 +213,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -224,7 +227,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. 
# The addresses are actually grouped in pairs: @@ -237,6 +240,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... @@ -365,7 +375,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +398,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. + """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. + # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. 
+ + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= 
self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw') + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +567,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +576,9 @@ # where it can be fished and reused by the FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- 
a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -260,7 +260,7 @@ @classmethod def is_const(cls, v1): return isinstance(v1, str) and v1.startswith('ConstClass(') - + def match_var(self, v1, exp_v2): assert v1 != '_' if exp_v2 == '_': @@ -285,9 +285,9 @@ self.match_var(op.res, exp_res) self._assert(len(op.args) == len(exp_args), "wrong number of arguments") for arg, exp_arg in zip(op.args, exp_args): - self._assert(self.match_var(arg, exp_arg), "variable mismatch") + self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) self.match_descr(op.descr, exp_descr) - + def _next_op(self, iter_ops, assert_raises=False): try: diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -488,6 +488,8 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) # experimental operations in support of thread cloning, only # implemented by the Mark&Sweep GC diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -313,11 +313,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. 
Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). """ for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -325,9 +326,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -1430,6 +1430,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. self.run_source(''' def main(a, b): i = sa = 0 @@ -1437,11 +1439,10 @@ %s i += 1 return sa - ''' % code, 179, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_mod(self): avalues = ('a', 'b', 7, -42, 8) bvalues = ['b'] + range(-10, 0) + range(1,10) @@ -1462,6 +1463,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. 
self.run_source(''' def main(a, b): i = sa = 0 @@ -1471,11 +1474,10 @@ %s i += 1 return sa - ''' % code, 450, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_dont_trace_every_iteration(self): self.run_source(''' def main(a, b): diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -266,6 +266,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(d, [x, y]) assert res == d(x, y) @@ -276,6 +278,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(m, [x, y]) assert res == m(x, y) diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -38,11 +38,7 @@ if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: - _sse2_eci = ExternalCompilationInfo( + ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = ['-msse2', '-mfpmath=sse', '-DPYPY_CPU_HAS_STANDARD_PRECISION'], - 
separate_module_sources = ['void PYPY_NO_OP(void) {}'], - ) - ensure_sse2_floats = rffi.llexternal('PYPY_NO_OP', [], lltype.Void, - compilation_info=_sse2_eci, - sandboxsafe=True) + )) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -0,0 +1,72 @@ +from pypy.module.cpyext.api import fopen, fclose, fwrite +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.tool.udir import udir +import pytest + +class TestFile(BaseApiTest): + + def test_file_fromstring(self, space, api): + filename = rffi.str2charp(str(udir / "_test_file")) + mode = rffi.str2charp("wb") + w_file = api.PyFile_FromString(filename, mode) + rffi.free_charp(filename) + rffi.free_charp(mode) + + assert api.PyFile_Check(w_file) + assert api.PyFile_CheckExact(w_file) + assert not api.PyFile_Check(space.wrap("text")) + + space.call_method(w_file, "write", space.wrap("text")) + space.call_method(w_file, "close") + assert (udir / "_test_file").read() == "text" + + def test_file_getline(self, space, api): + filename = rffi.str2charp(str(udir / "_test_file")) + + mode = rffi.str2charp("w") + w_file = api.PyFile_FromString(filename, mode) + space.call_method(w_file, "write", + space.wrap("line1\nline2\nline3\nline4")) + space.call_method(w_file, "close") + + rffi.free_charp(mode) + mode = rffi.str2charp("r") + w_file = api.PyFile_FromString(filename, mode) + rffi.free_charp(filename) + 
rffi.free_charp(mode) + + w_line = api.PyFile_GetLine(w_file, 0) + assert space.str_w(w_line) == "line1\n" + + w_line = api.PyFile_GetLine(w_file, 4) + assert space.str_w(w_line) == "line" + + w_line = api.PyFile_GetLine(w_file, 0) + assert space.str_w(w_line) == "2\n" + + # XXX We ought to raise an EOFError here, but don't + w_line = api.PyFile_GetLine(w_file, -1) + # assert api.PyErr_Occurred() is space.w_EOFError + assert space.str_w(w_line) == "line3\n" + + space.call_method(w_file, "close") + + @pytest.mark.xfail + def test_file_fromfile(self, space, api): + api.PyFile_Fromfile() + + @pytest.mark.xfail + def test_file_setbufsize(self, space, api): + api.PyFile_SetBufSize() + + def test_file_writestring(self, space, api, capfd): + s = rffi.str2charp("test\n") + try: + api.PyFile_WriteString(s, space.sys.get("stdout")) + finally: + rffi.free_charp(s) + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + assert out == "test\n" + diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -159,6 +159,11 @@ cmdline="--allworkingmodules", negation=True), + StrOption("extmodules", + "Comma-separated list of third-party builtin modules", + cmdline="--ext", + default=None), + BoolOption("translationmodules", "use only those modules that are needed to run translate.py on pypy", default=False, @@ -352,8 +357,8 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) - if not IS_64_BITS: - config.objspace.std.suggest(withsmalllong=True) + #if not IS_64_BITS: + # config.objspace.std.suggest(withsmalllong=True) # extra costly optimizations only go in level 3 if level == '3': diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from 
pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -31,6 +31,10 @@ if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' + if hasattr(os, 'wait3'): + appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { 'open' : 'interp_posix.open', diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -34,43 +34,7 @@ PyThreadState_Clear().""" raise NotImplementedError - at cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) -def PyThreadState_Swap(space, tstate): - """Swap the current thread state with the thread state given by the argument - tstate, which may be NULL. The global interpreter lock must be held.""" - raise NotImplementedError - - at cpython_api([PyThreadState], lltype.Void) -def PyEval_AcquireThread(space, tstate): - """Acquire the global interpreter lock and set the current thread state to - tstate, which should not be NULL. 
The lock must have been created earlier. - If this thread already has the lock, deadlock ensues. This function is not - available when thread support is disabled at compile time.""" - raise NotImplementedError - - at cpython_api([PyThreadState], lltype.Void) -def PyEval_ReleaseThread(space, tstate): - """Reset the current thread state to NULL and release the global interpreter - lock. The lock must have been created earlier and must be held by the current - thread. The tstate argument, which must not be NULL, is only used to check - that it represents the current thread state --- if it isn't, a fatal error is - reported. This function is not available when thread support is disabled at - compile time.""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def Py_MakePendingCalls(space): return 0 -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) - - at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) -def PyGILState_Ensure(space): - return 0 - - at cpython_api([PyGILState_STATE], lltype.Void) -def PyGILState_Release(space, state): - return - diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -604,6 +604,18 @@ else: self._as_rdict().impl_fallback_setitem(w_key, w_value) + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + key = space.str_w(w_key) + w_result = self.impl_getitem_str(key) + if w_result is not None: + return w_result + self.impl_setitem_str(key, w_default) + return w_default + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ 
-30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git a/pypy/module/cpyext/test/test_sysmodule.py b/pypy/module/cpyext/test/test_sysmodule.py --- a/pypy/module/cpyext/test/test_sysmodule.py +++ b/pypy/module/cpyext/test/test_sysmodule.py @@ -14,6 +14,21 @@ assert module.get("excepthook") assert not module.get("spam_spam_spam") + def test_writestdout(self): + module = self.import_extension('foo', [ + ("writestdout", "METH_NOARGS", + """ + PySys_WriteStdout("format: %d\\n", 42); + Py_RETURN_NONE; + """)]) + import sys, StringIO + sys.stdout = StringIO.StringIO() + try: + module.writestdout() + assert sys.stdout.getvalue() == "format: 42\n" + finally: + sys.stdout = sys.__stdout__ + class TestSysModule(BaseApiTest): def test_sysmodule(self, space, api): buf = rffi.str2charp("last_tb") diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- 
a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/module/cpyext/include/sysmodule.h b/pypy/module/cpyext/include/sysmodule.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/sysmodule.h @@ -0,0 +1,13 @@ +#ifndef Py_SYSMODULE_H +#define Py_SYSMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...); +PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SYSMODULE_H */ diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py 
b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -27,7 +27,7 @@ def optimize_loop_1(metainterp_sd, loop, enable_opts, inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. + """Optimize loop.operations to remove internal overheadish operations. """ optimizations = [] unroll = 'unroll' in enable_opts @@ -43,7 +43,7 @@ if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: optimizations.append(OptSimplify()) - + if inline_short_preamble: optimizations = [OptInlineShortPreamble(retraced)] + optimizations diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - 
llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,8 +8,8 @@ for descr in all_optiondescrs: prefix = descr._name c = config.Config(descr) - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." + p + ".rst" f = thisdir.join(basename) f.ensure() diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1421,9 +1421,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return 
count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -26,7 +26,10 @@ else { string res = ""; foreach(char ch in x) - res+= string.Format("\\x{0:X2}", (int)ch); + if (ch >= 32 && ch < 128) + res+= ch; + else + res+= string.Format("\\x{0:X2}", (int)ch); return string.Format("'{0}'", res); } } @@ -717,9 +720,31 @@ return s.Substring(start, count); } - public static string[] ll_split_chr(string s, char ch) + public static string[] ll_split_chr(string s, char ch, int max) { - return s.Split(ch); + if (max < 0) + return s.Split(ch); + else + return s.Split(new Char[] {ch}, max + 1); + } + + public static string[] ll_rsplit_chr(string s, char ch, int max) + { + string[] splits = s.Split(ch); + if (max < 0 || splits.Length <= max + 1) + return splits; + else { + /* XXX not very efficient */ + string first = splits[0]; + // join the first (length - max - 1) items + int i; + for (i = 1; i < splits.Length - max; i++) + first += ch + splits[i]; + splits[0] = first; + Array.Copy(splits, i, splits, 1, max); + Array.Resize(ref splits, max + 1); + return splits; + } } public static bool ll_contains(string s, char ch) diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + 
for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the 
single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). """ - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel 
import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -38,7 +38,9 @@ PyObject * Py_BuildValue(const char *, ...); +PyObject * Py_VaBuildValue(const char *, va_list); PyObject * _Py_BuildValue_SizeT(const char *, ...); +PyObject * _Py_VaBuildValue_SizeT(const char *, va_list); int _PyArg_NoKeywords(const char *funcname, PyObject *kw); int PyArg_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, ...); diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git 
a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -435,14 +435,6 @@ return (PyObject *)foop; } -/* List of functions exported by this module */ - -static PyMethodDef foo_functions[] = { - {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, - {NULL, NULL} /* Sentinel */ -}; - - static int initerrtype_init(PyObject *self, PyObject *args, PyObject *kwargs) { PyErr_SetString(PyExc_ValueError, "init raised an error!"); return -1; @@ -592,6 +584,41 @@ 0 /*tp_weaklist*/ }; +/* A type with a custom allocator */ +static void custom_dealloc(PyObject *ob) +{ + free(ob); +} + +static PyTypeObject CustomType; + +static PyObject *newCustom(PyObject *self, PyObject *args) +{ + PyObject *obj = calloc(1, sizeof(PyObject)); + obj->ob_type = &CustomType; + _Py_NewReference(obj); + return obj; +} + +static PyTypeObject CustomType = { + PyObject_HEAD_INIT(NULL) + 0, + "foo.Custom", /*tp_name*/ + sizeof(PyObject), /*tp_size*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)custom_dealloc, /*tp_dealloc*/ +}; + + +/* List of functions exported by this module */ + +static PyMethodDef foo_functions[] = { + {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, + {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, + {NULL, NULL} /* Sentinel */ +}; + /* Initialize this module. 
*/ @@ -616,7 +643,10 @@ if (PyType_Ready(&InitErrType) < 0) return; if (PyType_Ready(&SimplePropertyType) < 0) - return; + return; + CustomType.ob_type = &MetaType; + if (PyType_Ready(&CustomType) < 0) + return; m = Py_InitModule("foo", foo_functions); if (m == NULL) return; @@ -635,4 +665,6 @@ return; if (PyDict_SetItemString(d, "Property", (PyObject *) &SimplePropertyType) < 0) return; + if (PyDict_SetItemString(d, "Custom", (PyObject *) &CustomType) < 0) + return; } diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - 
def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. 
- # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -857,6 +857,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -576,20 +576,56 @@ res = self.interpret(f, [i, newlines]) assert res == f(i, newlines) - def test_split(self): + def _make_split_test(self, split_fn): const = self.const def fn(i): s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = s.split(const('.')) + l = getattr(s, split_fn)(const('.')) sum = 0 for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) return sum + len(l) * 100 + return fn + + def test_split(self): + fn = self._make_split_test('split') for i in range(5): res = self.interpret(fn, [i]) assert res == fn(i) + def test_rsplit(self): + fn = self._make_split_test('rsplit') + for i in range(5): + res = self.interpret(fn, [i]) + assert res == fn(i) + + def _make_split_limit_test(self, split_fn): + const = self.const + def fn(i, j): + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.'), j) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - 
ord(const('0')[0]) + return sum + len(l) * 100 + return fn + + def test_split_limit(self): + fn = self._make_split_limit_test('split') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + + def test_rsplit_limit(self): + fn = self._make_split_limit_test('rsplit') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + def test_contains(self): const = self.const constchar = self.constchar diff --git a/dotviewer/conftest.py b/dotviewer/conftest.py --- a/dotviewer/conftest.py +++ b/dotviewer/conftest.py @@ -6,4 +6,6 @@ dest="pygame", default=False, help="allow interactive tests using Pygame") -option = py.test.config.option +def pytest_configure(config): + global option + option = config.option diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1647,6 +1647,7 @@ print >> output, """\ /* See description in asmgcroot.py */ + .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ movq\t%rsi, %rcx\t/* 2nd argument, which is gcrootanchor */ movq\t%rsp, %rax\t/* my frame top address */ @@ -1666,6 +1667,7 @@ pushq\t%rcx\t\t\t/* self->prev = gcrootanchor */ movq\t%rsp, 8(%rcx)\t/* gcrootanchor->next = self */ movq\t%rsp, 0(%rax)\t\t\t/* next->prev = self */ + .cfi_def_cfa_offset 80\t/* 9 pushes + the retaddr = 80 bytes */ /* note: the Mac OS X 16 bytes aligment must be respected. 
*/ call\t*%rdx\t\t/* invoke the callback */ @@ -1687,6 +1689,7 @@ /* the return value is the one of the 'call' above, */ /* because %rax (and possibly %rdx) are unmodified */ ret + .cfi_endproc """ _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. better than default of 131k for most cases # in case it didn't work diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. 
- self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. 
self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 
@@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -6,7 +6,7 @@ """ import weakref, random -import py +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -72,6 +72,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -87,8 +101,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts='') + patch = get_functions_to_patch() + old_value = {} + try: + for (obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts='') + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source() cbuilder.compile() @@ -127,7 +154,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark 
GC. def setup_class(cls): funcs = [] @@ -178,15 +205,21 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True) + gcrootfinder=cls.gcrootfinder, jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -576,3 +609,10 @@ def test_compile_framework_minimal_size_in_nursery(self): self.run('compile_framework_minimal_size_in_nursery') + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -214,3 +214,21 @@ assert res == 1024*1024 res = thread.stack_size(0) assert res == 2*1024*1024 + + def test_interrupt_main(self): + import thread, time + import signal + + def f(): + time.sleep(0.5) + thread.interrupt_main() + + def busy_wait(): + for x in range(1000): + time.sleep(0.01) + + # This is normally called by app_main.py + signal.signal(signal.SIGINT, signal.default_int_handler) + + thread.start_new_thread(f, ()) + raises(KeyboardInterrupt, busy_wait) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -7,15 +7,16 @@ bootstrap_function, PyObjectFields, cpython_struct, CONST_STRING, 
CONST_WSTRING) from pypy.module.cpyext.pyerrors import PyErr_BadArgument -from pypy.module.cpyext.pyobject import PyObject, from_ref, make_typedescr +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.objspace.std import unicodeobject, unicodetype from pypy.rlib import runicode import sys -## See comment in stringobject.py. PyUnicode_FromUnicode(NULL, size) is not -## yet supported. +## See comment in stringobject.py. PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) @@ -28,7 +29,8 @@ make_typedescr(space.w_unicode.instancetypedef, basestruct=PyUnicodeObject.TO, attach=unicode_attach, - dealloc=unicode_dealloc) + dealloc=unicode_dealloc, + realize=unicode_realize) # Buffer for the default encoding (used by PyUnicode_GetDefaultEncoding) DEFAULT_ENCODING_SIZE = 100 @@ -39,12 +41,39 @@ Py_UNICODE = lltype.UniChar +def new_empty_unicode(space, length): + """ + Allocates a PyUnicodeObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until unicode_realize() is + called. + """ + typedescr = get_typedescr(space.w_unicode.instancetypedef) + py_obj = typedescr.allocate(space, space.w_unicode) + py_uni = rffi.cast(PyUnicodeObject, py_obj) + + buflen = length + 1 + py_uni.c_size = length + py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True) + return py_uni + def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) +def unicode_realize(space, py_obj): + """ + Creates the unicode in the interpreter. 
The PyUnicodeObject buffer must not + be modified after this call. + """ + py_uni = rffi.cast(PyUnicodeObject, py_obj) + s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + @cpython_api([PyObject], lltype.Void, external=False) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) @@ -128,7 +157,9 @@ def PyUnicode_AsUnicode(space, ref): """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" - if not PyUnicode_Check(space, ref): + # Don't use PyUnicode_Check, it will realize the object :-( + w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) + if not space.is_true(space.issubtype(w_type, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @@ -237,10 +268,11 @@ object. If the buffer is not NULL, the return value might be a shared object. 
Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" - if not wchar_p: - raise NotImplementedError - s = rffi.wcharpsize2unicode(wchar_p, length) - return space.wrap(s) + if wchar_p: + s = rffi.wcharpsize2unicode(wchar_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_unicode(space, length)) @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromWideChar(space, wchar_p, length): @@ -330,6 +362,29 @@ w_str = space.wrap(rffi.charpsize2str(s, size)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) + at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def PyUnicode_Resize(space, ref, newsize): + # XXX always create a new string so far + py_uni = rffi.cast(PyUnicodeObject, ref[0]) + if not py_uni.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "PyUnicode_Resize called on already created string")) + try: + py_newuni = new_empty_unicode(space, newsize) + except MemoryError: + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_uni.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newuni.c_buffer[i] = py_uni.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newuni) + return 0 + @cpython_api([PyObject], PyObject) def PyUnicode_AsUTF8String(space, w_unicode): """Encode a Unicode object using UTF-8 and return the result as Python string diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -11,6 +11,8 @@ /* the -3 option will probably not be implemented */ #define Py_Py3kWarningFlag 0 +#define Py_FrozenFlag 0 + #ifdef __cplusplus } #endif diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ 
b/pypy/module/signal/interp_signal.py @@ -146,6 +146,15 @@ self.pending_signals[n] = None self.reissue_signal_action.fire_after_thread_switch() + def set_interrupt(self): + "Simulates the effect of a SIGINT signal arriving" + n = cpy_signal.SIGINT + if self.reissue_signal_action is None: + self.report_signal(n) + else: + self.pending_signals[n] = None + self.reissue_signal_action.fire_after_thread_switch() + def report_signal(self, n): try: w_handler = self.handlers_w[n] diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -385,6 +385,19 @@ assert module.__doc__ == "docstring" assert module.return_cookie() == 3.14 + def test_load_dynamic(self): + import sys + init = """ + if (Py_IsInitialized()) + Py_InitModule("foo", NULL); + """ + foo = self.import_module(name='foo', init=init) + assert 'foo' in sys.modules + del sys.modules['foo'] + import imp + foo2 = imp.load_dynamic('foo', foo.__file__) + assert 'foo' in sys.modules + assert foo.__dict__ == foo2.__dict__ def test_InitModule4_dotted(self): """ diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -45,3 +45,9 @@ space.warn('PyImport_ImportModuleNoBlock() is not non-blocking', space.w_RuntimeWarning) return PyImport_Import(space, space.wrap(rffi.charp2str(name))) + + at cpython_api([PyObject], PyObject) +def PyImport_ReloadModule(space, w_mod): + from pypy.module.imp.importing import reload + return reload(space, w_mod) + diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -61,3 +61,27 @@ assert not imp.lock_held() self.waitfor(lambda: done) assert done + +class TestImportLock: + def test_lock(self, space, monkeypatch): + from 
pypy.module.imp.importing import getimportlock, importhook + + # Monkeypatch the import lock and add a counter + importlock = getimportlock(space) + original_acquire = importlock.acquire_lock + def acquire_lock(): + importlock.count += 1 + original_acquire() + importlock.count = 0 + monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) + + # An already imported module + importhook(space, 'sys') + assert importlock.count == 0 + # A new module + importhook(space, 're') + assert importlock.count == 7 + # Import it again + previous_count = importlock.count + importhook(space, 're') + assert importlock.count == previous_count diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -1,5 +1,5 @@ -from pypy.module.cpyext.api import cpython_api, generic_cpy_call, CANNOT_FAIL,\ - cpython_struct +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct) from pypy.rpython.lltypesystem import rffi, lltype PyInterpreterState = lltype.Ptr(cpython_struct("PyInterpreterState", ())) @@ -77,6 +77,52 @@ state = space.fromcache(InterpreterState) return state.get_thread_state(space) + at cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) +def PyThreadState_Swap(space, tstate): + """Swap the current thread state with the thread state given by the argument + tstate, which may be NULL. 
The global interpreter lock must be held.""" + # All cpyext calls release and acquire the GIL, so this function has no + # side-effects + if tstate: + return lltype.nullptr(PyThreadState.TO) + else: + state = space.fromcache(InterpreterState) + return state.get_thread_state(space) + + at cpython_api([PyThreadState], lltype.Void) +def PyEval_AcquireThread(space, tstate): + """Acquire the global interpreter lock and set the current thread state to + tstate, which should not be NULL. The lock must have been created earlier. + If this thread already has the lock, deadlock ensues. This function is not + available when thread support is disabled at compile time.""" + # All cpyext calls release and acquire the GIL, so this is not necessary. + pass + + at cpython_api([PyThreadState], lltype.Void) +def PyEval_ReleaseThread(space, tstate): + """Reset the current thread state to NULL and release the global interpreter + lock. The lock must have been created earlier and must be held by the current + thread. The tstate argument, which must not be NULL, is only used to check + that it represents the current thread state --- if it isn't, a fatal error is + reported. This function is not available when thread support is disabled at + compile time.""" + # All cpyext calls release and acquire the GIL, so this is not necessary. + pass + +PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', + typedef='PyGILState_STATE', + compilation_info=CConfig._compilation_info_) + + at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) +def PyGILState_Ensure(space): + # All cpyext calls release and acquire the GIL, so this is not necessary. + return 0 + + at cpython_api([PyGILState_STATE], lltype.Void) +def PyGILState_Release(space, state): + # All cpyext calls release and acquire the GIL, so this is not necessary. 
+ return + @cpython_api([], PyInterpreterState, error=CANNOT_FAIL) def PyInterpreterState_Head(space): """Return the interpreter state object at the head of the list of all such objects. diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -286,7 +286,6 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class descr_virtual_token = vrefinfo.descr_virtual_token - descr_virtualref_index = vrefinfo.descr_virtualref_index # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, @@ -296,7 +295,6 @@ tokenbox = BoxInt() self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) - vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) def optimize_VIRTUAL_REF_FINISH(self, op): # Set the 'forced' field of the virtual_ref. diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -745,13 +745,6 @@ described there.""" raise NotImplementedError - at cpython_api([], lltype.Void) -def PyErr_SetInterrupt(space): - """This function simulates the effect of a SIGINT signal arriving --- the - next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. 
- It may be called without holding the interpreter lock.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) def PySignal_SetWakeupFd(space, fd): """This utility function specifies a file descriptor to which a '\0' byte will @@ -850,13 +843,6 @@ successful invocation of Py_EnterRecursiveCall().""" raise NotImplementedError - at cpython_api([FILE, rffi.CCHARP, rffi.CCHARP, rffi.INT_real], PyObject) -def PyFile_FromFile(space, fp, name, mode, close): - """Create a new PyFileObject from the already-open standard C file - pointer, fp. The function close will be called when the file should be - closed. Return NULL on failure.""" - raise NotImplementedError - @cpython_api([PyFileObject], lltype.Void) def PyFile_IncUseCount(space, p): """Increments the PyFileObject's internal use count to indicate @@ -899,12 +885,6 @@ borrow_from() raise NotImplementedError - at cpython_api([PyFileObject, rffi.INT_real], lltype.Void) -def PyFile_SetBufSize(space, p, n): - """Available on systems with setvbuf() only. This should only be called - immediately after file object creation.""" - raise NotImplementedError - @cpython_api([PyFileObject, rffi.CCHARP], rffi.INT_real, error=0) def PyFile_SetEncoding(space, p, enc): """Set the file's encoding for Unicode output to enc. Return 1 on success and 0 @@ -941,12 +921,6 @@ appropriate exception will be set.""" raise NotImplementedError - at cpython_api([rffi.CCHARP, PyObject], rffi.INT_real, error=-1) -def PyFile_WriteString(space, s, p): - """Write string s to file object p. 
Return 0 on success or -1 on - failure; the appropriate exception will be set.""" - raise NotImplementedError - @cpython_api([], PyObject) def PyFloat_GetInfo(space): """Return a structseq instance which contains information about the @@ -1770,12 +1744,6 @@ """ raise NotImplementedError - at cpython_api([], lltype.Signed, error=CANNOT_FAIL) -def PyInt_GetMax(space): - """Return the system's idea of the largest integer it can handle (LONG_MAX, - as defined in the system header files).""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyInt_ClearFreeList(space): """Clear the integer free list. Return the number of items that could not @@ -2336,28 +2304,6 @@ (: on Unix, ; on Windows).""" raise NotImplementedError - at cpython_api([rffi.CCHARP, ], lltype.Void) -def PySys_WriteStdout(space, format): - """Write the output string described by format to sys.stdout. No - exceptions are raised, even if truncation occurs (see below). - - format should limit the total size of the formatted output string to - 1000 bytes or less -- after 1000 bytes, the output string is truncated. - In particular, this means that no unrestricted "%s" formats should occur; - these should be limited using "%.s" where is a decimal number - calculated so that plus the maximum size of other formatted text does not - exceed 1000 bytes. Also watch out for "%f", which can print hundreds of - digits for very large numbers. - - If a problem occurs, or sys.stdout is unset, the formatted message - is written to the real (C level) stdout.""" - raise NotImplementedError - - at cpython_api([rffi.CCHARP, ], lltype.Void) -def PySys_WriteStderr(space, format): - """As above, but write to sys.stderr or stderr instead.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], lltype.Void) def Py_Exit(space, status): """Exit the current process. 
This calls Py_Finalize() and then calls the diff --git a/pypy/rlib/_rweakkeydict.py b/pypy/rlib/_rweakkeydict.py --- a/pypy/rlib/_rweakkeydict.py +++ b/pypy/rlib/_rweakkeydict.py @@ -123,7 +123,7 @@ @jit.dont_look_inside def ll_get(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get', hex(hash), # ll_debugrepr(d.entries[i].key), # ll_debugrepr(d.entries[i].value)) @@ -143,7 +143,7 @@ def ll_set_nonnull(d, llkey, llvalue): hash = compute_identity_hash(llkey) keyref = weakref_create(llkey) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = keyref d.entries[i].value = llvalue @@ -160,7 +160,7 @@ @jit.dont_look_inside def ll_set_null(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. 
# We don't store a NULL value, but a dead weakref, because diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -314,6 +314,7 @@ 'Py_BuildValue', 'Py_VaBuildValue', 'PyTuple_Pack', 'PyErr_Format', 'PyErr_NewException', 'PyErr_NewExceptionWithDoc', + 'PySys_WriteStdout', 'PySys_WriteStderr', 'PyEval_CallFunction', 'PyEval_CallMethod', 'PyObject_CallFunction', 'PyObject_CallMethod', 'PyObject_CallFunctionObjArgs', 'PyObject_CallMethodObjArgs', @@ -883,6 +884,7 @@ source_dir / "stringobject.c", source_dir / "mysnprintf.c", source_dir / "pythonrun.c", + source_dir / "sysmodule.c", source_dir / "bufferobject.c", source_dir / 
"object.c", source_dir / "cobject.c", diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,7 +4,6 @@ .. contents:: - .. _`try out the translator`: Trying out the translator @@ -18,9 +17,7 @@ * Download and install Pygame_. - * Download and install `Dot Graphviz`_ (optional if you have an internet - connection: the flowgraph viewer then connects to - codespeak.net and lets it convert the flowgraph by a graphviz server). + * Download and install `Dot Graphviz`_ To start the interactive translator shell do:: diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,7 +3,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.annlowlevel import llhelper -from pypy.interpreter.baseobjspace import DescrMismatch +from pypy.interpreter.baseobjspace import W_Root, DescrMismatch from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( @@ -287,11 +287,17 @@ W_TypeObject.__init__(self, space, extension_name, bases_w or [space.w_object], dict_w) - self.flag_cpytype = True + if not space.is_true(space.issubtype(self, space.w_type)): + self.flag_cpytype = True self.flag_heaptype = False @bootstrap_function def init_typeobject(space): + # Probably a hack + space.model.typeorder[W_PyCTypeObject] = [(W_PyCTypeObject, None), + (W_TypeObject, None), + (W_Root, None)] + make_typedescr(space.w_type.instancetypedef, basestruct=PyTypeObject, attach=type_attach, @@ -472,14 +478,19 @@ def PyType_Ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: return 0 + type_realize(space, rffi.cast(PyObject, pto)) + return 0 + +def type_realize(space, py_obj): + pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= 
Py_TPFLAGS_READYING try: - type_realize(space, rffi.cast(PyObject, pto)) - pto.c_tp_flags |= Py_TPFLAGS_READY + w_obj = _type_realize(space, py_obj) finally: pto.c_tp_flags &= ~Py_TPFLAGS_READYING - return 0 + pto.c_tp_flags |= Py_TPFLAGS_READY + return w_obj def solid_base(space, w_type): typedef = w_type.instancetypedef @@ -535,7 +546,7 @@ finally: Py_DecRef(space, base_pyo) -def type_realize(space, py_obj): +def _type_realize(space, py_obj): """ Creates an interpreter type from a PyTypeObject structure. """ @@ -554,7 +565,9 @@ finish_type_1(space, py_type) - w_obj = space.allocate_instance(W_PyCTypeObject, space.w_type) + w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) + + w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype) track_reference(space, py_obj, w_obj) w_obj.__init__(space, py_type) w_obj.ready() diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -205,7 +205,8 @@ if dirname == search: # not found! 
let's hope that the compiled-in path is ok print >> sys.stderr, ('debug: WARNING: library path not found, ' - 'using compiled-in sys.path') + 'using compiled-in sys.path ' + 'and sys.prefix will be unset') newpath = sys.path[:] break newpath = sys.pypy_initial_path(dirname) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -2,14 +2,12 @@ from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyVarObject, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, - Py_GE, CONST_STRING, FILEP, fwrite, build_type_checkers) + Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, - track_reference, get_typedescr, RefcountState) + track_reference, get_typedescr, _Py_NewReference, RefcountState) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall -from pypy.module._file.interp_file import W_File -from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.error import OperationError import pypy.module.__builtin__.operation as operation @@ -185,26 +183,17 @@ return 0 @cpython_api([PyObject, PyTypeObjectPtr], PyObject) -def PyObject_Init(space, py_obj, type): +def PyObject_Init(space, obj, type): """Initialize a newly-allocated object op with its type and initial reference. Returns the initialized object. If type indicates that the object participates in the cyclic garbage detector, it is added to the detector's set of observed objects. 
Other fields of the object are not affected.""" - if not py_obj: + if not obj: PyErr_NoMemory(space) - py_obj.c_ob_type = type - py_obj.c_ob_refcnt = 1 - w_type = from_ref(space, rffi.cast(PyObject, type)) - assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, py_obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, py_obj) - else: - assert False, "Please add more cases in PyObject_Init" - return py_obj + obj.c_ob_type = type + _Py_NewReference(space, obj) + return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) def PyObject_InitVar(space, py_obj, type, size): @@ -429,40 +418,3 @@ rffi.free_nonmovingbuffer(data, buf) return 0 -PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File) - - at cpython_api([PyObject, rffi.INT_real], PyObject) -def PyFile_GetLine(space, w_obj, n): - """ - Equivalent to p.readline([n]), this function reads one line from the - object p. p may be a file object or any object with a readline() - method. If n is 0, exactly one line is read, regardless of the length of - the line. If n is greater than 0, no more than n bytes will be read - from the file; a partial line can be returned. In both cases, an empty string - is returned if the end of the file is reached immediately. 
If n is less than - 0, however, one line is read regardless of length, but EOFError is - raised if the end of the file is reached immediately.""" - try: - w_readline = space.getattr(w_obj, space.wrap('readline')) - except OperationError: - raise OperationError( - space.w_TypeError, space.wrap( - "argument must be a file, or have a readline() method.")) - - n = rffi.cast(lltype.Signed, n) - if space.is_true(space.gt(space.wrap(n), space.wrap(0))): - return space.call_function(w_readline, space.wrap(n)) - elif space.is_true(space.lt(space.wrap(n), space.wrap(0))): - return space.call_function(w_readline) - else: - # XXX Raise EOFError as specified - return space.call_function(w_readline) - at cpython_api([CONST_STRING, CONST_STRING], PyObject) -def PyFile_FromString(space, filename, mode): - """ - On success, return a new file object that is opened on the file given by - filename, with a file mode given by mode, where mode has the same - semantics as the standard C routine fopen(). On failure, return NULL.""" - w_filename = space.wrap(rffi.charp2str(filename)) - w_mode = space.wrap(rffi.charp2str(mode)) - return space.call_method(space.builtin, 'file', w_filename, w_mode) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -108,6 +108,11 @@ #return w_value or None return None + def impl_setdefault(self, w_key, w_default): + # here the dict is always empty + self._as_rdict().impl_fallback_setitem(w_key, w_default) + return w_default + def impl_setitem(self, w_key, w_value): self._as_rdict().impl_fallback_setitem(w_key, w_value) @@ -181,6 +186,9 @@ # _________________________________________________________________ # fallback implementation methods + def impl_fallback_setdefault(self, w_key, w_default): + return self.r_dict_content.setdefault(w_key, w_default) + def impl_fallback_setitem(self, w_key, w_value): self.r_dict_content[w_key] = 
w_value @@ -227,6 +235,7 @@ ("length", 0), ("setitem_str", 2), ("setitem", 2), + ("setdefault", 2), ("delitem", 1), ("iter", 0), ("items", 0), @@ -317,6 +326,14 @@ def impl_setitem_str(self, key, w_value): self.content[key] = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + return self.content.setdefault(space.str_w(w_key), w_default) + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) @@ -787,13 +804,7 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup - w_value = w_dict.getitem(w_key) - if w_value is not None: - return w_value - else: - w_dict.setitem(w_key, w_default) - return w_default + return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): len_defaults = len(defaults_w) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -685,7 +685,7 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) @@ -1012,7 +1012,7 @@ def main(n): i = 1 while i < n: - i += len(xrange(i)) / i + i += len(xrange(i+1)) - i return i log = self.run(main, [10000]) @@ -1023,17 +1023,49 @@ guard_true(i10, descr=) # This can be improved if the JIT realized the lookup of i5 produces # a constant and thus can be removed entirely - i12 = int_sub(i5, 1) - i13 = 
uint_floordiv(i12, i7) + i120 = int_add(i5, 1) + i140 = int_lt(0, i120) + guard_true(i140, descr=) + i13 = uint_floordiv(i5, i7) i15 = int_add(i13, 1) i17 = int_lt(i15, 0) - guard_false(i17, descr=) - i18 = int_floordiv(i15, i5) - i19 = int_xor(i15, i5) - i20 = int_mod(i15, i5) - i21 = int_is_true(i20) - i22 = int_add_ovf(i5, i18) - guard_no_overflow(descr=) + guard_false(i17, descr=) + i20 = int_sub(i15, i5) + i21 = int_add_ovf(i5, i20) + guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, p3, p4, i22, i6, i7, p8, p9, descr=) + jump(p0, p1, p2, p3, p4, i21, i6, i7, p8, p9, descr=) """) + + def test_unpack_iterable_non_list_tuple(self): + def main(n): + import array + + items = [array.array("i", [1])] * n + total = 0 + for a, in items: + total += a + return total + + log = self.run(main, [1000000]) + assert log.result == 1000000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i16 = int_ge(i12, i13) + guard_false(i16, descr=) + p17 = getarrayitem_gc(p15, i12, descr=) + i19 = int_add(i12, 1) + setfield_gc(p4, i19, descr=) + guard_nonnull_class(p17, 146982464, descr=) + i21 = getfield_gc(p17, descr=) + i23 = int_lt(0, i21) + guard_true(i23, descr=) + i24 = getfield_gc(p17, descr=) + i25 = getarrayitem_raw(i24, 0, descr=) + i27 = int_lt(1, i21) + guard_false(i27, descr=) + i28 = int_add_ovf(i10, i25) + guard_no_overflow(descr=) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) + """) diff --git a/pypy/jit/metainterp/test/test_optimizebasic.py b/pypy/jit/metainterp/test/test_optimizebasic.py --- a/pypy/jit/metainterp/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/test/test_optimizebasic.py @@ -253,7 +253,7 @@ loop.call_pure_results = args_dict() if call_pure_results is not None: for k, v in call_pure_results.items(): - loop.call_pure_results[list(k)] = v + loop.call_pure_results[list(k)] = v metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): 
metainterp_sd.virtualref_info = self.vrefinfo @@ -2886,7 +2886,7 @@ # the result of the call, recorded as the first arg), or turned into # a regular CALL. arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)] - call_pure_results = {tuple(arg_consts): ConstInt(42)} + call_pure_results = {tuple(arg_consts): ConstInt(42)} ops = ''' [i0, i1, i2] escape(i1) @@ -2931,7 +2931,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -2964,7 +2963,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3005,7 +3003,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3062,7 +3059,7 @@ self.loop.inputargs[0].value = self.nodeobjvalue self.check_expanded_fail_descr('''p2, p1 p0.refdescr = p2 - where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 where p1 is a node_vtable, nextdescr=p1b where p1b is a node_vtable, valuedescr=i1 ''', rop.GUARD_NO_EXCEPTION) @@ -3084,7 +3081,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3111,7 +3107,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - 
setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3360,7 +3355,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) i3 = int_lt(i2, -5) guard_true(i3) [] @@ -3371,7 +3366,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) jump(i0) """ diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -212,52 +212,48 @@ # cpython, and rpython, assumed that integer division truncates # towards -infinity. however, in C99 and most (all?) other # backends, integer division truncates towards 0. so assuming - # that, we can generate scary code that applies the necessary + # that, we call a helper function that applies the necessary # correction in the right cases. - # paper and pencil are encouraged for this :) - - from pypy.rpython.rbool import bool_repr - assert isinstance(repr.lowleveltype, Number) - c_zero = inputconst(repr.lowleveltype, repr.lowleveltype._default) op = func.split('_', 1)[0] if op == 'floordiv': - # return (x/y) - (((x^y)<0)&((x%y)!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod = hop.genop(prefix + 'mod', vlist, - resulttype=repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_mod, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_res = hop.genop(prefix + 'sub', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'floordiv'] + v_res = hop.gendirectcall(llfunc, vlist[0], vlist[1], v_res) elif op == 'mod': - # return r + y*(((x^y)<0)&(r!=0)); - v_xor = 
hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_res, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr1 = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_corr = hop.genop(prefix + 'mul', [v_corr1, vlist[1]], - resulttype=repr) - v_res = hop.genop(prefix + 'add', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'mod'] + v_res = hop.gendirectcall(llfunc, vlist[1], v_res) + v_res = hop.llops.convertvar(v_res, repr, r_result) return v_res +INT_BITS_1 = r_int.BITS - 1 +LLONG_BITS_1 = r_longlong.BITS - 1 + +def ll_correct_int_floordiv(x, y, r): + p = r * y + if y < 0: u = p - x + else: u = x - p + return r + (u >> INT_BITS_1) + +def ll_correct_llong_floordiv(x, y, r): + p = r * y + if y < 0: u = p - x + else: u = x - p + return r + (u >> LLONG_BITS_1) + +def ll_correct_int_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> INT_BITS_1)) + +def ll_correct_llong_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> LLONG_BITS_1)) + + #Helper functions for comparisons def _rtype_compare_template(hop, func): diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -19,6 +19,7 @@ 'load_module': 'interp_imp.load_module', 'load_source': 'interp_imp.load_source', 'load_compiled': 'interp_imp.load_compiled', + 'load_dynamic': 'interp_imp.load_dynamic', '_run_compiled_module': 'interp_imp._run_compiled_module', # pypy '_getimporter': 'importing._getimporter', # pypy #'run_module': 'interp_imp.run_module', @@ -36,7 +37,6 @@ } appleveldefs = { - 'load_dynamic': 'app_imp.load_dynamic', } def __init__(self, space, *args): diff --git a/pypy/translator/backendopt/test/test_inline.py 
b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if 
isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/pypy/translator/goal/targetrpystonedalone.py b/pypy/translator/goal/targetrpystonedalone.py --- a/pypy/translator/goal/targetrpystonedalone.py +++ b/pypy/translator/goal/targetrpystonedalone.py @@ -2,11 +2,11 @@ from pypy.translator.test import rpystone from pypy.translator.goal import richards import pypy.interpreter.gateway # needed before sys, order of imports !!! -from pypy.module.sys.version import svn_revision +from pypy.tool.version import get_repo_version_info # __________ Entry point __________ -VERSION = svn_revision() +VERSION = get_repo_version_info()[2] # note that we have %f but no length specifiers in RPython diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pyfile.py @@ -0,0 +1,68 @@ +from 
pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CONST_STRING, FILEP, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject) +from pypy.interpreter.error import OperationError +from pypy.module._file.interp_file import W_File + +PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File) + + at cpython_api([PyObject, rffi.INT_real], PyObject) +def PyFile_GetLine(space, w_obj, n): + """ + Equivalent to p.readline([n]), this function reads one line from the + object p. p may be a file object or any object with a readline() + method. If n is 0, exactly one line is read, regardless of the length of + the line. If n is greater than 0, no more than n bytes will be read + from the file; a partial line can be returned. In both cases, an empty string + is returned if the end of the file is reached immediately. If n is less than + 0, however, one line is read regardless of length, but EOFError is + raised if the end of the file is reached immediately.""" + try: + w_readline = space.getattr(w_obj, space.wrap('readline')) + except OperationError: + raise OperationError( + space.w_TypeError, space.wrap( + "argument must be a file, or have a readline() method.")) + + n = rffi.cast(lltype.Signed, n) + if space.is_true(space.gt(space.wrap(n), space.wrap(0))): + return space.call_function(w_readline, space.wrap(n)) + elif space.is_true(space.lt(space.wrap(n), space.wrap(0))): + return space.call_function(w_readline) + else: + # XXX Raise EOFError as specified + return space.call_function(w_readline) + + at cpython_api([CONST_STRING, CONST_STRING], PyObject) +def PyFile_FromString(space, filename, mode): + """ + On success, return a new file object that is opened on the file given by + filename, with a file mode given by mode, where mode has the same + semantics as the standard C routine fopen(). 
On failure, return NULL.""" + w_filename = space.wrap(rffi.charp2str(filename)) + w_mode = space.wrap(rffi.charp2str(mode)) + return space.call_method(space.builtin, 'file', w_filename, w_mode) + + at cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject) +def PyFile_FromFile(space, fp, name, mode, close): + """Create a new PyFileObject from the already-open standard C file + pointer, fp. The function close will be called when the file should be + closed. Return NULL on failure.""" + raise NotImplementedError + + at cpython_api([PyObject, rffi.INT_real], lltype.Void) +def PyFile_SetBufSize(space, w_file, n): + """Available on systems with setvbuf() only. This should only be called + immediately after file object creation.""" + raise NotImplementedError + + at cpython_api([CONST_STRING, PyObject], rffi.INT_real, error=-1) +def PyFile_WriteString(space, s, w_p): + """Write string s to file object p. Return 0 on success or -1 on + failure; the appropriate exception will be set.""" + w_s = space.wrap(rffi.charp2str(s)) + space.call_method(w_p, "write", w_s) + return 0 + diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,14 +190,30 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. 
""" return posix.waitpid(-1, 0) + def wait3(options): + """ wait3(options) -> (pid, status, rusage) + + Wait for completion of a child process and provides resource usage informations + """ + from _pypy_wait import wait3 + return wait3(options) + + def wait4(pid, options): + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -285,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): diff --git a/pypy/module/cpyext/include/longintrepr.h b/pypy/module/cpyext/include/longintrepr.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/longintrepr.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -3,8 +3,102 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.jitexc import JitException +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization + +class CachedField(object): + def __init__(self): + # Cache information for a field descr. It can be in one + # of two states: + # + # 1. 'cached_fields' is a dict mapping OptValues of structs + # to OptValues of fields. All fields on-heap are + # synchronized with the values stored in the cache. + # + # 2. we just did one setfield, which is delayed (and thus + # not synchronized). 
'lazy_setfield' is the delayed + # ResOperation. In this state, 'cached_fields' contains + # out-of-date information. More precisely, the field + # value pending in the ResOperation is *not* visible in + # 'cached_fields'. + # + self._cached_fields = {} + self._lazy_setfield = None + self._lazy_setfield_registered = False + + def do_setfield(self, optheap, op): + # Update the state with the SETFIELD_GC operation 'op'. + structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + assert not self.possible_aliasing(optheap, structvalue) + cached_fieldvalue = self._cached_fields.get(structvalue, None) + if cached_fieldvalue is not fieldvalue: + # common case: store the 'op' as lazy_setfield, and register + # myself in the optheap's _lazy_setfields list + self._lazy_setfield = op + if not self._lazy_setfield_registered: + optheap._lazy_setfields.append(self) + self._lazy_setfield_registered = True + else: + # this is the case where the pending setfield ends up + # storing precisely the value that is already there, + # as proved by 'cached_fields'. In this case, we don't + # need any _lazy_setfield: the heap value is already right. + # Note that this may reset to None a non-None lazy_setfield, + # cancelling its previous effects with no side effect. + self._lazy_setfield = None + + def possible_aliasing(self, optheap, structvalue): + # If lazy_setfield is set and contains a setfield on a different + # structvalue, then we are annoyed, because it may point to either + # the same or a different structure at runtime. + return (self._lazy_setfield is not None + and (optheap.getvalue(self._lazy_setfield.getarg(0)) + is not structvalue)) + + def getfield_from_cache(self, optheap, structvalue): + # Returns the up-to-date field's value, or None if not cached. 
+ if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + if self._lazy_setfield is not None: + op = self._lazy_setfield + assert optheap.getvalue(op.getarg(0)) is structvalue + return optheap.getvalue(op.getarg(1)) + else: + return self._cached_fields.get(structvalue, None) + + def remember_field_value(self, structvalue, fieldvalue): + assert self._lazy_setfield is None + self._cached_fields[structvalue] = fieldvalue + + def force_lazy_setfield(self, optheap): + op = self._lazy_setfield + if op is not None: + # This is the way _lazy_setfield is usually reset to None. + # Now we clear _cached_fields, because actually doing the + # setfield might impact any of the stored result (because of + # possible aliasing). + self._cached_fields.clear() + self._lazy_setfield = None + optheap.next_optimization.propagate_forward(op) + # Once it is done, we can put at least one piece of information + # back in the cache: the value of this particular structure's + # field. 
+ structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + self.remember_field_value(structvalue, fieldvalue) + + def get_reconstructed(self, optimizer, valuemap): + assert self._lazy_setfield is None + cf = CachedField() + for structvalue, fieldvalue in self._cached_fields.iteritems(): + structvalue2 = structvalue.get_reconstructed(optimizer, valuemap) + fieldvalue2 = fieldvalue .get_reconstructed(optimizer, valuemap) + cf._cached_fields[structvalue2] = fieldvalue2 + return cf + class CachedArrayItems(object): def __init__(self): @@ -20,40 +114,23 @@ """Cache repeated heap accesses""" def __init__(self): - # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}} + # cached fields: {descr: CachedField} self.cached_fields = {} - self.known_heap_fields = {} + self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - # lazily written setfields (at most one per descr): {descr: op} - self.lazy_setfields = {} - self.lazy_setfields_descrs = [] # keys (at least) of previous dict def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() if True: self.force_all_lazy_setfields() - assert not self.lazy_setfields_descrs - assert not self.lazy_setfields else: - new.lazy_setfields_descrs = self.lazy_setfields_descrs - new.lazy_setfields = self.lazy_setfields + assert 0 # was: new.lazy_setfields = self.lazy_setfields for descr, d in self.cached_fields.items(): - newd = {} - new.cached_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - - for descr, d in self.known_heap_fields.items(): - newd = {} - new.known_heap_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - + new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap) + 
new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): newd = {} @@ -74,30 +151,16 @@ return new def clean_caches(self): + del self._lazy_setfields[:] self.cached_fields.clear() - self.known_heap_fields.clear() self.cached_arrayitems.clear() - def cache_field_value(self, descr, value, fieldvalue, write=False): - if write: - # when seeing a setfield, we have to clear the cache for the same - # field on any other structure, just in case they are aliasing - # each other - d = self.cached_fields[descr] = {} - else: - d = self.cached_fields.setdefault(descr, {}) - d[value] = fieldvalue - - def read_cached_field(self, descr, value): - # XXX self.cached_fields and self.lazy_setfields should probably - # be merged somehow - d = self.cached_fields.get(descr, None) - if d is None: - op = self.lazy_setfields.get(descr, None) - if op is None: - return None - return self.getvalue(op.getarg(1)) - return d.get(value, None) + def field_cache(self, descr): + try: + cf = self.cached_fields[descr] + except KeyError: + cf = self.cached_fields[descr] = CachedField() + return cf def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): d = self.cached_arrayitems.get(descr, None) @@ -157,11 +220,15 @@ self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or - opnum == rop.SETFIELD_RAW or - opnum == rop.SETARRAYITEM_GC or - opnum == rop.SETARRAYITEM_RAW or - opnum == rop.DEBUG_MERGE_POINT): + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == 
rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or @@ -179,8 +246,8 @@ for fielddescr in effectinfo.write_descrs_fields: self.force_lazy_setfield(fielddescr) try: - del self.cached_fields[fielddescr] - del self.known_heap_fields[fielddescr] + cf = self.cached_fields[fielddescr] + cf._cached_fields.clear() except KeyError: pass for arraydescr in effectinfo.write_descrs_arrays: @@ -194,10 +261,7 @@ # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. return - self.force_all_lazy_setfields() - elif op.is_final() or (not we_are_translated() and - op.getopnum() < 0): # escape() operations - self.force_all_lazy_setfields() + self.force_all_lazy_setfields() self.clean_caches() @@ -205,58 +269,54 @@ assert value.is_constant() newvalue = self.getvalue(value.box) if value is not newvalue: - for d in self.cached_fields.values(): - if value in d: - d[newvalue] = d[value] - # FIXME: Update the other caches too? - - - def force_lazy_setfield(self, descr, before_guard=False): + for cf in self.cached_fields.itervalues(): + if value in cf._cached_fields: + cf._cached_fields[newvalue] = cf._cached_fields[value] + + def force_lazy_setfield(self, descr): try: - op = self.lazy_setfields[descr] + cf = self.cached_fields[descr] except KeyError: return - del self.lazy_setfields[descr] - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - try: - heapvalue = self.known_heap_fields[op.getdescr()][value] - if fieldvalue is heapvalue: - return - except KeyError: - pass - self.next_optimization.propagate_forward(op) + cf.force_lazy_setfield(self) + def fixup_guard_situation(self): # hackish: reverse the order of the last two operations if it makes # sense to avoid a situation like "int_eq/setfield_gc/guard_true", # which the backend (at least the x86 backend) does not handle well. 
newoperations = self.optimizer.newoperations - if before_guard and len(newoperations) >= 2: - lastop = newoperations[-1] - prevop = newoperations[-2] - # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" - # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" - # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.getopnum() - lastop_args = lastop.getarglist() - if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE - or prevop.is_ovf()) - and prevop.result not in lastop_args): - newoperations[-2] = lastop - newoperations[-1] = prevop + if len(newoperations) < 2: + return + lastop = newoperations[-1] + if (lastop.getopnum() != rop.SETFIELD_GC and + lastop.getopnum() != rop.SETARRAYITEM_GC): + return + # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" + # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" + # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" + prevop = newoperations[-2] + opnum = prevop.getopnum() + if not (prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE + or prevop.is_ovf()): + return + if prevop.result in lastop.getarglist(): + return + newoperations[-2] = lastop + newoperations[-1] = prevop def force_all_lazy_setfields(self): - if len(self.lazy_setfields_descrs) > 0: - for descr in self.lazy_setfields_descrs: - self.force_lazy_setfield(descr) - del self.lazy_setfields_descrs[:] + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + cf.force_lazy_setfield(self) def force_lazy_setfields_for_guard(self): pendingfields = [] - for descr in self.lazy_setfields_descrs: - try: - op = self.lazy_setfields[descr] - except KeyError: + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + op = cf._lazy_setfield + if op is None: continue # the only really interesting case that we need to handle in the # guards' resume data is that of a virtual object that is 
stored @@ -266,41 +326,27 @@ fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py - pendingfields.append((descr, value.box, + pendingfields.append((op.getdescr(), value.box, fieldvalue.get_key_box())) else: - self.force_lazy_setfield(descr, before_guard=True) + cf.force_lazy_setfield(self) + self.fixup_guard_situation() return pendingfields - def force_lazy_setfield_if_necessary(self, op, value, write=False): - try: - op1 = self.lazy_setfields[op.getdescr()] - except KeyError: - if write: - self.lazy_setfields_descrs.append(op.getdescr()) - else: - if self.getvalue(op1.getarg(0)) is not value: - self.force_lazy_setfield(op.getdescr()) - def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.getarg(0)) - self.force_lazy_setfield_if_necessary(op, value) - # check if the field was read from another getfield_gc just before - # or has been written to recently - fieldvalue = self.read_cached_field(op.getdescr(), value) + structvalue = self.getvalue(op.getarg(0)) + cf = self.field_cache(op.getdescr()) + fieldvalue = cf.getfield_from_cache(self, structvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return # default case: produce the operation - value.ensure_nonnull() + structvalue.ensure_nonnull() ###self.optimizer.optimize_default(op) self.emit_operation(op) # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.getdescr(), value, fieldvalue) - # keep track of what's on the heap - d = self.known_heap_fields.setdefault(op.getdescr(), {}) - d[value] = fieldvalue + cf.remember_field_value(structvalue, fieldvalue) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], @@ -309,14 +355,8 @@ (op.getdescr().repr_of_descr())) raise BogusPureField # - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - cached_fieldvalue = 
self.read_cached_field(op.getdescr(), value) - if fieldvalue is not cached_fieldvalue: - self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.getdescr()] = op - # remember the result of future reads of the field - self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) + cf = self.field_cache(op.getdescr()) + cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC(self, op): value = self.getvalue(op.getarg(0)) diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -116,7 +116,6 @@ /* Begin PYPY hacks */ /* #include "Python.h" */ -#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 #define HAVE_UINT32_T #define HAVE_INT32_T #define HAVE_UINT64_T diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -126,8 +126,16 @@ _run_compiled_module(space, w_modulename, filename, w_file, w_mod) return w_mod + at unwrap_spec(filename=str) +def load_dynamic(space, w_modulename, filename, w_file=None): + if not space.config.objspace.usemodules.cpyext: + raise OperationError(space.w_ImportError, space.wrap( + "Not implemented")) + importing.load_c_extension(space, filename, space.str_w(w_modulename)) + return importing.check_sys_modules(space, w_modulename) + def new_module(space, w_name): - return space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.tool.autopath import pypydir -from pypy.rlib import rposix +from pypy.rlib import jit, 
rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN @@ -91,11 +91,11 @@ # # Custom implementations - + at jit.purefunction def ll_math_isnan(y): return bool(math_isnan(y)) - + at jit.purefunction def ll_math_isinf(y): return bool(math_isinf(y)) diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -248,3 +248,8 @@ """This is synonymous to ``raise SystemExit''. It will cause the current thread to exit silently unless the exception is caught.""" raise OperationError(space.w_SystemExit, space.w_None) + +def interrupt_main(space): + """Raise a KeyboardInterrupt in the main thread. +A subthread can use this function to interrupt the main thread.""" + space.check_signal_action.set_interrupt() diff --git a/pypy/module/imp/app_imp.py b/pypy/module/imp/app_imp.py deleted file mode 100644 --- a/pypy/module/imp/app_imp.py +++ /dev/null @@ -1,5 +0,0 @@ - - -def load_dynamic(name, pathname, file=None): - """Always raises ah ImportError on pypy""" - raise ImportError('Not implemented') diff --git a/lib-python/modified-2.7.0/distutils/msvc9compiler.py b/lib-python/modified-2.7.0/distutils/msvc9compiler.py --- a/lib-python/modified-2.7.0/distutils/msvc9compiler.py +++ b/lib-python/modified-2.7.0/distutils/msvc9compiler.py @@ -644,6 +644,7 @@ temp_manifest = os.path.join( build_temp, os.path.basename(output_filename) + ".manifest") + ld_args.append('/MANIFEST') ld_args.append('/MANIFESTFILE:' + temp_manifest) if extra_preargs: diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -4,6 +4,8 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask +from pypy.rlib.rbigint import rbigint 
+from pypy.rlib.rarithmetic import intmask PyLong_Check, PyLong_CheckExact = build_type_checkers("Long") @@ -177,4 +179,31 @@ assert isinstance(w_long, W_LongObject) return w_long.num.sign +UCHARP = rffi.CArrayPtr(rffi.UCHAR) + at cpython_api([UCHARP, rffi.SIZE_T, rffi.INT_real, rffi.INT_real], PyObject) +def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): + little_endian = rffi.cast(lltype.Signed, little_endian) + signed = rffi.cast(lltype.Signed, signed) + result = rbigint() + negative = False + + for i in range(0, n): + if little_endian: + c = intmask(bytes[i]) + else: + c = intmask(bytes[n - i - 1]) + if i == 0 and signed and c & 0x80: + negative = True + if negative: + c = c ^ 0xFF + digit = rbigint.fromint(c) + + result = result.lshift(8) + result = result.add(digit) + + if negative: + result = result.neg() + + return space.newlong_from_rbigint(result) + diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_extmodules.py @@ -0,0 +1,68 @@ +import sys +import pytest + +from pypy.config.pypyoption import get_pypy_config +from pypy.objspace.std import StdObjSpace +from pypy.tool.udir import udir + +mod_init = """ +from pypy.interpreter.mixedmodule import MixedModule + +import time + +class Module(MixedModule): + + appleveldefs = {} + + interpleveldefs = { + 'clock' : 'interp_time.clock', + 'time' : 'interp_time.time_', + 'sleep' : 'interp_time.sleep', + } +""" + +mod_interp = """ +import time + +from pypy.interpreter.gateway import unwrap_spec + +def clock(space): + return space.wrap(time.clock()) + +def time_(space): + return space.wrap(time.time()) + + at unwrap_spec(seconds=float) +def sleep(space, seconds): + time.sleep(seconds) +""" + +old_sys_path = [] + +def init_extmodule_code(): + pkg = udir.join("testext") + pkg.ensure(dir=True) + pkg.join("__init__.py").write("# package") + mod = pkg.join("extmod") + mod.ensure(dir=True) + 
mod.join("__init__.py").write(mod_init) + mod.join("interp_time.py").write(mod_interp) + +class AppTestExtModules(object): + def setup_class(cls): + init_extmodule_code() + conf = get_pypy_config() + conf.objspace.extmodules = 'testext.extmod' + old_sys_path[:] = sys.path[:] + sys.path.insert(0, str(udir)) + space = StdObjSpace(conf) + cls.space = space + + def teardown_class(cls): + sys.path[:] = old_sys_path + + @pytest.mark.skipif("config.option.runappdirect") + def test_import(self): + import extmod + assert extmod.__file__.endswith('extmod') + assert type(extmod.time()) is float diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -144,3 +144,20 @@ """), ]) assert module.from_string() == 0x1234 + + def test_frombytearray(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC", 2, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x9ABC + assert module.from_bytearray(True, True) == -0x6543 + assert module.from_bytearray(False, False) == 0xBC9A + assert module.from_bytearray(False, True) == -0x4365 + diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -127,7 +129,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" 
faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" block @@ -147,7 +149,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -163,7 +164,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... + at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -365,7 +365,11 @@ def setbuiltinmodule(self, importname): """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" - fullname = "pypy.module.%s" % importname + if '.' 
in importname: + fullname = importname + importname = fullname.rsplit('.', 1)[1] + else: + fullname = "pypy.module.%s" % importname Module = __import__(fullname, None, None, ["Module"]).Module @@ -428,6 +432,11 @@ if value and name not in modules: modules.append(name) + if self.config.objspace.extmodules: + for name in self.config.objspace.extmodules.split(','): + if name not in modules: + modules.append(name) + # a bit of custom logic: time2 or rctime take precedence over time # XXX this could probably be done as a "requires" in the config if ('time2' in modules or 'rctime' in modules) and 'time' in modules: @@ -745,7 +754,12 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - items = [] + # If we know the expected length we can preallocate. + if expected_length == -1: + items = [] + else: + items = [None] * expected_length + idx = 0 while True: try: w_item = self.next(w_iterator) @@ -753,19 +767,22 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and len(items) == expected_length: + if expected_length != -1 and idx == expected_length: raise OperationError(self.w_ValueError, self.wrap("too many values to unpack")) - items.append(w_item) - if expected_length != -1 and len(items) < expected_length: - i = len(items) - if i == 1: + if expected_length == -1: + items.append(w_item) + else: + items[idx] = w_item + idx += 1 + if expected_length != -1 and idx < expected_length: + if idx == 1: plural = "" else: plural = "s" raise OperationError(self.w_ValueError, self.wrap("need more than %d value%s to unpack" % - (i, plural))) + (idx, plural))) return items unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -499,10 +499,14 @@ def getanyitem(str): return 
str.basecharclass() - def method_split(str, patt): # XXX + def method_split(str, patt, max=-1): getbookkeeper().count("str_split", str, patt) return getbookkeeper().newlist(str.basestringclass()) + def method_rsplit(str, patt, max=-1): + getbookkeeper().count("str_rsplit", str, patt) + return getbookkeeper().newlist(str.basestringclass()) + def method_replace(str, s1, s2): return str.basestringclass() diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not 
None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -211,8 +211,11 @@ def ll_stringslice_minusone(s): return s.ll_substring(0, s.ll_strlen()-1) - def ll_split_chr(RESULT, s, c): - return RESULT.ll_convert_from_array(s.ll_split_chr(c)) + def ll_split_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_split_chr(c, max)) + + def ll_rsplit_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_rsplit_chr(c, max)) def ll_int(s, base): if not 2 <= base <= 36: diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -274,8 +274,12 @@ screeninfo.append((0, [])) self.lxy = p, ln prompt = self.get_prompt(ln, ll >= p >= 0) + while '\n' in prompt: + pre_prompt, _, prompt = prompt.partition('\n') + screen.append(pre_prompt) + screeninfo.append((0, [])) p -= ll + 1 - lp = len(prompt) + prompt, lp = self.process_prompt(prompt) l, l2 = disp_str(line) wrapcount = (len(l) + lp) / w if wrapcount == 0: @@ -297,6 +301,31 @@ screeninfo.append((0, [])) return screen + def process_prompt(self, prompt): + """ Process the prompt. + + This means calculate the length of the prompt. The character \x01 + and \x02 are used to bracket ANSI control sequences and need to be + excluded from the length calculation. So also a copy of the prompt + is returned with these control characters removed. 
""" + + out_prompt = '' + l = len(prompt) + pos = 0 + while True: + s = prompt.find('\x01', pos) + if s == -1: + break + e = prompt.find('\x02', s) + if e == -1: + break + # Found start and end brackets, subtract from string length + l = l - (e-s+1) + out_prompt += prompt[pos:s] + prompt[s+1:e] + pos = e+1 + out_prompt += prompt[pos:] + return out_prompt, l + def bow(self, p=None): """Return the 0-based index of the word break preceding p most immediately. diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. 
It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. """ - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. 
- # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. + if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return 
w_self._pure_version_tag() @purefunction_promote() @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/pypy/module/cpyext/include/import.h b/pypy/module/cpyext/include/import.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/import.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/src/sysmodule.c b/pypy/module/cpyext/src/sysmodule.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/sysmodule.c @@ -0,0 +1,103 @@ +#include + +/* Reimplementation of PyFile_WriteString() no calling indirectly + PyErr_CheckSignals(): avoid the call to PyObject_Str(). 
*/ + +static int +sys_pyfile_write_unicode(PyObject *unicode, PyObject *file) +{ + PyObject *writer = NULL, *args = NULL, *result = NULL; + int err; + + if (file == NULL) + return -1; + + writer = PyObject_GetAttrString(file, "write"); + if (writer == NULL) + goto error; + + args = PyTuple_Pack(1, unicode); + if (args == NULL) + goto error; + + result = PyEval_CallObject(writer, args); + if (result == NULL) { + goto error; + } else { + err = 0; + goto finally; + } + +error: + err = -1; +finally: + Py_XDECREF(writer); + Py_XDECREF(args); + Py_XDECREF(result); + return err; +} + +static int +sys_pyfile_write(const char *text, PyObject *file) +{ + PyObject *unicode = NULL; + int err; + + if (file == NULL) + return -1; + + unicode = PyUnicode_FromString(text); + if (unicode == NULL) + return -1; + + err = sys_pyfile_write_unicode(unicode, file); + Py_DECREF(unicode); + return err; +} + +/* APIs to write to sys.stdout or sys.stderr using a printf-like interface. + */ + +static void +sys_write(char *name, FILE *fp, const char *format, va_list va) +{ + PyObject *file; + PyObject *error_type, *error_value, *error_traceback; + char buffer[1001]; + int written; + + PyErr_Fetch(&error_type, &error_value, &error_traceback); + file = PySys_GetObject(name); + written = vsnprintf(buffer, sizeof(buffer), format, va); + if (sys_pyfile_write(buffer, file) != 0) { + PyErr_Clear(); + fputs(buffer, fp); + } + if (written < 0 || (size_t)written >= sizeof(buffer)) { + const char *truncated = "... truncated"; + if (sys_pyfile_write(truncated, file) != 0) + fputs(truncated, fp); + } + PyErr_Restore(error_type, error_value, error_traceback); +} + +void +PySys_WriteStdout(const char *format, ...) +{ + va_list va; + + va_start(va, format); + sys_write("stdout", stdout, format, va); + va_end(va); +} + +void +PySys_WriteStderr(const char *format, ...) 
+{ + va_list va; + + va_start(va, format); + sys_write("stderr", stderr, format, va); + va_end(va); +} + diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( 
exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -7,6 +7,12 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_reload(self, space, api): + pdb = api.PyImport_Import(space.wrap("pdb")) + space.delattr(pdb, space.wrap("set_trace")) + pdb = api.PyImport_ReloadModule(pdb) + assert space.getattr(pdb, space.wrap("set_trace")) + class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): 
skip("leak?") diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.extmodules.rst @@ -0,0 +1,12 @@ +You can pass a comma-separated list of third-party builtin modules +which should be translated along with the standard modules within +``pypy.module``. + +The module names need to be fully qualified (i.e. have a ``.`` in them), +be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. +``mypkg.somemod``. + +Once translated, the module will be accessible with a simple:: + + import somemod + diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,9 +4,9 @@ from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, - getattrfunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, - ssizeobjargproc, iternextfunc, initproc, richcmpfunc, hashfunc, - descrgetfunc, descrsetfunc, objobjproc) + getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, + ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, + hashfunc, descrgetfunc, descrsetfunc, objobjproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State @@ -65,6 +65,12 @@ finally: rffi.free_charp(name_ptr) +def wrap_getattro(space, w_self, w_args, func): + func_target = rffi.cast(getattrofunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + return generic_cpy_call(space, func_target, w_self, args_w[0]) + def wrap_setattr(space, w_self, w_args, func): func_target = rffi.cast(setattrofunc, func) check_num_args(space, w_args, 2) @@ -289,7 +295,12 @@ # irregular interface, because of tp_getattr/tp_getattro confusion if NAME == 
"__getattr__": - wrapper = wrap_getattr + if SLOT == "tp_getattro": + wrapper = wrap_getattro + elif SLOT == "tp_getattr": + wrapper = wrap_getattr + else: + assert False function = globals().get(FUNCTION, None) assert FLAGS == 0 or FLAGS == PyWrapperFlag_KEYWORDS diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -443,7 +443,8 @@ "ll_upper": Meth([], self.SELFTYPE_T), "ll_lower": Meth([], self.SELFTYPE_T), "ll_substring": Meth([Signed, Signed], self.SELFTYPE_T), # ll_substring(start, count) - "ll_split_chr": Meth([self.CHAR], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_split_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_rsplit_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! "ll_contains": Meth([self.CHAR], Bool), "ll_replace_chr_chr": Meth([self.CHAR, self.CHAR], self.SELFTYPE_T), }) @@ -1480,9 +1481,16 @@ # NOT_RPYTHON return self.make_string(self._str[start:start+count]) - def ll_split_chr(self, ch): + def ll_split_chr(self, ch, max): # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.split(ch)] + l = [self.make_string(s) for s in self._str.split(ch, max)] + res = _array(Array(self._TYPE), len(l)) + res._array[:] = l + return res + + def ll_rsplit_chr(self, ch, max): + # NOT_RPYTHON + l = [self.make_string(s) for s in self._str.rsplit(ch, max)] res = _array(Array(self._TYPE), len(l)) res._array[:] = l return res diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1126,7 +1126,7 @@ """ if not isinstance(source, str): source = py.std.inspect.getsource(source).lstrip() - while source.startswith('@py.test.mark.'): + while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function # object, we may ignore them 
assert '\n' in source diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -133,6 +132,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" + self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -145,6 +145,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -171,25 +172,46 @@ self.float_const_abs_addr = float_constants + 16 def _build_malloc_fixedsize_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. 
+ gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. - mc.MOV_rr(edi.value, edx.value) - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_fixedsize_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -200,18 +222,25 @@ self.malloc_fixedsize_slowpath2 = rawstart def _build_stack_check_slowpath(self): - from pypy.rlib import rstack _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0: return # no stack check (for tests, or non-translated) # + # make a "function" that is called immediately at the start of + # an assembler function. In particular, the stack looks like: + # + # | ... | <-- aligned to a multiple of 16 + # | retaddr of caller | + # | my own retaddr | <-- esp + # +---------------------+ + # mc = codebuf.MachineCodeBlockWrapper() - mc.PUSH_r(ebp.value) - mc.MOV_rr(ebp.value, esp.value) # + stack_size = WORD if IS_X86_64: # on the x86_64, we have to save all the registers that may # have been used to pass arguments + stack_size += 6*WORD + 8*8 for reg in [edi, esi, edx, ecx, r8, r9]: mc.PUSH_r(reg.value) mc.SUB_ri(esp.value, 8*8) @@ -220,11 +249,13 @@ # if IS_X86_32: mc.LEA_rb(eax.value, +8) + stack_size += 2*WORD + mc.PUSH_r(eax.value) # alignment mc.PUSH_r(eax.value) elif IS_X86_64: mc.LEA_rb(edi.value, +16) - mc.AND_ri(esp.value, -16) # + # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) # mc.MOV(eax, heap(self.cpu.pos_exception())) @@ -232,16 +263,16 @@ mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() # - if IS_X86_64: + if IS_X86_32: + mc.ADD_ri(esp.value, 2*WORD) + elif IS_X86_64: # restore the registers for i in range(7, -1, -1): mc.MOVSD_xs(i, 8*i) - for i, reg in [(6, r9), (5, r8), (4, ecx), - (3, 
edx), (2, esi), (1, edi)]: - mc.MOV_rb(reg.value, -8*i) + mc.ADD_ri(esp.value, 8*8) + for reg in [r9, r8, ecx, edx, esi, edi]: + mc.POP_r(reg.value) # - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) mc.RET() # # patch the JNZ above @@ -266,9 +297,7 @@ # function, and will instead return to the caller's caller. Note # also that we completely ignore the saved arguments, because we # are interrupting the function. - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) - mc.ADD_ri(esp.value, WORD) + mc.ADD_ri(esp.value, stack_size) mc.RET() # rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -537,7 +566,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -550,6 +579,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. 
not translated) @@ -571,12 +604,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -686,8 +739,8 @@ nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -896,9 +949,9 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, 
x, arglocs, start) p = 0 n = len(arglocs) @@ -924,9 +977,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -984,12 +1037,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1794,6 +1862,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1808,7 +1880,7 @@ else: tmp = eax - self._emit_call(x, arglocs, 3, tmp=tmp) + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1839,7 +1911,7 @@ faildescr = guard_op.getdescr() fail_index = 
self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') @@ -1853,8 +1925,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1879,7 +1951,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -1906,7 +1978,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -2001,11 +2073,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ 
-2036,12 +2113,19 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_fixedsize_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) slowpath_addr2 = self.malloc_fixedsize_slowpath2 self.mc.CALL(imm(slowpath_addr2)) @@ -2049,6 +2133,7 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
From commits-noreply at bitbucket.org Thu Apr 7 08:39:55 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 08:39:55 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: refactor the untested option Message-ID: <20110407063955.52E39282C38@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43187:2f3857b72200 Date: 2011-04-07 08:39 +0200 http://bitbucket.org/pypy/pypy/changeset/2f3857b72200/ Log: refactor the untested option diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -221,12 +221,14 @@ pdb_plus_show = PdbPlusShow(t) # need a translator to support extended commands - def debug(got_error): + def finish_profiling(): if prof: prof.disable() statfilename = 'prof.dump' log.info('Dumping profiler stats to: %s' % statfilename) - prof.dump_stats(statfilename) + prof.dump_stats(statfilename) + + def debug(got_error): tb = None if got_error: import traceback @@ -293,9 +295,11 @@ except SystemExit: raise except: + finish_profiling() debug(True) raise SystemExit(1) else: + finish_profiling() if translateconfig.pdb: debug(False) From commits-noreply at bitbucket.org Thu Apr 7 09:18:32 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 09:18:32 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: change C structure a bit to reuse stuff in asm_* instead of copying (wrongly) Message-ID: <20110407071832.1C355282C38@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43188:5300489d4c50 Date: 2011-04-07 09:18 +0200 http://bitbucket.org/pypy/pypy/changeset/5300489d4c50/ Log: change C structure a bit to reuse stuff in asm_* instead of copying (wrongly) diff --git a/pypy/translator/c/src/debug_print.h b/pypy/translator/c/src/debug_print.h --- a/pypy/translator/c/src/debug_print.h +++ b/pypy/translator/c/src/debug_print.h @@ -103,6 +103,8 @@ } +#define 
OP_LL_READ_TIMESTAMP(val) READ_TIMESTAMP(val) + #ifndef READ_TIMESTAMP /* asm_xxx.h may contain a specific implementation of READ_TIMESTAMP. * This is the default generic timestamp implementation. diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -38,7 +38,6 @@ #include "src/instrument.h" #include "src/asm.h" -#include "src/timer.h" #include "src/profiling.h" diff --git a/pypy/translator/c/src/timer.h b/pypy/translator/c/src/timer.h deleted file mode 100644 --- a/pypy/translator/c/src/timer.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef PYPY_TIMER_H -#define PYPY_TIMER_H - -/* XXX Some overlap with the stuff in asm_gcc_x86 - */ -#define OP_LL_READ_TIMESTAMP(v) v = pypy_read_timestamp(); - -#ifndef PYPY_NOT_MAIN_FILE -/* implementations */ - -#ifdef _WIN32 -long long pypy_read_timestamp(void) { - long long timestamp; - long long scale; - QueryPerformanceCounter((LARGE_INTEGER*)&(timestamp)); - return timestamp; -} - -#else - -#include "inttypes.h" - -long long pypy_read_timestamp(void) { - uint32_t low, high; - __asm__ __volatile__ ( - "rdtsc" : "=a" (low), "=d" (high) - ); - return ((long long)high << 32) + low; -} - -#endif -#endif -#endif From commits-noreply at bitbucket.org Thu Apr 7 09:20:02 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 09:20:02 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement cast between float and ulonglong Message-ID: <20110407072002.9428E282C38@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43189:9658cd21b86a Date: 2011-04-07 09:18 +0200 http://bitbucket.org/pypy/pypy/changeset/9658cd21b86a/ Log: implement cast between float and ulonglong diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -147,6 +147,8 @@ 'cast_float_to_uint': 'conv.u4', 
'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', From commits-noreply at bitbucket.org Thu Apr 7 09:33:21 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 7 Apr 2011 09:33:21 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: dance dance dance Message-ID: <20110407073321.92535282C38@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43190:c397a4d279f1 Date: 2011-04-07 09:32 +0200 http://bitbucket.org/pypy/pypy/changeset/c397a4d279f1/ Log: dance dance dance diff --git a/pypy/translator/c/src/debug_print.h b/pypy/translator/c/src/debug_print.h --- a/pypy/translator/c/src/debug_print.h +++ b/pypy/translator/c/src/debug_print.h @@ -41,6 +41,8 @@ /* implementations */ +#define OP_LL_READ_TIMESTAMP(val) READ_TIMESTAMP(val) + #ifndef PYPY_NOT_MAIN_FILE #include @@ -102,9 +104,6 @@ pypy_debug_open(); } - -#define OP_LL_READ_TIMESTAMP(val) READ_TIMESTAMP(val) - #ifndef READ_TIMESTAMP /* asm_xxx.h may contain a specific implementation of READ_TIMESTAMP. * This is the default generic timestamp implementation. 
From commits-noreply at bitbucket.org Thu Apr 7 09:51:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 09:51:09 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: generate getfield/oogetfield depending on the typesystem Message-ID: <20110407075109.2AADC282C3A@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43191:fde7c5e4d671 Date: 2011-04-07 09:50 +0200 http://bitbucket.org/pypy/pypy/changeset/fde7c5e4d671/ Log: generate getfield/oogetfield depending on the typesystem diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. 
@@ -500,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None From commits-noreply at bitbucket.org Thu Apr 7 10:44:53 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Thu, 7 Apr 2011 10:44:53 +0200 (CEST) Subject: [pypy-svn] pypy default: Let test_pypy_c tests run on Windows Message-ID: <20110407084453.0B140282C3A@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43192:5916ce81f4e2 Date: 2011-04-07 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/5916ce81f4e2/ Log: Let test_pypy_c tests run on Windows diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -116,13 +117,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) From commits-noreply at bitbucket.org Thu Apr 7 11:13:44 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 11:13:44 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: add (partial) support to 
rlocale for the cli backend Message-ID: <20110407091344.BEEC5282C3A@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43193:5b17f8d01f3b Date: 2011-04-07 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/5b17f8d01f3b/ Log: add (partial) support to rlocale for the cli backend diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -156,11 +156,11 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) @@ -184,11 +184,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -673,6 +673,7 @@ return frame.eval() def op_direct_call(self, f, *args): + import pdb;pdb.set_trace() FTYPE = 
self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f)) return self.perform_call(f, FTYPE.ARGS, args) diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -1148,10 +1148,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } From commits-noreply at bitbucket.org Thu Apr 7 12:31:39 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 12:31:39 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement int_between for the cli backend Message-ID: <20110407103139.478A2282C3A@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs 
Changeset: r43194:a92cd3b9d3f1 Date: 2011-04-07 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/a92cd3b9d3f1/ Log: implement int_between for the cli backend diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -396,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -673,7 +673,6 @@ return frame.eval() def op_direct_call(self, f, *args): - import pdb;pdb.set_trace() FTYPE = self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f)) return self.perform_call(f, FTYPE.ARGS, args) diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 
'gc__collect': 'call void class [mscorlib]System.GC::Collect()', diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) From commits-noreply at bitbucket.org Thu Apr 7 14:23:02 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 14:23:02 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: add an ootype partial version of rlocale.numeric_formatting: it is of course not fully implemented, but it should at least allow translation to work Message-ID: <20110407122302.59DB8282C3D@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43195:aac08fe66ae0 Date: 2011-04-07 14:01 +0200 http://bitbucket.org/pypy/pypy/changeset/aac08fe66ae0/ Log: add an ootype partial version of rlocale.numeric_formatting: it is of course not fully implemented, but it should at least allow translation to work diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -173,6 +174,13 @@ grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + ooimpl=oo_numeric_formatting) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): From commits-noreply at bitbucket.org Thu Apr 7 14:41:40 2011 From: commits-noreply at bitbucket.org 
(antocuni) Date: Thu, 7 Apr 2011 14:41:40 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement math.copysign for the cli backend Message-ID: <20110407124140.88096282C3D@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43196:404b10866f8d Date: 2011-04-07 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/404b10866f8d/ Log: implement math.copysign for the cli backend diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,14 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,15 @@ { return Math.Tanh(x); } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } From commits-noreply at bitbucket.org Thu Apr 7 16:33:33 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 16:33:33 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: exclude a large set of functions from the posix module if we are compiling to ootype Message-ID: <20110407143333.5F734282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43197:50d10927a6b3 Date: 2011-04-07 15:05 +0200 http://bitbucket.org/pypy/pypy/changeset/50d10927a6b3/ Log: exclude a large set of functions from the posix module if we are compiling to ootype diff --git a/pypy/module/posix/__init__.py 
b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'isatty', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -160,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' 
+ name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): From commits-noreply at bitbucket.org Thu Apr 7 16:33:34 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 16:33:34 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement rstring_to_float for ootype Message-ID: <20110407143334.3E042282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43198:764ef40b550c Date: 2011-04-07 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/764ef40b550c/ Log: implement rstring_to_float for ootype diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,7 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? 
@@ -24,16 +25,33 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +def ll_rstring_to_float(lls): + from pypy.rpython.annlowlevel import hlstr + s = hlstr(lls) + assert s is not None + return rstring_to_float_impl(s) + +register_external(rstring_to_float, [str], float, + llimpl=ll_rstring_to_float, + ooimpl=oo_rstring_to_float) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -163,6 +163,13 @@ assert self.interpret(fn, [42, -1]) == -42 assert self.interpret(fn, [42, -0.0]) == -42 assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 class TestLLtype(BaseTestRfloat, LLRtypeMixin): From commits-noreply at bitbucket.org Thu Apr 7 17:00:21 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 17:00:21 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement math.isnan for cli Message-ID: <20110407150021.BD472282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43199:164b6fb381d4 Date: 2011-04-07 17:00 +0200 
http://bitbucket.org/pypy/pypy/changeset/164b6fb381d4/ Log: implement math.isnan for cli diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -170,6 +170,16 @@ s = ['42.3', '123.4'][i] return rstring_to_float(s) assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -225,6 +225,11 @@ return Math.Tanh(x); } + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + static public double ll_math_copysign(double x, double y) { if (x < 0.0) From commits-noreply at bitbucket.org Thu Apr 7 17:01:58 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 17:01:58 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: fix annotation Message-ID: <20110407150158.2C092282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43200:05ca454c30b9 Date: 2011-04-07 16:45 +0200 http://bitbucket.org/pypy/pypy/changeset/05ca454c30b9/ Log: fix annotation diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -5,6 +5,7 @@ from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? 
@@ -45,10 +46,9 @@ def ll_rstring_to_float(lls): from pypy.rpython.annlowlevel import hlstr s = hlstr(lls) - assert s is not None return rstring_to_float_impl(s) -register_external(rstring_to_float, [str], float, +register_external(rstring_to_float, [SomeString(can_be_None=True)], float, llimpl=ll_rstring_to_float, ooimpl=oo_rstring_to_float) From commits-noreply at bitbucket.org Thu Apr 7 17:01:58 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 17:01:58 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: merge heads Message-ID: <20110407150158.EC3E8282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43201:3a314abb44d4 Date: 2011-04-07 17:00 +0200 http://bitbucket.org/pypy/pypy/changeset/3a314abb44d4/ Log: merge heads diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -170,6 +170,16 @@ s = ['42.3', '123.4'][i] return rstring_to_float(s) assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -225,6 +225,11 @@ return Math.Tanh(x); } + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + static public double ll_math_copysign(double x, double y) { if (x < 0.0) From commits-noreply at bitbucket.org Thu Apr 7 17:12:55 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 17:12:55 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: implement isinf for cli Message-ID: <20110407151255.37F75282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43202:ff657dd939b8 Date: 
2011-04-07 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/ff657dd939b8/ Log: implement isinf for cli diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -179,7 +179,13 @@ return math.isnan(nan) assert self.interpret(fn, [1e200]) - + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + class TestLLtype(BaseTestRfloat, LLRtypeMixin): diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -230,6 +230,11 @@ return double.IsNaN(x); } + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + static public double ll_math_copysign(double x, double y) { if (x < 0.0) From commits-noreply at bitbucket.org Thu Apr 7 17:41:22 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 7 Apr 2011 17:41:22 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: we need isatty for the interactive prompt Message-ID: <20110407154122.73F5F282C40@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43203:127621a9d008 Date: 2011-04-07 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/127621a9d008/ Log: we need isatty for the interactive prompt diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -12,7 +12,7 @@ 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', - 'getsid', 'getuid', 'isatty', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', 'makedev', 'minor', 
'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', From commits-noreply at bitbucket.org Thu Apr 7 22:11:21 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Thu, 7 Apr 2011 22:11:21 +0200 (CEST) Subject: [pypy-svn] pypy default: Revert again the changes in the import mechanism. Message-ID: <20110407201121.77367282C44@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43205:195459aa1891 Date: 2011-04-07 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/195459aa1891/ Log: Revert again the changes in the import mechanism. It is very important for the JIT that importing again a known module does not need to acquire the import lock, for example. diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -61,27 +61,3 @@ assert not imp.lock_held() self.waitfor(lambda: done) assert done - -class TestImportLock: - def test_lock(self, space, monkeypatch): - from pypy.module.imp.importing import getimportlock, importhook - - # Monkeypatch the import lock and add a counter - importlock = getimportlock(space) - original_acquire = importlock.acquire_lock - def acquire_lock(): - importlock.count += 1 - original_acquire() - importlock.count = 0 - monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) - - # An already imported module - importhook(space, 'sys') - assert importlock.count == 0 - # A new module - importhook(space, 're') - assert importlock.count == 7 - # Import it again - previous_count = importlock.count - importhook(space, 're') - assert importlock.count == previous_count diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ 
b/pypy/module/imp/test/test_import.py @@ -438,38 +438,6 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg - def test__package__(self): - # Regression test for http://bugs.python.org/issue3221. - def check_absolute(): - exec "from os import path" in ns - def check_relative(): - exec "from . import a" in ns - - # Check both OK with __package__ and __name__ correct - ns = dict(__package__='pkg', __name__='pkg.notarealmodule') - check_absolute() - check_relative() - - # Check both OK with only __name__ wrong - ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') - check_absolute() - check_relative() - - # Check relative fails with only __package__ wrong - ns = dict(__package__='foo', __name__='pkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check relative fails with __package__ and __name__ wrong - ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check both fail with package set to a non-string - ns = dict(__package__=object()) - raises(ValueError, check_absolute) - raises(ValueError, check_relative) - def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -118,107 +118,6 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) -def _get_relative_name(space, modulename, level, w_globals): - w = space.wrap - ctxt_w_package = space.finditem(w_globals, w('__package__')) - - ctxt_package = None - if ctxt_w_package is not None and ctxt_w_package is not space.w_None: - try: - ctxt_package = space.str_w(ctxt_w_package) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_ValueError, space.wrap( - 
"__package__ set to non-string")) - - if ctxt_package is not None: - # __package__ is set, so use it - if ctxt_package == '' and level < 0: - return None, 0 - - package_parts = ctxt_package.split('.') - while level > 1 and package_parts: - level -= 1 - package_parts.pop() - if not package_parts: - if len(ctxt_package) == 0: - msg = "Attempted relative import in non-package" - else: - msg = "Attempted relative import beyond toplevel package" - raise OperationError(space.w_ValueError, w(msg)) - - # Try to import parent package - try: - w_parent = absolute_import(space, ctxt_package, 0, - None, tentative=False) - except OperationError, e: - if not e.match(space, space.w_ImportError): - raise - if level > 0: - raise OperationError(space.w_SystemError, space.wrap( - "Parent module '%s' not loaded, " - "cannot perform relative import" % ctxt_package)) - else: - space.warn("Parent module '%s' not found " - "while handling absolute import" % ctxt_package, - space.w_RuntimeWarning) - - rel_level = len(package_parts) - if modulename: - package_parts.append(modulename) - rel_modulename = '.'.join(package_parts) - else: - # __package__ not set, so figure it out and set it - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) - - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - - if not ctxt_name: - return None, 0 - - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - - if level > 0 and not ctxt_name_prefix_parts: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - - rel_modulename = '.'.join(ctxt_name_prefix_parts) - - if 
ctxt_w_path is not None: - # __path__ is set, so __name__ is already the package name - space.setitem(w_globals, w("__package__"), ctxt_w_name) - else: - # Normal module, so work out the package name if any - if '.' not in ctxt_name: - space.setitem(w_globals, w("__package__"), space.w_None) - elif rel_modulename: - space.setitem(w_globals, w("__package__"), w(rel_modulename)) - - if modulename: - if rel_modulename: - rel_modulename += '.' + modulename - else: - rel_modulename = modulename - - rel_level = len(ctxt_name_prefix_parts) - - return rel_modulename, rel_level - - @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -240,40 +139,68 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) - if rel_modulename: - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. - if level == -1: - tentative = True - else: - tentative = False + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise - w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod + if ctxt_name is not None: + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + if ctxt_name_prefix_parts: + rel_modulename = '.'.join(ctxt_name_prefix_parts) + if modulename: + rel_modulename += '.' 
+ modulename + baselevel = len(ctxt_name_prefix_parts) + if rel_modulename is not None: + # XXX What is this check about? There is no test for it + w_mod = check_sys_modules(space, w(rel_modulename)) - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + if (w_mod is None or + not space.is_w(w_mod, space.w_None) or + level > 0): + + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False + + w_mod = absolute_import(space, rel_modulename, + baselevel, fromlist_w, + tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod + else: + rel_modulename = None + + if level > 0: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + w_mod = absolute_import_try(space, modulename, 0, fromlist_w) + if w_mod is None or space.is_w(w_mod, space.w_None): + w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) space.timer.stop_name("importhook", modulename) return w_mod + at jit.dont_look_inside def absolute_import(space, modulename, baselevel, fromlist_w, tentative): - # Short path: check in sys.modules - w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) - if w_mod is not None and not space.is_w(w_mod, space.w_None): - return w_mod - return absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative) - - at jit.dont_look_inside -def absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative): lock = getimportlock(space) lock.acquire_lock() try: From commits-noreply at bitbucket.org Fri Apr 8 10:25:18 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 10:25:18 +0200 (CEST) Subject: [pypy-svn] pypy default: Implement a fast malloc path for 
arrays of known (and not too Message-ID: <20110408082518.AB961282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43206:6d4b45b03f28 Date: 2011-04-07 22:29 +0200 http://bitbucket.org/pypy/pypy/changeset/6d4b45b03f28/ Log: Implement a fast malloc path for arrays of known (and not too large) length, mostly reusing the code for fixedsize mallocs. diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -77,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -123,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) @@ -171,7 +171,7 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): # With asmgcc, we need two helpers, so that we can write two CALL # instructions in assembler, with a mark_gc_roots in between. # With shadowstack, this is not needed, so we produce a single helper. 
@@ -183,7 +183,7 @@ for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() # if gcrootmap is not None and gcrootmap.is_shadow_stack: # ---- shadowstack ---- @@ -208,7 +208,7 @@ mc.MOV_rr(edi.value, edx.value) mc.JMP(imm(addr)) # tail call to the real malloc rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart + self.malloc_slowpath1 = rawstart # ---------- second helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() # @@ -219,7 +219,7 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() @@ -1273,6 +1273,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -2083,8 +2088,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2092,7 +2096,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = 
self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2111,11 +2115,11 @@ shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: # there are two helpers to call only with asmgcc - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + slowpath_addr1 = self.malloc_slowpath1 self.mc.CALL(imm(slowpath_addr1)) self.mark_gc_roots(self.write_new_force_index(), use_copy_area=shadow_stack) - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -860,15 +860,27 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) if gc_ll_descr.gcrootmap and 
gc_ll_descr.gcrootmap.is_shadow_stack: # ---- shadowstack ---- # We need edx as a temporary, but otherwise don't save any more - # register. See comments in _build_malloc_fixedsize_slowpath(). + # register. See comments in _build_malloc_slowpath(). tmp_box = TempBox() self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) @@ -885,16 +897,16 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=reg) self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -904,7 +916,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -963,16 +975,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + 
self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -45,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True gcrootmap = MockGcRootMap() @@ -167,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) self.addrs[1] = self.addrs[0] + 64 + self.addrs[2] = 0 # 64 bytes def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -205,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD 
- def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -255,6 +259,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -275,6 +280,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -290,3 +296,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. 
This is of course only the case in this test class. + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -2,6 +2,7 @@ from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from 
pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop @@ -22,6 +23,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -35,6 +38,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -588,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -666,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. 
gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -689,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -837,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr From commits-noreply at bitbucket.org Fri Apr 8 10:25:20 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 10:25:20 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110408082520.7437A282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43207:7793938ab4f8 Date: 2011-04-08 10:24 +0200 http://bitbucket.org/pypy/pypy/changeset/7793938ab4f8/ Log: merge heads diff --git a/pypy/module/thread/test/test_import_lock.py 
b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -61,27 +61,3 @@ assert not imp.lock_held() self.waitfor(lambda: done) assert done - -class TestImportLock: - def test_lock(self, space, monkeypatch): - from pypy.module.imp.importing import getimportlock, importhook - - # Monkeypatch the import lock and add a counter - importlock = getimportlock(space) - original_acquire = importlock.acquire_lock - def acquire_lock(): - importlock.count += 1 - original_acquire() - importlock.count = 0 - monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) - - # An already imported module - importhook(space, 'sys') - assert importlock.count == 0 - # A new module - importhook(space, 're') - assert importlock.count == 7 - # Import it again - previous_count = importlock.count - importhook(space, 're') - assert importlock.count == previous_count diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -438,38 +438,6 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg - def test__package__(self): - # Regression test for http://bugs.python.org/issue3221. - def check_absolute(): - exec "from os import path" in ns - def check_relative(): - exec "from . 
import a" in ns - - # Check both OK with __package__ and __name__ correct - ns = dict(__package__='pkg', __name__='pkg.notarealmodule') - check_absolute() - check_relative() - - # Check both OK with only __name__ wrong - ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') - check_absolute() - check_relative() - - # Check relative fails with only __package__ wrong - ns = dict(__package__='foo', __name__='pkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check relative fails with __package__ and __name__ wrong - ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check both fail with package set to a non-string - ns = dict(__package__=object()) - raises(ValueError, check_absolute) - raises(ValueError, check_relative) - def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -118,107 +118,6 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) -def _get_relative_name(space, modulename, level, w_globals): - w = space.wrap - ctxt_w_package = space.finditem(w_globals, w('__package__')) - - ctxt_package = None - if ctxt_w_package is not None and ctxt_w_package is not space.w_None: - try: - ctxt_package = space.str_w(ctxt_w_package) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_ValueError, space.wrap( - "__package__ set to non-string")) - - if ctxt_package is not None: - # __package__ is set, so use it - if ctxt_package == '' and level < 0: - return None, 0 - - package_parts = ctxt_package.split('.') - while level > 1 and package_parts: - level -= 1 - package_parts.pop() - if not package_parts: - if 
len(ctxt_package) == 0: - msg = "Attempted relative import in non-package" - else: - msg = "Attempted relative import beyond toplevel package" - raise OperationError(space.w_ValueError, w(msg)) - - # Try to import parent package - try: - w_parent = absolute_import(space, ctxt_package, 0, - None, tentative=False) - except OperationError, e: - if not e.match(space, space.w_ImportError): - raise - if level > 0: - raise OperationError(space.w_SystemError, space.wrap( - "Parent module '%s' not loaded, " - "cannot perform relative import" % ctxt_package)) - else: - space.warn("Parent module '%s' not found " - "while handling absolute import" % ctxt_package, - space.w_RuntimeWarning) - - rel_level = len(package_parts) - if modulename: - package_parts.append(modulename) - rel_modulename = '.'.join(package_parts) - else: - # __package__ not set, so figure it out and set it - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) - - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - - if not ctxt_name: - return None, 0 - - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - - if level > 0 and not ctxt_name_prefix_parts: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - - rel_modulename = '.'.join(ctxt_name_prefix_parts) - - if ctxt_w_path is not None: - # __path__ is set, so __name__ is already the package name - space.setitem(w_globals, w("__package__"), ctxt_w_name) - else: - # Normal module, so work out the package name if any - if '.' 
not in ctxt_name: - space.setitem(w_globals, w("__package__"), space.w_None) - elif rel_modulename: - space.setitem(w_globals, w("__package__"), w(rel_modulename)) - - if modulename: - if rel_modulename: - rel_modulename += '.' + modulename - else: - rel_modulename = modulename - - rel_level = len(ctxt_name_prefix_parts) - - return rel_modulename, rel_level - - @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -240,40 +139,68 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) - if rel_modulename: - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. - if level == -1: - tentative = True - else: - tentative = False + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise - w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod + if ctxt_name is not None: + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + if ctxt_name_prefix_parts: + rel_modulename = '.'.join(ctxt_name_prefix_parts) + if modulename: + rel_modulename += '.' + modulename + baselevel = len(ctxt_name_prefix_parts) + if rel_modulename is not None: + # XXX What is this check about? 
There is no test for it + w_mod = check_sys_modules(space, w(rel_modulename)) - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + if (w_mod is None or + not space.is_w(w_mod, space.w_None) or + level > 0): + + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False + + w_mod = absolute_import(space, rel_modulename, + baselevel, fromlist_w, + tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod + else: + rel_modulename = None + + if level > 0: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + w_mod = absolute_import_try(space, modulename, 0, fromlist_w) + if w_mod is None or space.is_w(w_mod, space.w_None): + w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) space.timer.stop_name("importhook", modulename) return w_mod + at jit.dont_look_inside def absolute_import(space, modulename, baselevel, fromlist_w, tentative): - # Short path: check in sys.modules - w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) - if w_mod is not None and not space.is_w(w_mod, space.w_None): - return w_mod - return absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative) - - at jit.dont_look_inside -def absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative): lock = getimportlock(space) lock.acquire_lock() try: From commits-noreply at bitbucket.org Fri Apr 8 10:25:28 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 10:25:28 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: don't use ll_ specialization for the lltype part of this function; also, write a failing test for the C backend Message-ID: 
<20110408082528.CFD0C282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43208:ada41af85c55 Date: 2011-04-08 10:18 +0200 http://bitbucket.org/pypy/pypy/changeset/ada41af85c55/ Log: don't use ll_ specialization for the lltype part of this function; also, write a failing test for the C backend diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -43,13 +43,8 @@ lls = oostr(s) return ootype.ooparse_float(lls) -def ll_rstring_to_float(lls): - from pypy.rpython.annlowlevel import hlstr - s = hlstr(lls) - return rstring_to_float_impl(s) - -register_external(rstring_to_float, [SomeString(can_be_None=True)], float, - llimpl=ll_rstring_to_float, +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, ooimpl=oo_rstring_to_float) From commits-noreply at bitbucket.org Fri Apr 8 10:25:29 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 10:25:29 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: adding the export_name fixes the failing test in the C backend Message-ID: <20110408082529.74AAC282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43209:53e143ec956f Date: 2011-04-08 10:24 +0200 http://bitbucket.org/pypy/pypy/changeset/53e143ec956f/ Log: adding the export_name fixes the failing test in the C backend diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -44,6 
+44,7 @@ return ootype.ooparse_float(lls) register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + export_name='rstring_to_float', llimpl=rstring_to_float_impl, ooimpl=oo_rstring_to_float) From commits-noreply at bitbucket.org Fri Apr 8 11:21:37 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 11:21:37 +0200 (CEST) Subject: [pypy-svn] pypy default: Get rid of Frame.numlocals. Message-ID: <20110408092137.861D9282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43210:c239b9f74408 Date: 2011-04-08 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/c239b9f74408/ Log: Get rid of Frame.numlocals. diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -54,7 +54,7 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) 
self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." 
+ raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) From commits-noreply at bitbucket.org Fri Apr 8 12:05:33 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 12:05:33 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: add a more meaningful default for export_name Message-ID: <20110408100533.360CF282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43211:dc12b14e8caf Date: 2011-04-08 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/dc12b14e8caf/ Log: add a more meaningful default for export_name diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -44,7 +44,6 @@ return ootype.ooparse_float(lls) register_external(rstring_to_float, [SomeString(can_be_None=False)], float, - export_name='rstring_to_float', llimpl=rstring_to_float_impl, ooimpl=oo_rstring_to_float) From commits-noreply at bitbucket.org Fri Apr 8 12:05:33 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 
2011 12:05:33 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: add an lltype implementation for numeric_formatting Message-ID: <20110408100533.E1D7C282BE9@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43212:0be5a3620529 Date: 2011-04-08 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/0be5a3620529/ Log: add an lltype implementation for numeric_formatting diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -168,6 +168,9 @@ def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) @@ -178,6 +181,7 @@ return '.', '', '' register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, ooimpl=oo_numeric_formatting) From commits-noreply at bitbucket.org Fri Apr 8 12:22:57 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 12:22:57 +0200 (CEST) Subject: [pypy-svn] pypy default: Optimize frame creation by the JIT by making the default value Message-ID: <20110408102257.69370282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43213:a7a6d25abf34 Date: 2011-04-08 12:22 +0200 http://bitbucket.org/pypy/pypy/changeset/a7a6d25abf34/ Log: Optimize frame creation by the JIT by making the default value of these two fields be 0 instead of -1. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. 
executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,8 +46,8 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? 
From commits-noreply at bitbucket.org Fri Apr 8 13:09:11 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 8 Apr 2011 13:09:11 +0200 (CEST) Subject: [pypy-svn] pypy default: Implement PyObject_Cmp Message-ID: <20110408110911.C9ECE282BD7@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43214:70ba9d611efe Date: 2011-04-05 14:09 +0200 http://bitbucket.org/pypy/pypy/changeset/70ba9d611efe/ Log: Implement PyObject_Cmp diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1965,14 +1965,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. 
This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. In 2.x, this is just a alias diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -245,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, From commits-noreply at bitbucket.org Fri Apr 8 13:09:13 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 8 Apr 2011 13:09:13 +0200 (CEST) Subject: [pypy-svn] pypy default: Implement PyImport_AddModule() Message-ID: <20110408110913.0C1C2282BD7@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43215:54b162b77747 Date: 2011-04-05 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/54b162b77747/ Log: Implement PyImport_AddModule() diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1116,20 +1116,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object 
corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -51,3 +53,23 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. 
Use + PyImport_ImportModule() or one of its variants to import a module. + Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,16 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + def test_reload(self, space, api): pdb = api.PyImport_Import(space.wrap("pdb")) space.delattr(pdb, space.wrap("set_trace")) From commits-noreply at bitbucket.org Fri Apr 8 13:09:19 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 8 Apr 2011 13:09:19 +0200 (CEST) Subject: [pypy-svn] pypy default: Add PyCode_New(), PyFrame_New(), and PyTraceBack_Here(). Message-ID: <20110408110919.C04DC282C49@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43216:575df4c13028 Date: 2011-04-07 09:12 +0200 http://bitbucket.org/pypy/pypy/changeset/575df4c13028/ Log: Add PyCode_New(), PyFrame_New(), and PyTraceBack_Here(). 
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -539,7 +539,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - 
self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def 
init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_lineno = frame.f_lineno + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def 
unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git 
a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ 
b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + 
Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ From commits-noreply at bitbucket.org Fri Apr 8 13:14:46 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 8 Apr 2011 13:14:46 +0200 (CEST) Subject: [pypy-svn] pypy default: make MAKE_CLOSURE a bit faster (and more JIT-friendly) Message-ID: <20110408111446.80C00282BD7@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43217:e5b1300d7453 Date: 2011-04-08 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/e5b1300d7453/ Log: make MAKE_CLOSURE a bit faster (and more JIT-friendly) diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -220,11 +220,13 @@ for cell in self.space.fixedview(w_freevarstuple)] else: nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + freevars = [None] * nfreevars + while True: + nfreevars -= 1 + if nfreevars < 
0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) From commits-noreply at bitbucket.org Fri Apr 8 14:35:08 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 14:35:08 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: this class has been killed nowadays Message-ID: <20110408123508.5EF1D282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43218:698ebaf41a5e Date: 2011-04-08 11:56 +0200 http://bitbucket.org/pypy/pypy/changeset/698ebaf41a5e/ Log: this class has been killed nowadays diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass From commits-noreply at bitbucket.org Fri Apr 8 14:35:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 14:35:09 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: we need to explicitly list signal now Message-ID: <20110408123509.0B283282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43219:9192f921237f Date: 2011-04-08 14:22 +0200 http://bitbucket.org/pypy/pypy/changeset/9192f921237f/ Log: we need to explicitly list signal now diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- 
a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) From commits-noreply at bitbucket.org Fri Apr 8 14:35:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 14:35:09 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: surprise, we need the signal module to test the signal module Message-ID: <20110408123509.963EB282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43220:5a8e0a9468ef Date: 2011-04-08 14:24 +0200 http://bitbucket.org/pypy/pypy/changeset/5a8e0a9468ef/ Log: surprise, we need the signal module to test the signal module diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal From commits-noreply at bitbucket.org Fri Apr 8 14:35:10 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 14:35:10 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: we need signal for these tests Message-ID: <20110408123510.4A3D8282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43221:ef693a35f768 Date: 2011-04-08 14:28 +0200 
http://bitbucket.org/pypy/pypy/changeset/ef693a35f768/ Log: we need signal for these tests diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: From commits-noreply at bitbucket.org Fri Apr 8 14:35:13 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 14:35:13 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: these two functions are sandboxsafe Message-ID: <20110408123513.927AA282C4B@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43222:c425c49065bd Date: 2011-04-08 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/c425c49065bd/ Log: these two functions are sandboxsafe diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -178,7 +178,8 @@ return '.', '', '' register_external(numeric_formatting, [], (str, str, str), - ooimpl=oo_numeric_formatting) + ooimpl=oo_numeric_formatting, + sandboxsafe=True) _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -46,7 +46,8 @@ register_external(rstring_to_float, [SomeString(can_be_None=False)], float, export_name='rstring_to_float', llimpl=rstring_to_float_impl, - ooimpl=oo_rstring_to_float) + ooimpl=oo_rstring_to_float, + sandboxsafe=True) # float as string -> sign, beforept, afterpt, exponent From commits-noreply at bitbucket.org Fri Apr 8 14:35:15 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 14:35:15 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: merge heads Message-ID: 
<20110408123515.DF32A282BE9@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43223:dfce0d45ef51 Date: 2011-04-08 14:34 +0200 http://bitbucket.org/pypy/pypy/changeset/dfce0d45ef51/ Log: merge heads diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -168,6 +168,9 @@ def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) @@ -178,6 +181,7 @@ return '.', '', '' register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, ooimpl=oo_numeric_formatting, sandboxsafe=True) diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -44,7 +44,6 @@ return ootype.ooparse_float(lls) register_external(rstring_to_float, [SomeString(can_be_None=False)], float, - export_name='rstring_to_float', llimpl=rstring_to_float_impl, ooimpl=oo_rstring_to_float, sandboxsafe=True) From commits-noreply at bitbucket.org Fri Apr 8 15:52:46 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 15:52:46 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: skip this test, which was renamed Message-ID: <20110408135246.61448282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43224:cdb213e88811 Date: 2011-04-08 15:04 +0200 
http://bitbucket.org/pypy/pypy/changeset/cdb213e88811/ Log: skip this test, which was renamed diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): From commits-noreply at bitbucket.org Fri Apr 8 15:52:47 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 15:52:47 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: add support for multiple comma-separated categories in PYPYLOG Message-ID: <20110408135247.0F90E282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43225:006e50c09d54 Date: 2011-04-08 15:52 +0200 http://bitbucket.org/pypy/pypy/changeset/006e50c09d54/ Log: add support for multiple comma-separated categories in PYPYLOG diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ 
have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") From commits-noreply at bitbucket.org Fri Apr 8 17:52:59 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 17:52:59 +0200 (CEST) Subject: [pypy-svn] pypy default: When forcing a virtual that is actually an immutable struct, if it Message-ID: <20110408155259.D6377282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43226:0db17e3bbb64 Date: 2011-04-08 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/0db17e3bbb64/ Log: When forcing a virtual that is actually an immutable struct, if it contains only constants, then it can become a constant struct. Should help e.g. to remove W_IntObject(0) that were still malloced in the operations sent to the backend. 
diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/jit/metainterp/test/test_compile.py 
b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -86,6 +86,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert 
isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py 
b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -514,12 +514,10 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? - self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? @@ -538,6 +536,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -4960,6 +4960,58 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = 
new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -155,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, 
INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). 
# if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return 
modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): From commits-noreply at bitbucket.org Fri Apr 8 17:53:08 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 17:53:08 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110408155308.502FF282BE9@codespeak.net> Author: Armin Rigo Branch: Changeset: r43227:2f723ee92a83 Date: 2011-04-08 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/2f723ee92a83/ Log: merge heads diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -1116,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. 
First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1965,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. 
In 2.x, this is just a alias diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -539,7 +539,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -220,11 +220,13 @@ for cell in self.space.fixedview(w_freevarstuple)] else: nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + freevars = [None] * nfreevars + while True: + nfreevars -= 1 + if nfreevars < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import 
pypy.module.cpyext.memoryobject diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_lineno = frame.f_lineno + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + 
""" + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. + """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -245,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def 
unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, 
lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,16 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + def test_reload(self, space, api): pdb = api.PyImport_Import(space.wrap("pdb")) space.delattr(pdb, space.wrap("set_trace")) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( 
generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -51,3 +53,23 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. + Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - 
clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def 
test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... 
+ assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ From commits-noreply at bitbucket.org Fri Apr 8 18:06:42 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 18:06:42 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: the logic for doing casts between integers was broken, fix it Message-ID: <20110408160642.E80E2282BD7@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43228:fdc167595f56 Date: 2011-04-08 17:54 +0200 http://bitbucket.org/pypy/pypy/changeset/fdc167595f56/ Log: the logic for doing casts between integers was broken, fix it diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,32 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = 
op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + if TO == ootype.Float: + mnemonic = 'r8' + else: + if FROM in UNSIGNED_TYPES: + mnemonic = 'u' + else: + mnemonic = 'i' + mnemonic += str(INT_SIZE[TO]) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() From commits-noreply at bitbucket.org Fri Apr 8 18:06:43 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 18:06:43 +0200 (CEST) Subject: [pypy-svn] pypy ootype-virtualrefs: close this about-to-be-merged branch Message-ID: <20110408160643.88ADB282BE9@codespeak.net> Author: Antonio Cuni Branch: ootype-virtualrefs Changeset: r43229:052273b9dcae Date: 2011-04-08 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/052273b9dcae/ Log: close this about-to-be-merged branch From commits-noreply at bitbucket.org Fri Apr 8 18:06:58 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 2011 18:06:58 +0200 (CEST) Subject: [pypy-svn] pypy default: merge the ootype-virtualrefs branch, which makes ootype/cli translation working again! (probably the jvm one still needs some work) Message-ID: <20110408160658.AADD9282BD7@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43230:346e4eec05f8 Date: 2011-04-08 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/346e4eec05f8/ Log: merge the ootype-virtualrefs branch, which makes ootype/cli translation working again! 
(probably the jvm one still needs some work) diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 
'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -160,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' + name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # 
encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, 
LLRtypeMixin): def test_hash(self): diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,32 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + if TO == ootype.Float: + mnemonic = 'r8' + else: + if FROM in UNSIGNED_TYPES: + mnemonic = 'u' + else: + mnemonic = 'i' + mnemonic += str(INT_SIZE[TO]) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) 
bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? 
@@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = 
gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + 
sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || 
!category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -1148,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() 
+ return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -110,7 +110,6 @@ print 'Translation to cli and jvm is known to be broken at the moment' print 'Please try the "cli-jit" branch at:' print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument @@ -159,8 +158,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert 
lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -114,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ 
b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, 
LLRtypeMixin): pass diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): 
@@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() From commits-noreply at bitbucket.org Fri Apr 8 18:07:11 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 
2011 18:07:11 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110408160711.68503282BE9@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43231:11f49021bbd6 Date: 2011-04-08 18:06 +0200 http://bitbucket.org/pypy/pypy/changeset/11f49021bbd6/ Log: merge heads diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 
'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -160,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' + name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- 
a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf 
= x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,32 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + if TO == ootype.Float: + mnemonic = 'r8' + else: + if FROM in UNSIGNED_TYPES: + mnemonic = 'u' + 
else: + mnemonic = 'i' + mnemonic += str(INT_SIZE[TO]) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel 
+from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if 
option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + 
+register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { 
@@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -1148,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = 
hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -110,7 +110,6 @@ print 'Translation to cli and jvm is known to be broken at the moment' print 'Please try the "cli-jit" branch at:' print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument @@ -159,8 +158,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert 
lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -114,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ 
b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, 
LLRtypeMixin): pass diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): 
@@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() From commits-noreply at bitbucket.org Fri Apr 8 18:08:57 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 8 Apr 
2011 18:08:57 +0200 (CEST) Subject: [pypy-svn] pypy default: kill this outdated warning Message-ID: <20110408160857.84C6F282BD7@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43232:2954c56a26b1 Date: 2011-04-08 18:08 +0200 http://bitbucket.org/pypy/pypy/changeset/2954c56a26b1/ Log: kill this outdated warning diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,12 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level From commits-noreply at bitbucket.org Fri Apr 8 18:27:09 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 18:27:09 +0200 (CEST) Subject: [pypy-svn] pypy default: issue681 resolved Message-ID: <20110408162709.BAC9F282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43233:ef2c23556f3e Date: 2011-04-08 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/ef2c23556f3e/ Log: issue681 resolved os.altsep is None on MacOSX. 
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -91,9 +91,10 @@ else: # XXX that's slow def case_ok(filename): - index1 = filename.rfind(os.sep) - index2 = filename.rfind(os.altsep) - index = max(index1, index2) + index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: From commits-noreply at bitbucket.org Fri Apr 8 18:27:23 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 18:27:23 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110408162723.4D750282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43234:4e798ad894a6 Date: 2011-04-08 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/4e798ad894a6/ Log: merge heads diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ 
-160,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' + name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - 
chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + 
self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,32 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + if TO == ootype.Float: + mnemonic = 'r8' + else: + if FROM in UNSIGNED_TYPES: + mnemonic = 'u' + else: + mnemonic = 'i' + mnemonic += str(INT_SIZE[TO]) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. 
@@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? 
@@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = 
gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + 
sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || 
!category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -1148,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() 
+ return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, 
[]) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -114,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # 
____________________________________________________________ # # Stack size diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert 
not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ 
b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", 
"_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() From commits-noreply at bitbucket.org Fri Apr 8 19:49:48 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 19:49:48 +0200 (CEST) Subject: [pypy-svn] pypy default: Uh? This code contains a NameError (n). Message-ID: <20110408174948.41C4A282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43235:9d28ae7c8c49 Date: 2011-04-08 17:49 +0000 http://bitbucket.org/pypy/pypy/changeset/9d28ae7c8c49/ Log: Uh? This code contains a NameError (n). diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,11 +219,11 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [None] * nfreevars + n = len(codeobj.co_freevars) + freevars = [None] * n while True: - nfreevars -= 1 - if nfreevars < 0: + n -= 1 + if n < 0: break freevars[n] = self.space.interp_w(Cell, self.popvalue()) defaultarguments = self.popvalues(numdefaults) From commits-noreply at bitbucket.org Fri Apr 8 22:53:08 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 8 Apr 2011 22:53:08 +0200 (CEST) Subject: [pypy-svn] pypy default: This likely "fixes" for now the performance issue of running Message-ID: <20110408205308.79BBE282BD7@codespeak.net> Author: Armin Rigo Branch: Changeset: r43236:d91a8e70580c Date: 2011-04-08 22:52 +0200 http://bitbucket.org/pypy/pypy/changeset/d91a8e70580c/ Log: This likely "fixes" for now the performance issue of running "pypy-c translate.py". We have to think about it more and redo this somehow. diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -28,7 +28,9 @@ self.items = items def getitems(self): - return jit.hint(self, promote=True).items + ## XXX! 
we would like: return jit.hint(self, promote=True).items + ## XXX! but it gives horrible performance in some cases + return self.items def getitem(self, idx): return self.getitems()[idx] From commits-noreply at bitbucket.org Fri Apr 8 23:07:32 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 8 Apr 2011 23:07:32 +0200 (CEST) Subject: [pypy-svn] pypy default: cpyext: implement all rich comparisons Message-ID: <20110408210732.E71B0282BD7@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43237:712f0897e13f Date: 2011-04-08 15:44 +0200 http://bitbucket.org/pypy/pypy/changeset/712f0897e13f/ Log: cpyext: implement all rich comparisons diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -205,6 +205,10 @@ richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,18 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() From commits-noreply at bitbucket.org Fri Apr 8 23:07:34 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 8 Apr 2011 23:07:34 +0200 (CEST) Subject: [pypy-svn] pypy default: Add support for tp_compare Message-ID: 
<20110408210734.25C0B282BD7@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43238:48571fedd5a4 Date: 2011-04-08 16:28 +0200 http://bitbucket.org/pypy/pypy/changeset/48571fedd5a4/ Log: Add support for tp_compare diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -6,7 +6,7 @@ unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, - hashfunc, descrgetfunc, descrsetfunc, objobjproc) + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State @@ -197,10 +197,9 @@ def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) @@ -210,6 +209,21 @@ richcmp_gt = get_richcmp_func(Py_GT) richcmp_ge = get_richcmp_func(Py_GE) +def wrap_cmpfunc(space, w_self, w_args, func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) + @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, 
external=False) def slot_tp_new(space, type, w_args, w_kwds): from pypy.module.cpyext.tupleobject import PyTuple_Check diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -213,6 +213,11 @@ assert cmpr.__le__(4) is NotImplemented + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() From commits-noreply at bitbucket.org Sat Apr 9 00:04:14 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Sat, 9 Apr 2011 00:04:14 +0200 (CEST) Subject: [pypy-svn] pypy default: Add buffer() support for PyTypeObjects which define tp_as_buffer.bf_getreadbuffer Message-ID: <20110408220414.AB8C0282BD7@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43239:f83b7a19ee72 Date: 2011-04-09 00:03 +0200 
http://bitbucket.org/pypy/pypy/changeset/f83b7a19ee72/ Log: Add buffer() support for PyTypeObjects which define tp_as_buffer.bf_getreadbuffer diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -10,7 +10,7 @@ cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,18 @@ import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, - cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc) + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from 
pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -193,6 +195,29 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) @@ -589,12 +614,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff --git a/pypy/module/cpyext/test/test_arraymodule.py 
b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = 
cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) From commits-noreply at bitbucket.org Sat Apr 9 05:12:56 2011 From: commits-noreply at bitbucket.org (ademan) Date: Sat, 9 Apr 2011 05:12:56 +0200 (CEST) Subject: [pypy-svn] pypy fold_intadd: Fixed test, cleaned up, working on passing final test. 
Message-ID: <20110409031256.3AAD32A202B@codespeak.net> Author: Daniel Roberts Branch: fold_intadd Changeset: r43240:26bbe3710254 Date: 2011-04-08 20:12 -0700 http://bitbucket.org/pypy/pypy/changeset/26bbe3710254/ Log: Fixed test, cleaned up, working on passing final test. diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5561,17 +5561,18 @@ i2 = int_sub(4, i1) i3 = int_sub(i2, 14) i4 = int_add(6, i2) - jump(i3) + jump(i4) """ expected = """ [i0] i1 = int_add(i0, 3) i2 = int_sub(1, i0) - i3 = int_sub(i0, 13) - i4 = int_sub(7, i0) - jump(i3) - """ + i3 = int_sub(-13, i0) + i4 = int_sub(-7, i0) + jump(i4) + """ + self.optimize_loop(ops, expected) ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/addition.py b/pypy/jit/metainterp/optimizeopt/addition.py --- a/pypy/jit/metainterp/optimizeopt/addition.py +++ b/pypy/jit/metainterp/optimizeopt/addition.py @@ -38,7 +38,7 @@ self.roperands[result] = constant, root boxed_constant = ConstInt(constant) - new_op = ResOperation(rop.INT_SUB, [boxed_constant, variable], result) + new_op = ResOperation(rop.INT_SUB, [boxed_constant, root], result) self.emit_operation(new_op) return except KeyError: @@ -88,7 +88,7 @@ rv = self.getvalue(op.getarg(1)) result = op.result if lv.is_constant() and rv.is_constant(): - self.emit_operation(op) # XXX: there's support for optimizing this elsewhere, right? + self.emit_operation(op) elif lv.is_constant(): constant = lv.box.getint() self._process_add(constant, op.getarg(1), result) @@ -103,11 +103,10 @@ rv = self.getvalue(op.getarg(1)) result = op.result if lv.is_constant() and rv.is_constant(): - self.emit_operation(op) # XXX: there's support for optimizing this elsewhere, right? 
+ self.emit_operation(op) elif lv.is_constant(): constant = lv.box.getint() self._process_sub(constant, op.getarg(1), result) - #self.emit_operation(op) elif rv.is_constant(): constant = rv.box.getint() self._process_add(-constant, op.getarg(0), result) From commits-noreply at bitbucket.org Sat Apr 9 08:44:27 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Sat, 9 Apr 2011 08:44:27 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix translation Message-ID: <20110409064427.3E8E4282B8B@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43241:7ef1509cc516 Date: 2011-04-09 08:44 +0200 http://bitbucket.org/pypy/pypy/changeset/7ef1509cc516/ Log: Fix translation diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -7,7 +7,7 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, build_type_checkers) @@ -361,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -381,7 +381,7 @@ 
Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -243,9 +243,9 @@ space.type(w_other))): raise OperationError(space.w_TypeError, space.wrap( "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % - space.type(w_self).getname(space), - space.type(w_self).getname(space), - space.type(w_other).getname(space))) + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -395,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, From commits-noreply at bitbucket.org Sat Apr 9 09:40:06 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 9 Apr 2011 09:40:06 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix tests. Message-ID: <20110409074006.0FB83282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43242:eb4205db4277 Date: 2011-04-09 09:39 +0200 http://bitbucket.org/pypy/pypy/changeset/eb4205db4277/ Log: Fix tests. 
diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -178,9 +178,9 @@ self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 + self.addrs[1] = self.addrs[0] + 16*WORD self.addrs[2] = 0 - # 64 bytes + # 16 WORDs def malloc_slowpath(size): assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) @@ -225,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu From commits-noreply at bitbucket.org Sat Apr 9 09:44:48 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Sat, 9 Apr 2011 09:44:48 +0200 (CEST) Subject: [pypy-svn] pypy default: More fixes Message-ID: <20110409074448.31BC6282B8B@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43243:40ab3eac752e Date: 2011-04-09 09:44 +0200 http://bitbucket.org/pypy/pypy/changeset/40ab3eac752e/ Log: More fixes diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert 
c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -33,7 +33,7 @@ py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) py_frame.c_f_globals = make_ref(space, frame.w_globals) - py_frame.c_f_lineno = frame.f_lineno + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) @cpython_api([PyObject], lltype.Void, external=False) def frame_dealloc(space, py_obj): From commits-noreply at bitbucket.org Sat Apr 9 10:07:10 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Sat, 9 Apr 2011 10:07:10 +0200 (CEST) Subject: [pypy-svn] pypy default: fix array_{get,set}item Message-ID: <20110409080710.961CF282B8B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43244:09092bd91220 Date: 2011-04-09 10:06 +0200 http://bitbucket.org/pypy/pypy/changeset/09092bd91220/ Log: fix array_{get,set}item diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -284,18 +284,24 @@ UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, ootype.Unsigned, ootype.UnsignedLongLong] +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size + class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype - if TO == ootype.Float: - mnemonic = 'r8' - else: - if FROM in UNSIGNED_TYPES: - mnemonic = 'u' - else: - mnemonic = 'i' - mnemonic += 
str(INT_SIZE[TO]) + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix From commits-noreply at bitbucket.org Sat Apr 9 10:24:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 9 Apr 2011 10:24:35 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix on 64bit. It was hidden by the signal module's extra level around callbacks... Message-ID: <20110409082435.211C0282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43245:2548807e92bd Date: 2011-04-09 10:24 +0200 http://bitbucket.org/pypy/pypy/changeset/2548807e92bd/ Log: Fix on 64bit. It was hidden by the signal module's extra level around callbacks... 
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( From commits-noreply at bitbucket.org Sat Apr 9 11:05:37 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 11:05:37 +0200 (CEST) Subject: [pypy-svn] pypy default: Add a slight jitfriendliness in handling the operation error. Removes Message-ID: <20110409090537.0C922282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43246:7a7719ff3f05 Date: 2011-04-09 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7a7719ff3f05/ Log: Add a slight jitfriendliness in handling the operation error. Removes two setfields on a frame (since w_f_trace is not a virtualizable field) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. 
try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( From commits-noreply at bitbucket.org Sat Apr 9 18:28:06 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 18:28:06 +0200 (CEST) Subject: [pypy-svn] pypy default: Port a crucial fix for pypyjit.py Message-ID: <20110409162806.01234282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43247:e24b5524e3ad Date: 2011-04-09 17:46 +0200 http://bitbucket.org/pypy/pypy/changeset/e24b5524e3ad/ Log: Port a crucial fix for pypyjit.py diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -101,7 +101,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) From commits-noreply at bitbucket.org Sat Apr 9 18:28:08 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 18:28:08 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix two tests and temporarily skip one that's broken because of defaults Message-ID: <20110409162808.1C945282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43248:fcacb34da1a3 Date: 2011-04-09 18:27 +0200 http://bitbucket.org/pypy/pypy/changeset/fcacb34da1a3/ Log: Fix two tests and temporarily skip one that's broken because of defaults diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in 
PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -487,6 +487,7 @@ """) def test_range_iter(self): + py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre']: + '_socket', '_sre', '_file']: return True return False From commits-noreply at bitbucket.org Sat Apr 9 18:32:42 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 18:32:42 +0200 (CEST) Subject: [pypy-svn] pypy default: Don't care too much about postfixes of ConstClass values (to not be too Message-ID: <20110409163242.A90F7282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43249:bb52c9b7c327 Date: 2011-04-09 18:32 +0200 http://bitbucket.org/pypy/pypy/changeset/bb52c9b7c327/ Log: Don't care too much about postfixes of ConstClass values (to not be too precise about specializations) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py 
b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -265,7 +265,7 @@ if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -686,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, -1, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) From commits-noreply at bitbucket.org Sat Apr 9 18:33:59 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 18:33:59 +0200 (CEST) Subject: [pypy-svn] pypy default: Skip another test. We just broke it Message-ID: <20110409163359.0FEEE282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43250:344b8817d1df Date: 2011-04-09 18:33 +0200 http://bitbucket.org/pypy/pypy/changeset/344b8817d1df/ Log: Skip another test. 
We just broke it diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1010,6 +1010,7 @@ """) def test_func_defaults(self): + py.test.skip("skipped until we fix defaults") def main(n): i = 1 while i < n: From commits-noreply at bitbucket.org Sat Apr 9 19:30:18 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 19:30:18 +0200 (CEST) Subject: [pypy-svn] pypy default: Update a demo and keep pypyjit_child up to date Message-ID: <20110409173018.18750282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43251:ff6849737935 Date: 2011-04-09 19:28 +0200 http://bitbucket.org/pypy/pypy/changeset/ff6849737935/ Log: Update a demo and keep pypyjit_child up to date diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, 
CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) From commits-noreply at bitbucket.org Sat Apr 9 19:30:18 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 19:30:18 +0200 (CEST) Subject: [pypy-svn] pypy default: oops this should not go into previous commit Message-ID: <20110409173018.A369B282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43252:a5a0c9bb54e5 Date: 2011-04-09 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/a5a0c9bb54e5/ Log: oops this should not go into previous commit diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre', '_file']: + '_socket', '_sre']: return True return False From commits-noreply at bitbucket.org Sat Apr 9 19:30:19 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 19:30:19 +0200 (CEST) Subject: [pypy-svn] pypy default: show the _file module to the JIT. Gives some speedups for write/read Message-ID: <20110409173019.3A506282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43253:e048638c58f5 Date: 2011-04-09 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/e048638c58f5/ Log: show the _file module to the JIT. 
Gives some speedups for write/read (small ones though) diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre']: + '_socket', '_sre', '_file']: return True return False From commits-noreply at bitbucket.org Sat Apr 9 20:36:20 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 9 Apr 2011 20:36:20 +0200 (CEST) Subject: [pypy-svn] pypy kqueue: Implemented kevent and started on kqueue. Message-ID: <20110409183620.A8C78282B8B@codespeak.net> Author: Alex Gaynor Branch: kqueue Changeset: r43254:32456c2f21a2 Date: 2011-02-27 17:06 -0500 http://bitbucket.org/pypy/pypy/changeset/32456c2f21a2/ Log: Implemented kevent and started on kqueue. diff --git a/pypy/module/select/test/test_kqueue.py b/pypy/module/select/test/test_kqueue.py --- a/pypy/module/select/test/test_kqueue.py +++ b/pypy/module/select/test/test_kqueue.py @@ -168,4 +168,4 @@ a.close() b.close() - kq.close() \ No newline at end of file + kq.close() diff --git a/pypy/module/select/__init__.py b/pypy/module/select/__init__.py --- a/pypy/module/select/__init__.py +++ b/pypy/module/select/__init__.py @@ -17,18 +17,21 @@ # TODO: this doesn't feel right... 
if hasattr(select, "epoll"): - interpleveldefs['epoll'] = 'interp_epoll.W_Epoll' - symbols = [ + interpleveldefs["epoll"] = "interp_epoll.W_Epoll" + for symbol in [ "EPOLLIN", "EPOLLOUT", "EPOLLPRI", "EPOLLERR", "EPOLLHUP", "EPOLLET", "EPOLLONESHOT", "EPOLLRDNORM", "EPOLLRDBAND", "EPOLLWRNORM", "EPOLLWRBAND", "EPOLLMSG" - ] - for symbol in symbols: + ]: if hasattr(select, symbol): interpleveldefs[symbol] = "space.wrap(%s)" % getattr(select, symbol) if hasattr(select, "kqueue"): interpleveldefs["kqueue"] = "interp_kqueue.W_Kqueue" + interpleveldefs["kevent"] = "interp_kqueue.W_Kevent" + + for symbol in ["KQ_FILTER_READ", "KQ_FILTER_WRITE", "KQ_EV_ADD", "KQ_EV_ONESHOT", "KQ_EV_ENABLE"]: + interpleveldefs[symbol] = "space.wrap(interp_kqueue.%s)" % symbol def buildloaders(cls): diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -1,5 +1,190 @@ from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError, exception_from_errno +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef, generic_new_descr, GetSetProperty +from pypy.rlib._rsocket_rffi import socketclose +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rpython.tool import rffi_platform +from pypy.translator.tool.cbuild import ExternalCompilationInfo + + +eci = ExternalCompilationInfo( + includes = ["sys/event.h"], +) + + +class CConfig: + _compilation_info_ = eci + +CConfig.kevent = rffi_platform.Struct("struct kevent", [ + ("ident", rffi.UINT), + ("filter", rffi.INT), + ("flags", rffi.UINT), + ("fflags", rffi.UINT), + ("data", rffi.INT), + ("udata", rffi.VOIDP), +]) + +for symbol in ["EVFILT_READ", "EVFILT_WRITE", "EV_ADD", "EV_ONESHOT", "EV_ENABLE"]: + setattr(CConfig, symbol, rffi_platform.DefinedConstantInteger(symbol)) + +cconfig = rffi_platform.configure(CConfig) + +kevent = 
cconfig["kevent"] +KQ_FILTER_READ = cconfig["EVFILT_READ"] +KQ_FILTER_WRITE = cconfig["EVFILT_WRITE"] +KQ_EV_ADD = cconfig["EV_ADD"] +KQ_EV_ONESHOT = cconfig["EV_ONESHOT"] +KQ_EV_ENABLE = cconfig["EV_ENABLE"] + +kqueue = rffi.llexternal("kqueue", + [], + rffi.INT, + compilation_info=eci +) + class W_Kqueue(Wrappable): - pass \ No newline at end of file + def __init__(self, space, kqfd): + self.kqfd = kqfd + + def descr__new__(space, w_subtype): + kqfd = kqueue() + if kqfd < 0: + raise exception_from_errno(space, space.w_IOError) + return space.wrap(W_Kqueue(space, kqfd)) + + @unwrap_spec(fd=int) + def descr_fromfd(space, w_cls, fd): + return space.wrap(W_Kqueue(space, fd)) + + def __del__(self): + self.close() + + def get_closed(self): + return self.kqfd < 0 + + def close(self): + if not self.get_closed(): + socketclose(self.kqfd) + self.kqfd = -1 + + def check_closed(self, space): + if self.get_closed(): + raise OperationError(space.w_ValueError, space.wrap("I/O operation on closed kqueue fd")) + + def descr_get_closed(self, space): + return space.wrap(self.get_closed()) + + def descr_fileno(self, space): + self.check_closed(space) + return space.wrap(self.kqfd) + + def descr_close(self, space): + self.close() + + +W_Kqueue.typedef = TypeDef("select.kqueue", + __new__ = interp2app(W_Kqueue.descr__new__.im_func), + fromfd = interp2app(W_Kqueue.descr_fromfd.im_func, as_classmethod=True), + + closed = GetSetProperty(W_Kqueue.descr_get_closed), + fileno = interp2app(W_Kqueue.descr_fileno), + + close = interp2app(W_Kqueue.descr_close), +) +W_Kqueue.typedef.acceptable_as_base_class = False + + +class W_Kevent(Wrappable): + def __init__(self, space): + self.event = lltype.nullptr(kevent) + + def __del__(self): + if self.event: + lltype.free(self.event, flavor="raw") + + @unwrap_spec(filter=int, flags=int, fflags=int, data=int, udata=int) + def descr__init__(self, space, w_ident, filter=KQ_FILTER_READ, flags=KQ_EV_ADD, fflags=0, data=0, udata=0): + ident = 
space.c_filedescriptor_w(w_ident) + + self.event = lltype.malloc(kevent, flavor="raw") + rffi.setintfield(self.event, "c_ident", ident) + rffi.setintfield(self.event, "c_filter", filter) + rffi.setintfield(self.event, "c_flags", flags) + rffi.setintfield(self.event, "c_fflags", fflags) + rffi.setintfield(self.event, "c_data", data) + self.event.c_udata = rffi.cast(rffi.VOIDP, udata) + + def _compare_all_fields(self, other, op): + for field in ["ident", "filter", "flags", "fflags", "data", "udata"]: + lhs = getattr(self.event, "c_%s" % field) + rhs = getattr(other.event, "c_%s" % field) + if op == "eq": + if lhs != rhs: + return False + elif op == "lt": + if lhs < rhs: + return True + elif op == "ge": + if lhs >= rhs: + return True + else: + assert False + + if op == "eq": + return True + elif op == "lt": + return False + elif op == "ge": + return False + + def compare_all_fields(self, space, other, op): + if not space.interp_w(W_Kevent, other): + return space.w_NotImplemented + return space.wrap(self._compare_all_fields(other, op)) + + def descr__eq__(self, space, w_other): + return self.compare_all_fields(space, w_other, "eq") + + def descr__lt__(self, space, w_other): + return self.compare_all_fields(space, w_other, "lt") + + def descr__ge__(self, space, w_other): + return self.compare_all_fields(space, w_other, "ge") + + def descr_get_ident(self, space): + return space.wrap(self.event.c_ident) + + def descr_get_filter(self, space): + return space.wrap(self.event.c_filter) + + def descr_get_flags(self, space): + return space.wrap(self.event.c_flags) + + def descr_get_fflags(self, space): + return space.wrap(self.event.c_fflags) + + def descr_get_data(self, space): + return space.wrap(self.event.c_data) + + def descr_get_udata(self, space): + return space.wrap(rffi.cast(rffi.INT, self.event.c_udata)) + + +W_Kevent.typedef = TypeDef("select.kevent", + __new__ = generic_new_descr(W_Kevent), + __init__ = interp2app(W_Kevent.descr__init__), + __eq__ = 
interp2app(W_Kevent.descr__eq__), + __lt__ = interp2app(W_Kevent.descr__lt__), + __ge__ = interp2app(W_Kevent.descr__ge__), + + ident = GetSetProperty(W_Kevent.descr_get_ident), + filter = GetSetProperty(W_Kevent.descr_get_filter), + flags = GetSetProperty(W_Kevent.descr_get_flags), + fflags = GetSetProperty(W_Kevent.descr_get_fflags), + data = GetSetProperty(W_Kevent.descr_get_data), + udata = GetSetProperty(W_Kevent.descr_get_udata), +) +W_Kevent.typedef.acceptable_as_base_class = False From commits-noreply at bitbucket.org Sat Apr 9 20:55:11 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 9 Apr 2011 20:55:11 +0200 (CEST) Subject: [pypy-svn] pypy kqueue: merged upstream Message-ID: <20110409185511.3015A282B8B@codespeak.net> Author: Alex Gaynor Branch: kqueue Changeset: r43256:0b624568902e Date: 2011-04-09 14:33 -0400 http://bitbucket.org/pypy/pypy/changeset/0b624568902e/ Log: merged upstream diff --git a/pypy/module/select/test/test_kqueue.py b/pypy/module/select/test/test_kqueue.py --- a/pypy/module/select/test/test_kqueue.py +++ b/pypy/module/select/test/test_kqueue.py @@ -168,4 +168,4 @@ a.close() b.close() - kq.close() \ No newline at end of file + kq.close() diff --git a/.hgsubstate b/.hgsubstate new file mode 100644 diff --git a/pypy/module/select/__init__.py b/pypy/module/select/__init__.py --- a/pypy/module/select/__init__.py +++ b/pypy/module/select/__init__.py @@ -17,18 +17,21 @@ # TODO: this doesn't feel right... 
if hasattr(select, "epoll"): - interpleveldefs['epoll'] = 'interp_epoll.W_Epoll' - symbols = [ + interpleveldefs["epoll"] = "interp_epoll.W_Epoll" + for symbol in [ "EPOLLIN", "EPOLLOUT", "EPOLLPRI", "EPOLLERR", "EPOLLHUP", "EPOLLET", "EPOLLONESHOT", "EPOLLRDNORM", "EPOLLRDBAND", "EPOLLWRNORM", "EPOLLWRBAND", "EPOLLMSG" - ] - for symbol in symbols: + ]: if hasattr(select, symbol): interpleveldefs[symbol] = "space.wrap(%s)" % getattr(select, symbol) if hasattr(select, "kqueue"): interpleveldefs["kqueue"] = "interp_kqueue.W_Kqueue" + interpleveldefs["kevent"] = "interp_kqueue.W_Kevent" + + for symbol in ["KQ_FILTER_READ", "KQ_FILTER_WRITE", "KQ_EV_ADD", "KQ_EV_ONESHOT", "KQ_EV_ENABLE"]: + interpleveldefs[symbol] = "space.wrap(interp_kqueue.%s)" % symbol def buildloaders(cls): diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -1,5 +1,190 @@ from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError, exception_from_errno +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef, generic_new_descr, GetSetProperty +from pypy.rlib._rsocket_rffi import socketclose +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.rpython.tool import rffi_platform +from pypy.translator.tool.cbuild import ExternalCompilationInfo + + +eci = ExternalCompilationInfo( + includes = ["sys/event.h"], +) + + +class CConfig: + _compilation_info_ = eci + +CConfig.kevent = rffi_platform.Struct("struct kevent", [ + ("ident", rffi.UINT), + ("filter", rffi.INT), + ("flags", rffi.UINT), + ("fflags", rffi.UINT), + ("data", rffi.INT), + ("udata", rffi.VOIDP), +]) + +for symbol in ["EVFILT_READ", "EVFILT_WRITE", "EV_ADD", "EV_ONESHOT", "EV_ENABLE"]: + setattr(CConfig, symbol, rffi_platform.DefinedConstantInteger(symbol)) + +cconfig = rffi_platform.configure(CConfig) + +kevent = 
cconfig["kevent"] +KQ_FILTER_READ = cconfig["EVFILT_READ"] +KQ_FILTER_WRITE = cconfig["EVFILT_WRITE"] +KQ_EV_ADD = cconfig["EV_ADD"] +KQ_EV_ONESHOT = cconfig["EV_ONESHOT"] +KQ_EV_ENABLE = cconfig["EV_ENABLE"] + +kqueue = rffi.llexternal("kqueue", + [], + rffi.INT, + compilation_info=eci +) + class W_Kqueue(Wrappable): - pass \ No newline at end of file + def __init__(self, space, kqfd): + self.kqfd = kqfd + + def descr__new__(space, w_subtype): + kqfd = kqueue() + if kqfd < 0: + raise exception_from_errno(space, space.w_IOError) + return space.wrap(W_Kqueue(space, kqfd)) + + @unwrap_spec(fd=int) + def descr_fromfd(space, w_cls, fd): + return space.wrap(W_Kqueue(space, fd)) + + def __del__(self): + self.close() + + def get_closed(self): + return self.kqfd < 0 + + def close(self): + if not self.get_closed(): + socketclose(self.kqfd) + self.kqfd = -1 + + def check_closed(self, space): + if self.get_closed(): + raise OperationError(space.w_ValueError, space.wrap("I/O operation on closed kqueue fd")) + + def descr_get_closed(self, space): + return space.wrap(self.get_closed()) + + def descr_fileno(self, space): + self.check_closed(space) + return space.wrap(self.kqfd) + + def descr_close(self, space): + self.close() + + +W_Kqueue.typedef = TypeDef("select.kqueue", + __new__ = interp2app(W_Kqueue.descr__new__.im_func), + fromfd = interp2app(W_Kqueue.descr_fromfd.im_func, as_classmethod=True), + + closed = GetSetProperty(W_Kqueue.descr_get_closed), + fileno = interp2app(W_Kqueue.descr_fileno), + + close = interp2app(W_Kqueue.descr_close), +) +W_Kqueue.typedef.acceptable_as_base_class = False + + +class W_Kevent(Wrappable): + def __init__(self, space): + self.event = lltype.nullptr(kevent) + + def __del__(self): + if self.event: + lltype.free(self.event, flavor="raw") + + @unwrap_spec(filter=int, flags=int, fflags=int, data=int, udata=int) + def descr__init__(self, space, w_ident, filter=KQ_FILTER_READ, flags=KQ_EV_ADD, fflags=0, data=0, udata=0): + ident = 
space.c_filedescriptor_w(w_ident) + + self.event = lltype.malloc(kevent, flavor="raw") + rffi.setintfield(self.event, "c_ident", ident) + rffi.setintfield(self.event, "c_filter", filter) + rffi.setintfield(self.event, "c_flags", flags) + rffi.setintfield(self.event, "c_fflags", fflags) + rffi.setintfield(self.event, "c_data", data) + self.event.c_udata = rffi.cast(rffi.VOIDP, udata) + + def _compare_all_fields(self, other, op): + for field in ["ident", "filter", "flags", "fflags", "data", "udata"]: + lhs = getattr(self.event, "c_%s" % field) + rhs = getattr(other.event, "c_%s" % field) + if op == "eq": + if lhs != rhs: + return False + elif op == "lt": + if lhs < rhs: + return True + elif op == "ge": + if lhs >= rhs: + return True + else: + assert False + + if op == "eq": + return True + elif op == "lt": + return False + elif op == "ge": + return False + + def compare_all_fields(self, space, other, op): + if not space.interp_w(W_Kevent, other): + return space.w_NotImplemented + return space.wrap(self._compare_all_fields(other, op)) + + def descr__eq__(self, space, w_other): + return self.compare_all_fields(space, w_other, "eq") + + def descr__lt__(self, space, w_other): + return self.compare_all_fields(space, w_other, "lt") + + def descr__ge__(self, space, w_other): + return self.compare_all_fields(space, w_other, "ge") + + def descr_get_ident(self, space): + return space.wrap(self.event.c_ident) + + def descr_get_filter(self, space): + return space.wrap(self.event.c_filter) + + def descr_get_flags(self, space): + return space.wrap(self.event.c_flags) + + def descr_get_fflags(self, space): + return space.wrap(self.event.c_fflags) + + def descr_get_data(self, space): + return space.wrap(self.event.c_data) + + def descr_get_udata(self, space): + return space.wrap(rffi.cast(rffi.INT, self.event.c_udata)) + + +W_Kevent.typedef = TypeDef("select.kevent", + __new__ = generic_new_descr(W_Kevent), + __init__ = interp2app(W_Kevent.descr__init__), + __eq__ = 
interp2app(W_Kevent.descr__eq__), + __lt__ = interp2app(W_Kevent.descr__lt__), + __ge__ = interp2app(W_Kevent.descr__ge__), + + ident = GetSetProperty(W_Kevent.descr_get_ident), + filter = GetSetProperty(W_Kevent.descr_get_filter), + flags = GetSetProperty(W_Kevent.descr_get_flags), + fflags = GetSetProperty(W_Kevent.descr_get_fflags), + data = GetSetProperty(W_Kevent.descr_get_data), + udata = GetSetProperty(W_Kevent.descr_get_udata), +) +W_Kevent.typedef.acceptable_as_base_class = False From commits-noreply at bitbucket.org Sat Apr 9 21:41:29 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 21:41:29 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Finish the dance with C files. Now debug_print is also in .c and .h Message-ID: <20110409194129.809FA282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43257:139ceb227869 Date: 2011-04-09 21:40 +0200 http://bitbucket.org/pypy/pypy/changeset/139ceb227869/ Log: Finish the dance with C files. 
Now debug_print is also in .c and .h diff --git a/pypy/translator/c/src/debug_print.h b/pypy/translator/c/src/debug_print.h --- a/pypy/translator/c/src/debug_print.h +++ b/pypy/translator/c/src/debug_print.h @@ -39,161 +39,24 @@ extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; -/* implementations */ - #define OP_LL_READ_TIMESTAMP(val) READ_TIMESTAMP(val) -#ifndef PYPY_NOT_MAIN_FILE -#include +#include "src/asm.h" -long pypy_have_debug_prints = -1; -FILE *pypy_debug_file = NULL; -static bool_t debug_ready = 0; -static bool_t debug_profile = 0; -static char *debug_start_colors_1 = ""; -static char *debug_start_colors_2 = ""; -static char *debug_stop_colors = ""; -static char *debug_prefix = NULL; - -static void pypy_debug_open(void) -{ - char *filename = getenv("PYPYLOG"); - if (filename) -#ifndef MS_WINDOWS - unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ -#else - putenv("PYPYLOG="); /* don't pass it to subprocesses */ -#endif - if (filename && filename[0]) - { - char *colon = strchr(filename, ':'); - if (!colon) - { - /* PYPYLOG=filename --- profiling version */ - debug_profile = 1; - pypy_setup_profiling(); - } - else - { - /* PYPYLOG=prefix:filename --- conditional logging */ - int n = colon - filename; - debug_prefix = malloc(n + 1); - memcpy(debug_prefix, filename, n); - debug_prefix[n] = '\0'; - filename = colon + 1; - } - if (strcmp(filename, "-") != 0) - pypy_debug_file = fopen(filename, "w"); - } - if (!pypy_debug_file) - { - pypy_debug_file = stderr; - if (isatty(2)) - { - debug_start_colors_1 = "\033[1m\033[31m"; - debug_start_colors_2 = "\033[31m"; - debug_stop_colors = "\033[0m"; - } - } - debug_ready = 1; -} - -void pypy_debug_ensure_opened(void) -{ - if (!debug_ready) - pypy_debug_open(); -} - -#ifndef READ_TIMESTAMP /* asm_xxx.h may contain a specific implementation of READ_TIMESTAMP. * This is the default generic timestamp implementation. 
*/ +#ifndef READ_TIMESTAMP + # ifdef _WIN32 # define READ_TIMESTAMP(val) QueryPerformanceCounter((LARGE_INTEGER*)&(val)) # else # include # include + +long long pypy_read_timestamp(); + # define READ_TIMESTAMP(val) (val) = pypy_read_timestamp() - static long long pypy_read_timestamp(void) - { -# ifdef CLOCK_THREAD_CPUTIME_ID - struct timespec tspec; - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec); - return ((long long)tspec.tv_sec) * 1000000000LL + tspec.tv_nsec; -# else - /* argh, we don't seem to have clock_gettime(). Bad OS. */ - struct timeval tv; - gettimeofday(&tv, NULL); - return ((long long)tv.tv_sec) * 1000000LL + tv.tv_usec; -# endif - } # endif #endif - - -static bool_t startswithoneof(const char *str, const char *substr) -{ - const char *p = str; - for (; *substr; substr++) - { - if (*substr != ',') - { - if (p && *p++ != *substr) - p = NULL; /* mismatch */ - } - else if (p != NULL) - return 1; /* match */ - else - p = str; /* mismatched, retry with the next */ - } - return p != NULL; -} - -#if defined(_MSC_VER) || defined(__MINGW32__) -#define PYPY_LONG_LONG_PRINTF_FORMAT "I64" -#else -#define PYPY_LONG_LONG_PRINTF_FORMAT "ll" -#endif - -static void display_startstop(const char *prefix, const char *postfix, - const char *category, const char *colors) -{ - long long timestamp; - READ_TIMESTAMP(timestamp); - fprintf(pypy_debug_file, "%s[%"PYPY_LONG_LONG_PRINTF_FORMAT"x] %s%s%s\n%s", - colors, - timestamp, prefix, category, postfix, - debug_stop_colors); -} - -void pypy_debug_start(const char *category) -{ - pypy_debug_ensure_opened(); - /* Enter a nesting level. Nested debug_prints are disabled by default - because the following left shift introduces a 0 in the last bit. - Note that this logic assumes that we are never going to nest - debug_starts more than 31 levels (63 on 64-bits). 
*/ - pypy_have_debug_prints <<= 1; - if (!debug_profile) - { - /* non-profiling version */ - if (!debug_prefix || !startswithoneof(category, debug_prefix)) - { - /* wrong section name, or no PYPYLOG at all, skip it */ - return; - } - /* else make this subsection active */ - pypy_have_debug_prints |= 1; - } - display_startstop("{", "", category, debug_start_colors_1); -} - -void pypy_debug_stop(const char *category) -{ - if (debug_profile | (pypy_have_debug_prints & 1)) - display_startstop("", "}", category, debug_start_colors_2); - pypy_have_debug_prints >>= 1; -} - -#endif /* PYPY_NOT_MAIN_FILE */ diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -41,10 +41,11 @@ #include "src/profiling.h" +#include "src/debug_print.h" + /*** modules ***/ #ifdef HAVE_RTYPER /* only if we have an RTyper */ # include "src/rtyper.h" -# include "src/debug_print.h" # include "src/debug_traceback.h" # include "src/debug_alloc.h" #ifndef AVR diff --git a/pypy/translator/c/src/align.h b/pypy/translator/c/src/align.h --- a/pypy/translator/c/src/align.h +++ b/pypy/translator/c/src/align.h @@ -1,3 +1,6 @@ + +#ifndef _PYPY_ALIGN_H +#define _PYPY_ALIGN_H /* alignment for arena-based garbage collectors: the following line enforces an alignment that should be enough for any structure @@ -14,3 +17,5 @@ #define ROUND_UP_FOR_ALLOCATION(x, minsize) \ ((((x)>=(minsize)?(x):(minsize)) \ + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1)) + +#endif //_PYPY_ALIGN_H diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -917,8 +917,11 @@ def add_extra_files(eci): srcdir = py.path.local(autopath.pypydir).join('translator', 'c', 'src') - return eci.merge(ExternalCompilationInfo( - separate_module_files=[srcdir.join('profiling.c')])) + files = [ + srcdir / 'profiling.c', + srcdir / 'debug_print.c', + ] + return 
eci.merge(ExternalCompilationInfo(separate_module_files=files)) def gen_source_standalone(database, modulename, targetdir, eci, entrypointname, defines={}): From commits-noreply at bitbucket.org Sat Apr 9 21:46:01 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 21:46:01 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: merge default Message-ID: <20110409194601.24B85282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43258:629b5b05a04c Date: 2011-04-09 21:45 +0200 http://bitbucket.org/pypy/pypy/changeset/629b5b05a04c/ Log: merge default diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ 
args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', 
OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -155,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -2,6 +2,7 @@ from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop @@ -22,6 +23,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -35,6 +38,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -588,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be 
true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -666,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -689,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -837,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = 
ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) 
-tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre', '_lsprof']: + '_socket', '_sre', '_lsprof', '_file']: return True return False diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': 
+ ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -522,7 +522,7 @@ return code = frame.pycode if frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -560,5 +560,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = 
py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call 
these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." + raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return 
vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, 
string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -4960,6 +4960,58 @@ p2 = 
new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def 
test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -514,12 +514,10 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? - self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? 
@@ -538,6 +536,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -860,15 +860,27 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) if gc_ll_descr.gcrootmap and 
gc_ll_descr.gcrootmap.is_shadow_stack: # ---- shadowstack ---- # We need edx as a temporary, but otherwise don't save any more - # register. See comments in _build_malloc_fixedsize_slowpath(). + # register. See comments in _build_malloc_slowpath(). tmp_box = TempBox() self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) @@ -885,16 +897,16 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=reg) self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -904,7 +916,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -963,16 +975,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + 
self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = 
self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ 
b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? ## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -45,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True gcrootmap = MockGcRootMap() @@ -167,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + 
self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -205,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -221,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -255,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -275,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -290,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert 
gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. 
+ ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -91,9 +91,10 @@ else: # XXX that's slow def case_ok(filename): - index1 = filename.rfind(os.sep) - index2 = filename.rfind(os.altsep) - index = max(index1, index2) + index = 
filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: @@ -118,107 +119,6 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) -def _get_relative_name(space, modulename, level, w_globals): - w = space.wrap - ctxt_w_package = space.finditem(w_globals, w('__package__')) - - ctxt_package = None - if ctxt_w_package is not None and ctxt_w_package is not space.w_None: - try: - ctxt_package = space.str_w(ctxt_w_package) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_ValueError, space.wrap( - "__package__ set to non-string")) - - if ctxt_package is not None: - # __package__ is set, so use it - if ctxt_package == '' and level < 0: - return None, 0 - - package_parts = ctxt_package.split('.') - while level > 1 and package_parts: - level -= 1 - package_parts.pop() - if not package_parts: - if len(ctxt_package) == 0: - msg = "Attempted relative import in non-package" - else: - msg = "Attempted relative import beyond toplevel package" - raise OperationError(space.w_ValueError, w(msg)) - - # Try to import parent package - try: - w_parent = absolute_import(space, ctxt_package, 0, - None, tentative=False) - except OperationError, e: - if not e.match(space, space.w_ImportError): - raise - if level > 0: - raise OperationError(space.w_SystemError, space.wrap( - "Parent module '%s' not loaded, " - "cannot perform relative import" % ctxt_package)) - else: - space.warn("Parent module '%s' not found " - "while handling absolute import" % ctxt_package, - space.w_RuntimeWarning) - - rel_level = len(package_parts) - if modulename: - package_parts.append(modulename) - rel_modulename = '.'.join(package_parts) - else: - # __package__ not set, so figure it out and set it - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, 
w('__path__')) - - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - - if not ctxt_name: - return None, 0 - - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - - if level > 0 and not ctxt_name_prefix_parts: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - - rel_modulename = '.'.join(ctxt_name_prefix_parts) - - if ctxt_w_path is not None: - # __path__ is set, so __name__ is already the package name - space.setitem(w_globals, w("__package__"), ctxt_w_name) - else: - # Normal module, so work out the package name if any - if '.' not in ctxt_name: - space.setitem(w_globals, w("__package__"), space.w_None) - elif rel_modulename: - space.setitem(w_globals, w("__package__"), w(rel_modulename)) - - if modulename: - if rel_modulename: - rel_modulename += '.' + modulename - else: - rel_modulename = modulename - - rel_level = len(ctxt_name_prefix_parts) - - return rel_modulename, rel_level - - @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -240,40 +140,68 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) + ctxt_w_name = space.finditem(w_globals, w('__name__')) + ctxt_w_path = space.finditem(w_globals, w('__path__')) - if rel_modulename: - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. 
- if level == -1: - tentative = True - else: - tentative = False + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise - w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod + if ctxt_name is not None: + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + if ctxt_name_prefix_parts: + rel_modulename = '.'.join(ctxt_name_prefix_parts) + if modulename: + rel_modulename += '.' + modulename + baselevel = len(ctxt_name_prefix_parts) + if rel_modulename is not None: + # XXX What is this check about? There is no test for it + w_mod = check_sys_modules(space, w(rel_modulename)) - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + if (w_mod is None or + not space.is_w(w_mod, space.w_None) or + level > 0): + + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. 
+ if level == -1: + tentative = True + else: + tentative = False + + w_mod = absolute_import(space, rel_modulename, + baselevel, fromlist_w, + tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod + else: + rel_modulename = None + + if level > 0: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + w_mod = absolute_import_try(space, modulename, 0, fromlist_w) + if w_mod is None or space.is_w(w_mod, space.w_None): + w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) space.timer.stop_name("importhook", modulename) return w_mod + at jit.dont_look_inside def absolute_import(space, modulename, baselevel, fromlist_w, tentative): - # Short path: check in sys.modules - w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) - if w_mod is not None and not space.is_w(w_mod, space.w_None): - return w_mod - return absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative) - - at jit.dont_look_inside -def absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative): lock = getimportlock(space) lock.acquire_lock() try: diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def 
test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ 
b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def 
test_itimer_real(self): import signal diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -266,7 +265,7 @@ if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -116,13 +117,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong 
-from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # 
____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -160,11 +179,12 @@ 
interpleveldefs[name] = 'interp_posix.' + name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - 
ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 
'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -1148,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int 
chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. 
try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + 
Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return 
non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function 
+def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -28,7 +28,9 @@ self.items = items def getitems(self): - return jit.hint(self, promote=True).items + ## XXX! we would like: return jit.hint(self, promote=True).items + ## XXX! 
but it gives horrible performance in some cases + return self.items def getitem(self, idx): return self.getitems()[idx] diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -61,27 +61,3 @@ assert not imp.lock_held() self.waitfor(lambda: done) assert done - -class TestImportLock: - def test_lock(self, space, monkeypatch): - from pypy.module.imp.importing import getimportlock, importhook - - # Monkeypatch the import lock and add a counter - importlock = getimportlock(space) - original_acquire = importlock.acquire_lock - def acquire_lock(): - importlock.count += 1 - original_acquire() - importlock.count = 0 - monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) - - # An already imported module - importhook(space, 'sys') - assert importlock.count == 0 - # A new module - importhook(space, 're') - assert importlock.count == 7 - # Import it 
again - previous_count = importlock.count - importhook(space, 're') - assert importlock.count == previous_count diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX 
what if the frame is in another thread?? @@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). 
# if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return 
modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -1116,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. 
Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1965,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. In 2.x, this is just a alias diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - 
("bf_getreadbuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -539,7 +527,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -7,10 +7,10 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, 
PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError @@ -361,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -381,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -245,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. 
This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -385,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -487,6 +487,7 @@ """) def test_range_iter(self): + py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, -1, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = 
call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,6 +1010,7 @@ """) def test_func_defaults(self): + py.test.skip("skipped until we fix defaults") def main(n): i = 1 while i < n: diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -51,3 +53,23 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. 
+ Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, 
count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", 
rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -438,38 +438,6 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg - def test__package__(self): - # Regression test for http://bugs.python.org/issue3221. - def check_absolute(): - exec "from os import path" in ns - def check_relative(): - exec "from . import a" in ns - - # Check both OK with __package__ and __name__ correct - ns = dict(__package__='pkg', __name__='pkg.notarealmodule') - check_absolute() - check_relative() - - # Check both OK with only __name__ wrong - ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') - check_absolute() - check_relative() - - # Check relative fails with only __package__ wrong - ns = dict(__package__='foo', __name__='pkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check relative fails with __package__ and __name__ wrong - ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') - check_absolute() # XXX check warnings - raises(SystemError, check_relative) - - # Check both fail with package set to a non-string - ns = dict(__package__=object()) - raises(ValueError, check_absolute) - raises(ValueError, check_relative) - def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 
+253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -86,6 +86,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -114,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff 
--git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,16 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + def test_reload(self, space, api): pdb = api.PyImport_Import(space.wrap("pdb")) space.delattr(pdb, space.wrap("set_trace")) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,18 @@ import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, - hashfunc, descrgetfunc, descrsetfunc, objobjproc) + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from 
pypy.interpreter.buffer import Buffer as W_Buffer from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -193,18 +195,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + 
(space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -571,12 +614,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def 
PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -77,8 +77,8 @@ self.loop_run_counters = [] 
self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -123,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) @@ -171,7 +171,7 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): # With asmgcc, we need two helpers, so that we can write two CALL # instructions in assembler, with a mark_gc_roots in between. # With shadowstack, this is not needed, so we produce a single helper. 
@@ -183,7 +183,7 @@ for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() # if gcrootmap is not None and gcrootmap.is_shadow_stack: # ---- shadowstack ---- @@ -208,7 +208,7 @@ mc.MOV_rr(edi.value, edx.value) mc.JMP(imm(addr)) # tail call to the real malloc rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart + self.malloc_slowpath1 = rawstart # ---------- second helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() # @@ -219,7 +219,7 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() @@ -1273,6 +1273,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -2094,8 +2099,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2103,7 +2107,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = 
self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2122,11 +2126,11 @@ shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: # there are two helpers to call only with asmgcc - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + slowpath_addr1 = self.malloc_slowpath1 self.mc.CALL(imm(slowpath_addr1)) self.mark_gc_roots(self.write_new_force_index(), use_copy_area=shadow_stack) - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ From commits-noreply at bitbucket.org Sat Apr 9 21:50:57 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 21:50:57 +0200 (CEST) Subject: [pypy-svn] jitviewer default: aha! Message-ID: <20110409195057.EC59B282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r114:48254ef1335a Date: 2011-04-09 21:50 +0200 http://bitbucket.org/pypy/jitviewer/changeset/48254ef1335a/ Log: aha! 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -10,6 +10,6 @@ url='http://pypy.org', packages=['_jitviewer'], scripts=['bin/jitviewer.py'], - requires=['flask', 'pygments', 'simplejson'], + install_requires=['flask', 'pygments', 'simplejson'], include_package_data=True, package_data={'': ['templates/*.html', 'static/*']}) diff --git a/_jitviewer/test/x.py b/_jitviewer/test/x.py --- a/_jitviewer/test/x.py +++ b/_jitviewer/test/x.py @@ -1,12 +0,0 @@ - -def f(a, b): - return a + b - -def g(): - i = 0 - while i < 10: - a = 'foo' - i += 1 - -def h(): - [x for x in range(10)] From commits-noreply at bitbucket.org Sat Apr 9 22:43:05 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 22:43:05 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Improve in case two codes start on the same line. Example is module code Message-ID: <20110409204305.1E842282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43259:66e11c1f3ecc Date: 2011-04-09 22:42 +0200 http://bitbucket.org/pypy/pypy/changeset/66e11c1f3ecc/ Log: Improve in case two codes start on the same line. 
Example is module code or two list compr in the same line diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,26 +30,28 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. 
because the .py files changed since # the log was produced, or because the co_firstlineno # attribute of the code object is wrong (e.g., code objects # produced by gateway.applevel(), such as the ones found in # nanos.py) + import pdb + pdb.set_trace() return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res From commits-noreply at bitbucket.org Sat Apr 9 22:46:03 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 22:46:03 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: remove pdb Message-ID: <20110409204603.A0413282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43260:830f0fc0b584 Date: 2011-04-09 22:45 
+0200 http://bitbucket.org/pypy/pypy/changeset/830f0fc0b584/ Log: remove pdb diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -48,8 +48,6 @@ # attribute of the code object is wrong (e.g., code objects # produced by gateway.applevel(), such as the ones found in # nanos.py) - import pdb - pdb.set_trace() return None code = codeobjs[(startlineno, name)] res = dis(code) From commits-noreply at bitbucket.org Sat Apr 9 22:47:37 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 22:47:37 +0200 (CEST) Subject: [pypy-svn] pypy default: Another attempt at promoting defaults - so far only for builtin func Message-ID: <20110409204737.CFCBE282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43261:26b1fa0e192d Date: 2011-04-09 22:26 +0200 http://bitbucket.org/pypy/pypy/changeset/26b1fa0e192d/ Log: Another attempt at promoting defaults - so far only for builtin func diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,14 +22,20 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - ## XXX! we would like: return jit.hint(self, promote=True).items - ## XXX! but it gives horrible performance in some cases + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. 
Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items return self.items def getitem(self, idx): @@ -46,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -622,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module From commits-noreply at bitbucket.org Sat Apr 9 22:47:40 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 22:47:40 +0200 (CEST) Subject: [pypy-svn] pypy default: Improve in case two codes start on the same line. Example is module code Message-ID: <20110409204740.6F633282BD8@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43262:1e37dfa6b88e Date: 2011-04-09 22:42 +0200 http://bitbucket.org/pypy/pypy/changeset/1e37dfa6b88e/ Log: Improve in case two codes start on the same line. 
Example is module code or two list compr in the same line diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,26 +30,28 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. 
because the .py files changed since # the log was produced, or because the co_firstlineno # attribute of the code object is wrong (e.g., code objects # produced by gateway.applevel(), such as the ones found in # nanos.py) + import pdb + pdb.set_trace() return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res From commits-noreply at bitbucket.org Sat Apr 9 22:48:14 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 22:48:14 +0200 (CEST) Subject: [pypy-svn] pypy default: remove pdb Message-ID: <20110409204814.19C38282B8B@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43263:c412f0d120ef Date: 2011-04-09 22:45 +0200 
http://bitbucket.org/pypy/pypy/changeset/c412f0d120ef/ Log: remove pdb diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -48,8 +48,6 @@ # attribute of the code object is wrong (e.g., code objects # produced by gateway.applevel(), such as the ones found in # nanos.py) - import pdb - pdb.set_trace() return None code = codeobjs[(startlineno, name)] res = dis(code) From commits-noreply at bitbucket.org Sat Apr 9 22:49:56 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 9 Apr 2011 22:49:56 +0200 (CEST) Subject: [pypy-svn] pypy kqueue: a little work Message-ID: <20110409204956.468A4282B8B@codespeak.net> Author: Alex Gaynor Branch: kqueue Changeset: r43264:cc37b011ea0f Date: 2011-04-09 16:25 -0400 http://bitbucket.org/pypy/pypy/changeset/cc37b011ea0f/ Log: a little work diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -84,6 +84,19 @@ def descr_close(self, space): self.close() + @unwrap_spec(max_events=int) + def descr_control(self, space, w_changelist, max_events, w_timeout=None): + self.check_closed(space) + + if max_events < 0: + raise operationerrfmt(space.w_ValueError, + "Length of eventlist must be 0 or positive, got %d", max_events + ) + + if space.is_w(w_timeout, space.w_None): + timeoutspec = + + W_Kqueue.typedef = TypeDef("select.kqueue", __new__ = interp2app(W_Kqueue.descr__new__.im_func), @@ -93,6 +106,7 @@ fileno = interp2app(W_Kqueue.descr_fileno), close = interp2app(W_Kqueue.descr_close), + control = interp2app(W_Kqueue.descr_control), ) W_Kqueue.typedef.acceptable_as_base_class = False From commits-noreply at bitbucket.org Sat Apr 9 22:49:57 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 9 Apr 2011 22:49:57 +0200 (CEST) Subject: [pypy-svn] pypy default: (alex, kleptog): optimize 
int_{l, r}shift where the second argument is 0. Message-ID: <20110409204957.91A40282BD7@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43265:aa9754ba032c Date: 2011-04-09 16:49 -0400 http://bitbucket.org/pypy/pypy/changeset/aa9754ba032c/ Log: (alex, kleptog): optimize int_{l,r}shift where the second argument is 0. diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -2757,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2791,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2828,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + 
self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): From commits-noreply at bitbucket.org Sat Apr 9 22:49:59 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 9 Apr 2011 22:49:59 +0200 (CEST) Subject: [pypy-svn] pypy default: merged upstream Message-ID: <20110409204959.EB694282BD7@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43266:a0da832ddbe4 Date: 2011-04-09 16:49 -0400 http://bitbucket.org/pypy/pypy/changeset/a0da832ddbe4/ Log: merged upstream diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,14 +22,20 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - ## XXX! we would like: return jit.hint(self, promote=True).items - ## XXX! but it gives horrible performance in some cases + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. 
Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items return self.items def getitem(self, idx): @@ -46,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -622,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this 
can happen for # various reasons, e.g. because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, 
self.startlineno, + self.name) def repr(self): if self.filename is None: From commits-noreply at bitbucket.org Sat Apr 9 23:09:13 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 23:09:13 +0200 (CEST) Subject: [pypy-svn] jitviewer default: oops, fix jitviewer Message-ID: <20110409210913.4DD6A36C210@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r115:03d8571e17c7 Date: 2011-04-09 23:09 +0200 http://bitbucket.org/pypy/jitviewer/changeset/03d8571e17c7/ Log: oops, fix jitviewer diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -140,8 +140,8 @@ flask.Flask.__init__(self, *args, **kwargs) class CheckingLoopStorage(LoopStorage): - def disassemble_code(self, fname, startlineno): - result = super(CheckingLoopStorage, self).disassemble_code(fname, startlineno) + def disassemble_code(self, fname, startlineno, name): + result = super(CheckingLoopStorage, self).disassemble_code(fname, startlineno, name) if result is None and fname is not None: raise CannotFindFile(fname) return result From commits-noreply at bitbucket.org Sat Apr 9 23:10:45 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 9 Apr 2011 23:10:45 +0200 (CEST) Subject: [pypy-svn] jitviewer default: bad me, fix tests Message-ID: <20110409211045.677E236C210@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r116:7f885788a5b8 Date: 2011-04-09 23:10 +0200 http://bitbucket.org/pypy/jitviewer/changeset/7f885788a5b8/ Log: bad me, fix tests diff --git a/_jitviewer/test/test_display.py b/_jitviewer/test/test_display.py --- a/_jitviewer/test/test_display.py +++ b/_jitviewer/test/test_display.py @@ -2,7 +2,7 @@ from _jitviewer.display import CodeRepr class MockLoop(object): - pass + inputargs = [] class MockCode(object): pass From commits-noreply at bitbucket.org Sun Apr 10 01:23:48 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Sun, 10 Apr 2011 01:23:48 +0200 (CEST) Subject: [pypy-svn] extradoc 
extradoc: Add myself Message-ID: <20110409232348.71262282B8B@codespeak.net> Author: Guillebert Romain Branch: extradoc Changeset: r3490:873b9d9ae3d4 Date: 2011-04-10 00:23 +0100 http://bitbucket.org/pypy/extradoc/changeset/873b9d9ae3d4/ Log: Add myself diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -17,5 +17,6 @@ Antonio Cuni 26-30 Hotel Poseidon his own diet :) Armin Rigo 23-02 SGS Veckobostader Hakan Ardo 24-27 ??? +Romain Guillebert 23-03 ??? ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Sun Apr 10 01:35:08 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sun, 10 Apr 2011 01:35:08 +0200 (CEST) Subject: [pypy-svn] pypy default: Don't call libc for isnan, just implement it directly. Message-ID: <20110409233508.B1EFF36C213@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43267:fbf29f3a8642 Date: 2011-04-09 19:34 -0400 http://bitbucket.org/pypy/pypy/changeset/fbf29f3a8642/ Log: Don't call libc for isnan, just implement it directly. diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -2,7 +2,6 @@ that don't implement these functions already. */ int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -28,12 +28,6 @@ return PyPy_IS_INFINITY(x); } -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -21,7 +21,7 @@ export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_isinf'], ) math_prefix = '_pypy_math_' else: @@ -58,7 +58,6 @@ math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,9 +90,10 @@ # # Custom implementations - at jit.purefunction def ll_math_isnan(y): - return bool(math_isnan(y)) + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. + return y != y @jit.purefunction def ll_math_isinf(y): From commits-noreply at bitbucket.org Sun Apr 10 01:40:48 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sun, 10 Apr 2011 01:40:48 +0200 (CEST) Subject: [pypy-svn] pypy default: Don't generate a call to libc for isinf, instead implement it directly. Message-ID: <20110409234048.68C8F36C213@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43268:c7a7acad0692 Date: 2011-04-09 19:40 -0400 http://bitbucket.org/pypy/pypy/changeset/c7a7acad0692/ Log: Don't generate a call to libc for isinf, instead implement it directly. diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,8 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ -int _pypy_math_isinf(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,12 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,7 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -95,9 +93,8 @@ # are awesome. 
return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + return not isnan(y) and isnan(y - y) ll_math_copysign = math_copysign From commits-noreply at bitbucket.org Sun Apr 10 12:43:34 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 10 Apr 2011 12:43:34 +0200 (CEST) Subject: [pypy-svn] pypy default: revert looking into _file module until we investigate it better Message-ID: <20110410104334.438C0282BAD@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43269:783ec949d907 Date: 2011-04-10 12:43 +0200 http://bitbucket.org/pypy/pypy/changeset/783ec949d907/ Log: revert looking into _file module until we investigate it better diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre', '_file']: + '_socket', '_sre']: return True return False From commits-noreply at bitbucket.org Sun Apr 10 12:49:32 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 10 Apr 2011 12:49:32 +0200 (CEST) Subject: [pypy-svn] jitviewer default: Doh. Fix the call on untested piece Message-ID: <20110410104932.CF4A736C210@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r117:b27539e5ab1d Date: 2011-04-10 12:49 +0200 http://bitbucket.org/pypy/jitviewer/changeset/b27539e5ab1d/ Log: Doh. 
Fix the call on untested piece diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -108,7 +108,8 @@ startline, endline = loop.linerange if loop.filename is not None: - code = self.storage.load_code(loop.filename)[loop.startlineno] + code = self.storage.load_code(loop.filename)[(loop.startlineno, + loop.name)] source = CodeRepr(inspect.getsource(code), code, loop) else: source = CodeReprNoFile(loop) From commits-noreply at bitbucket.org Sun Apr 10 21:50:39 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 10 Apr 2011 21:50:39 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: merge default Message-ID: <20110410195039.C13EE282BAD@codespeak.net> Author: Armin Rigo Branch: out-of-line-guards-2 Changeset: r43270:e0139924067c Date: 2011-04-09 12:27 +0200 http://bitbucket.org/pypy/pypy/changeset/e0139924067c/ Log: merge default diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): 
result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = 
self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 #__________________________________________________________ def test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if 
isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/module/sys/interp_encoding.py 
b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. - - Longer tasks ------------ diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. 
@@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - 
needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ 
b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -155,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -2,6 +2,7 @@ from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop @@ -22,6 +23,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -35,6 +38,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, 
descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -588,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -666,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. 
gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -689,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -837,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -79,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. 
_`optimization level`: config/opt.html diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, 
oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': 
[IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/jit/backend/x86/rx86.py 
b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitly encoded as well, +# with immediate(argnum)). + +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' on 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,8 +364,8 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) - INSN_ji8 = insn(rex_w, '\x83', orbyte(base), '\x05', immediate(1), + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -460,10 +476,12 @@ CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) + CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) - CMP_jr = insn(rex_w, 
'\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -511,7 +529,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -537,7 +555,7 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions @@ -645,7 +663,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -686,7 +704,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + 
if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, 
exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? 
- m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def 
translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -17,8 +17,8 @@ '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject 
+import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is 
an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." + raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from 
pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled 
after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ 
b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -4960,6 +4960,58 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + 
jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ 
b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -517,12 +517,10 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? - self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? @@ -541,6 +539,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except 
KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def 
test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -860,15 +860,27 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - 
def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: # ---- shadowstack ---- # We need edx as a temporary, but otherwise don't save any more - # register. See comments in _build_malloc_fixedsize_slowpath(). + # register. See comments in _build_malloc_slowpath(). 
tmp_box = TempBox() self.rm.force_allocate_reg(tmp_box, selected_reg=edx) self.rm.possibly_free_var(tmp_box) @@ -885,16 +897,16 @@ self.rm.force_allocate_reg(tmp_box, selected_reg=reg) self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -904,7 +916,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -963,16 +975,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, 
ofs_length, scale_of_field, - op.getarg(0), op.result) + self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from 
pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, 
rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def 
find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -45,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True gcrootmap = MockGcRootMap() @@ -167,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -205,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -221,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = 
lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -255,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -275,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -290,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. 
Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/interpreter/test/test_interpreter.py 
b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -91,9 +91,10 @@ else: # XXX that's slow def case_ok(filename): - index1 = filename.rfind(os.sep) - index2 = filename.rfind(os.altsep) - index = max(index1, index2) + index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not 
testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. - def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + 
('u', lltype.Ptr(T)), + ('v', lltype.Signed), + hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = 
heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. 
# Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* 
tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -116,13 +117,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ 
b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ 
assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) 
+ assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = 
link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -160,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' 
+ name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from 
pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ 
ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() 
return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git 
a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in 
self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -1148,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, 
checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not 
(isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). """ - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. 
try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -1421,9 +1423,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git 
a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def 
check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - 
ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. - # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 
0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = 
lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. 
- self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. 
self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 
@@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -28,7 +28,9 @@ self.items = items def getitems(self): - return jit.hint(self, promote=True).items + ## XXX! we would like: return jit.hint(self, promote=True).items + ## XXX! but it gives horrible performance in some cases + return self.items def getitem(self, idx): return self.getitems()[idx] diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ 
b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... 
+ assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code 
- eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? 
@@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). 
# if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return 
modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -1116,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. 
First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1965,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. 
In 2.x, this is just a alias diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -539,7 +527,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = 
result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -7,10 +7,10 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError @@ -361,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString 
@@ -381,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -245,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -385,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import 
isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -51,3 +53,23 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. 
+ Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + 
assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs 
self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, 
make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -135,7 +135,7 @@ return importing.check_sys_modules(space, w_modulename) def new_module(space, w_name): - return space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/tool/pytest/appsupport.py 
b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -86,6 +86,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... 
+ at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) @@ -113,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, 
mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. 
""" - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. - # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. 
+ if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from 
pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': 
sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,16 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + def test_reload(self, space, api): pdb = api.PyImport_Import(space.wrap("pdb")) space.delattr(pdb, space.wrap("set_trace")) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,18 @@ import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, - hashfunc, descrgetfunc, descrsetfunc, objobjproc) + 
cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -193,18 +195,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, func): + func_target = rffi.cast(cmpfunc, func) + 
check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -571,12 +614,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return 
[space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) 
def getcode(self): return self.fakecode diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -77,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -123,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) @@ -171,7 +171,7 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): # With asmgcc, we need two helpers, so that we can write two CALL # instructions in assembler, with a mark_gc_roots in between. # With shadowstack, this is not needed, so we produce a single helper. 
@@ -183,7 +183,7 @@ for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() # if gcrootmap is not None and gcrootmap.is_shadow_stack: # ---- shadowstack ---- @@ -208,7 +208,7 @@ mc.MOV_rr(edi.value, edx.value) mc.JMP(imm(addr)) # tail call to the real malloc rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart + self.malloc_slowpath1 = rawstart # ---------- second helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() # @@ -219,7 +219,7 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() @@ -951,7 +951,7 @@ def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, x, arglocs, start) p = 0 n = len(arglocs) @@ -979,7 +979,7 @@ self.mc.CALL(x) self.mark_gc_roots(force_index) - def _emit_call_64(self, force_index, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -1273,6 +1273,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -2083,8 +2088,7 
@@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2092,7 +2096,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2111,11 +2115,11 @@ shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) if not shadow_stack: # there are two helpers to call only with asmgcc - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + slowpath_addr1 = self.malloc_slowpath1 self.mc.CALL(imm(slowpath_addr1)) self.mark_gc_roots(self.write_new_force_index(), use_copy_area=shadow_stack) - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ From commits-noreply at bitbucket.org Sun Apr 10 21:50:40 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 10 Apr 2011 21:50:40 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix (thanks Matthew). 
Message-ID: <20110410195040.AA6A4282BAD@codespeak.net> Author: Armin Rigo Branch: Changeset: r43271:dc5579a7f70c Date: 2011-04-10 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/dc5579a7f70c/ Log: Fix (thanks Matthew). diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) From commits-noreply at bitbucket.org Sun Apr 10 23:16:43 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 10 Apr 2011 23:16:43 +0200 (CEST) Subject: [pypy-svn] pypy default: Tentative: disable .cfi_xxx on darwin64, where they are not recognized. Message-ID: <20110410211643.D7E8B36C214@codespeak.net> Author: Armin Rigo Branch: Changeset: r43272:795b479356ba Date: 2011-04-10 23:16 +0200 http://bitbucket.org/pypy/pypy/changeset/795b479356ba/ Log: Tentative: disable .cfi_xxx on darwin64, where they are not recognized. diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... 
+ s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: From commits-noreply at bitbucket.org Mon Apr 11 00:40:20 2011 From: commits-noreply at bitbucket.org (ademan) Date: Mon, 11 Apr 2011 00:40:20 +0200 (CEST) Subject: [pypy-svn] pypy fold_intadd: Partially fixed my test, now to figure out why I'm getting a preamble. Message-ID: <20110410224020.2AE58282BAD@codespeak.net> Author: Daniel Roberts Branch: fold_intadd Changeset: r43273:d7fca7dd9fa2 Date: 2011-04-10 15:39 -0700 http://bitbucket.org/pypy/pypy/changeset/d7fca7dd9fa2/ Log: Partially fixed my test, now to figure out why I'm getting a preamble. diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -16,7 +16,6 @@ ('intbounds', OptIntBounds), ('addition', OptAddition), ('rewrite', OptRewrite), - #('lastsetitem', OptLastSetitem), ('virtualize', OptVirtualize), ('string', OptString), ('heap', OptHeap), diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5541,7 +5541,7 @@ i2 = int_sub(i1, 16) i3 = int_add(i2, 9) i4 = int_sub(i3, 29) - jump(i3) + jump(i4) """ expected = """ @@ -5550,7 +5550,7 @@ i2 = int_sub(i0, 13) i3 = int_sub(i0, 4) i4 = int_sub(i0, 33) - jump(i3) + jump(i4) """ self.optimize_loop(ops, expected) @@ -5558,18 +5558,18 @@ ops = """ [i0] i1 = int_add(i0, 3) - i2 = int_sub(4, i1) - i3 = int_sub(i2, 14) - i4 = int_add(6, i2) + i2 = int_sub(5, i1) + i3 = int_sub(i2, 7) + i4 = int_add(11, i3) jump(i4) """ expected = """ [i0] i1 = int_add(i0, 3) - i2 = int_sub(1, i0) - i3 = int_sub(-13, i0) - i4 = int_sub(-7, i0) + i2 = int_sub(2, i0) + i3 = int_sub(-5, i0) + i4 = 
int_sub(6, i0) jump(i4) """ self.optimize_loop(ops, expected) From commits-noreply at bitbucket.org Mon Apr 11 01:26:07 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 01:26:07 +0200 (CEST) Subject: [pypy-svn] pypy default: An experiemnt - for guards write a hint "branch unlikely taken". Let's see Message-ID: <20110410232607.26503282BAD@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43274:2e31ce2f1e90 Date: 2011-04-11 01:24 +0200 http://bitbucket.org/pypy/pypy/changeset/2e31ce2f1e90/ Log: An experiemnt - for guards write a hint "branch unlikely taken". Let's see if it affects benchmarks at all diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1849,6 +1849,9 @@ def implement_guard(self, guard_token, condition=None): # These jumps are patched later. if condition: + if condition not in ['B', 'E', 'BE']: + # this is a hint "branch not taken + self.mc.writechar('\x2E') self.mc.J_il(rx86.Conditions[condition], 0) else: self.mc.JMP_l(0) From commits-noreply at bitbucket.org Mon Apr 11 01:26:08 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 01:26:08 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110410232608.5DF92282BAD@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43275:9237aaa86f91 Date: 2011-04-11 01:25 +0200 http://bitbucket.org/pypy/pypy/changeset/9237aaa86f91/ Log: merge diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 
'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... + s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) From commits-noreply at bitbucket.org Mon Apr 11 12:01:12 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:12 +0200 (CEST) Subject: [pypy-svn] pypy default: add support for SHORT type, and for force_cast Message-ID: <20110411100112.7FD24282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43276:3ec40eb2408a Date: 2011-04-11 08:15 +0000 http://bitbucket.org/pypy/pypy/changeset/3ec40eb2408a/ Log: add support for SHORT type, and for force_cast diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass = JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc 
== 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -359,6 +359,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, OOTYPE): @@ -406,6 +407,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -228,4 +228,5 @@ 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, 'cast_primitive': [PushAllArgs, 
CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,6 +95,7 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, rffi.SHORT): jvm.I2S, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, } From commits-noreply at bitbucket.org Mon Apr 11 12:01:13 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:13 +0200 (CEST) Subject: [pypy-svn] pypy default: add support for some rlocale to the jvm backend Message-ID: <20110411100113.53CC2282BE3@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43277:bf822ab5a563 Date: 2011-04-11 08:30 +0000 http://bitbucket.org/pypy/pypy/changeset/bf822ab5a563/ Log: add support for some rlocale to the jvm backend diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -1187,9 +1187,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + 
return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { From commits-noreply at bitbucket.org Mon Apr 11 12:01:15 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:15 +0200 (CEST) Subject: [pypy-svn] pypy default: cast_float_to_ulonglong for the jvm backend Message-ID: <20110411100115.49648282BEB@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43278:77f2c8d3cde0 Date: 2011-04-11 08:56 +0000 http://bitbucket.org/pypy/pypy/changeset/77f2c8d3cde0/ Log: cast_float_to_ulonglong for the jvm backend diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -163,6 +163,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -227,6 +227,7 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -936,6 +936,7 @@ PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) 
PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) From commits-noreply at bitbucket.org Mon Apr 11 12:01:17 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:17 +0200 (CEST) Subject: [pypy-svn] pypy default: cast_ulonglong_to_float for the jvm backend Message-ID: <20110411100117.A2A8C282BEB@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43279:9d89bb0e8d32 Date: 2011-04-11 08:58 +0000 http://bitbucket.org/pypy/pypy/changeset/9d89bb0e8d32/ Log: cast_ulonglong_to_float for the jvm backend diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -228,6 +228,7 @@ 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -937,6 +937,7 @@ PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), 
jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) From commits-noreply at bitbucket.org Mon Apr 11 12:01:18 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:18 +0200 (CEST) Subject: [pypy-svn] pypy default: ll_math_copysign for the jvm backend Message-ID: <20110411100118.8403D282BEB@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43280:adcc160d3661 Date: 2011-04-11 09:00 +0000 http://bitbucket.org/pypy/pypy/changeset/adcc160d3661/ Log: ll_math_copysign for the jvm backend diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -1182,6 +1182,10 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); From commits-noreply at bitbucket.org Mon Apr 11 12:01:19 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:19 +0200 (CEST) Subject: [pypy-svn] pypy default: isnan for the jvm backend Message-ID: <20110411100119.3156A282BEB@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43281:f83a249c12d4 Date: 2011-04-11 09:01 +0000 http://bitbucket.org/pypy/pypy/changeset/f83a249c12d4/ Log: isnan for the jvm backend diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -1186,6 +1186,10 @@ return Math.copySign(x, y); } + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); From commits-noreply at bitbucket.org Mon Apr 11 12:01:20 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:20 +0200 (CEST) Subject: [pypy-svn] pypy default: isinf for the jvm 
backend Message-ID: <20110411100120.A5DC1282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43282:51aebd3c4d99 Date: 2011-04-11 09:02 +0000 http://bitbucket.org/pypy/pypy/changeset/51aebd3c4d99/ Log: isinf for the jvm backend diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -1190,6 +1190,10 @@ return Double.isNaN(x); } + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); From commits-noreply at bitbucket.org Mon Apr 11 12:01:22 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:22 +0200 (CEST) Subject: [pypy-svn] pypy default: ullong_{or,and} for the jvm backend Message-ID: <20110411100122.77FE8282BEA@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43283:6941a8b016dd Date: 2011-04-11 09:03 +0000 http://bitbucket.org/pypy/pypy/changeset/6941a8b016dd/ Log: ullong_{or,and} for the jvm backend diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -205,6 +205,8 @@ 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could From commits-noreply at bitbucket.org Mon Apr 11 12:01:23 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:23 +0200 (CEST) Subject: [pypy-svn] pypy default: fix (u)llong_{r, l}shift: I do not really understand how it could have worked before, since the signature was just wrong Message-ID: <20110411100123.7168C282C1A@codespeak.net> Author: Antonio 
Cuni Branch: Changeset: r43284:65f67dbc9ca5 Date: 2011-04-11 09:13 +0000 http://bitbucket.org/pypy/pypy/changeset/65f67dbc9ca5/ Log: fix (u)llong_{r,l}shift: I do not really understand how it could have worked before, since the signature was just wrong diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -185,8 +185,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,8 +202,8 @@ 'ullong_truediv': None, # TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, 'ullong_or': jvm.LOR, 'ullong_and': jvm.LAND, From commits-noreply at bitbucket.org Mon Apr 11 12:01:24 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:24 +0200 (CEST) Subject: [pypy-svn] pypy default: implement cast_uint_to_longlong for the jvm Message-ID: <20110411100124.ADE6B282BEA@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43285:3519e82ef771 Date: 2011-04-11 09:16 +0000 http://bitbucket.org/pypy/pypy/changeset/3519e82ef771/ Log: implement cast_uint_to_longlong for the jvm diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -96,6 +96,7 @@ (ootype.UnsignedLongLong, 
ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, (ootype.Signed, rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, } diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -933,6 +933,7 @@ PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL From commits-noreply at bitbucket.org Mon Apr 11 12:01:26 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:26 +0200 (CEST) Subject: [pypy-svn] pypy default: int_between for the jvm backend Message-ID: <20110411100126.264D4282BF7@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43286:e4105b9cc21c Date: 2011-04-11 09:22 +0000 http://bitbucket.org/pypy/pypy/changeset/e4105b9cc21c/ Log: int_between for the jvm backend diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is diff --git 
a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -144,6 +144,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -927,6 +927,7 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) From commits-noreply at bitbucket.org Mon Apr 11 12:01:29 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 12:01:29 +0200 (CEST) Subject: [pypy-svn] pypy default: skip these two failing tests Message-ID: <20110411100129.DE632282BE9@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43287:5c11f21f2e15 Date: 2011-04-11 09:22 +0000 http://bitbucket.org/pypy/pypy/changeset/5c11f21f2e15/ Log: skip these two failing tests diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): From commits-noreply at bitbucket.org Mon Apr 11 12:24:38 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 12:24:38 
+0200 (CEST) Subject: [pypy-svn] pypy default: "Good". Branch prediction macro does not seem to help. Also, it's unclear Message-ID: <20110411102438.26B18282BDF@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43288:317393b08e4e Date: 2011-04-11 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/317393b08e4e/ Log: "Good". Branch prediction macro does not seem to help. Also, it's unclear whether this works (but it does not segfault at least) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1849,9 +1849,6 @@ def implement_guard(self, guard_token, condition=None): # These jumps are patched later. if condition: - if condition not in ['B', 'E', 'BE']: - # this is a hint "branch not taken - self.mc.writechar('\x2E') self.mc.J_il(rx86.Conditions[condition], 0) else: self.mc.JMP_l(0) From commits-noreply at bitbucket.org Mon Apr 11 13:06:05 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 13:06:05 +0200 (CEST) Subject: [pypy-svn] pypy default: postpone the USE_SHORT_FLOAT_REPR check, else it is executed at import time; we want to be able to change its value later, e.g. in translationoption.py Message-ID: <20110411110605.B20C3282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43289:6a7e581aa200 Date: 2011-04-11 10:23 +0000 http://bitbucket.org/pypy/pypy/changeset/6a7e581aa200/ Log: postpone the USE_SHORT_FLOAT_REPR check, else it is executed at import time; we want to be able to change its value later, e.g. 
in translationoption.py diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -167,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - # string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. 
So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. 
+ halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' 
in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for 
translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY From commits-noreply at bitbucket.org Mon Apr 11 13:06:06 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 13:06:06 +0200 (CEST) Subject: [pypy-svn] pypy default: ignore these opcodes for noew Message-ID: <20110411110606.4AEE1282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43290:590906d274b8 Date: 2011-04-11 10:33 +0000 http://bitbucket.org/pypy/pypy/changeset/590906d274b8/ Log: ignore these opcodes for noew diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,9 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, # __________ numeric operations __________ From commits-noreply at bitbucket.org Mon Apr 11 13:06:06 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 13:06:06 +0200 (CEST) Subject: [pypy-svn] pypy default: do nothing if we are trying to cast something into the very same type Message-ID: <20110411110606.CD893282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43291:cfa47b6c5890 Date: 2011-04-11 11:05 +0000 http://bitbucket.org/pypy/pypy/changeset/cfa47b6c5890/ Log: do nothing if we are trying to cast something into the very same type diff 
--git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -105,6 +105,8 @@ def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) From commits-noreply at bitbucket.org Mon Apr 11 13:06:08 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 13:06:08 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110411110608.1E045282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43292:21463a3965de Date: 2011-04-11 11:05 +0000 http://bitbucket.org/pypy/pypy/changeset/21463a3965de/ Log: merge heads diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1849,9 +1849,6 @@ def implement_guard(self, guard_token, condition=None): # These jumps are patched later. if condition: - if condition not in ['B', 'E', 'BE']: - # this is a hint "branch not taken - self.mc.writechar('\x2E') self.mc.J_il(rx86.Conditions[condition], 0) else: self.mc.JMP_l(0) From commits-noreply at bitbucket.org Mon Apr 11 13:31:56 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 13:31:56 +0200 (CEST) Subject: [pypy-svn] pypy default: Add Samuel Reis to LICENSE as per request Message-ID: <20110411113156.A1BC8282BE9@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43293:ce129bd74328 Date: 2011-04-11 13:31 +0200 http://bitbucket.org/pypy/pypy/changeset/ce129bd74328/ Log: Add Samuel Reis to LICENSE as per request diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -119,6 +119,9 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reise and is distributed on terms of Creative Commons Share Alike +License. 
License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' ============================================================== From commits-noreply at bitbucket.org Mon Apr 11 14:02:06 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 14:02:06 +0200 (CEST) Subject: [pypy-svn] pypy default: Those damn polish never get spelling right Message-ID: <20110411120206.3BDF8282BE9@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43294:8b1954953450 Date: 2011-04-11 14:01 +0200 http://bitbucket.org/pypy/pypy/changeset/8b1954953450/ Log: Those damn polish never get spelling right diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -120,7 +120,7 @@ Change Maker, Sweden The PyPy Logo as used by http://speed.pypy.org and others was created -by Samuel Reise and is distributed on terms of Creative Commons Share Alike +by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' From commits-noreply at bitbucket.org Mon Apr 11 15:10:26 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Mon, 11 Apr 2011 15:10:26 +0200 (CEST) Subject: [pypy-svn] pypy default: Try another approach to isinf. Message-ID: <20110411131026.3FDAB282BE9@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43295:55888b692fe7 Date: 2011-04-11 09:10 -0400 http://bitbucket.org/pypy/pypy/changeset/55888b692fe7/ Log: Try another approach to isinf. 
diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -94,7 +94,7 @@ return y != y def ll_math_isinf(y): - return not isnan(y) and isnan(y - y) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign From commits-noreply at bitbucket.org Mon Apr 11 15:11:53 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 15:11:53 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: mention nightly builds Message-ID: <20110411131153.CDD8F282BE9@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3491:0fa5bf956a10 Date: 2011-04-11 15:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/0fa5bf956a10/ Log: mention nightly builds diff --git a/pypy.org/source/download.txt b/pypy.org/source/download.txt --- a/pypy.org/source/download.txt +++ b/pypy.org/source/download.txt @@ -6,6 +6,12 @@ Download ============================================================ +.. class:: download_menu + + There are `nightly binary builds`_ available. Those builds are not always + as stable as the release, but they contain Python 2.7 compatibility, + numerous bugfixes and performance improvements. + Here are the various binaries of **PyPy 1.4.1** that we provide for x86 Linux, Mac OS/X or Windows. This is mostly a bugfix release, although the performance over the previous release 1.4 has improved in some cases. @@ -160,6 +166,7 @@ .. 
_`stackless`: http://www.stackless.com/ .. _`greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt .. _Mercurial: http://mercurial.selenic.com/ +.. _`nightly binary builds`: http://buildbot.pypy.org/nightly/trunk/ Checksums --------- diff --git a/pypy.org/source/README b/pypy.org/source/README --- a/pypy.org/source/README +++ b/pypy.org/source/README @@ -1,14 +1,14 @@ -You generate this website by using yatiblog from here: +You can get necessary software by doing: -http://github.com/tav/ampify/blob/master/environ/yatiblog +git clone https://github.com/tav/ampify.git -by running +and then recreate the website in this directory by running ..../ampify/environ/yatiblog -o .. you'll get html output in the parent directory. Then you can check it in, go to codespeak in /www/pypy.org/htdocs/ -and type "svn up". +and type "hg pull -u". Other required dependencies: * "docutils" from "easy_install docutils" diff --git a/pypy.org/download.html b/pypy.org/download.html --- a/pypy.org/download.html +++ b/pypy.org/download.html @@ -47,6 +47,9 @@

Download and install

+

There are nightly binary builds available. Those builds are not always +as stable as the release, but they contain Python 2.7 compatibility, +numerous bugfixes and performance improvements.

Here are the various binaries of PyPy 1.4.1 that we provide for x86 Linux, Mac OS/X or Windows. This is mostly a bugfix release, although the performance over the previous release 1.4 has improved in some cases.

From commits-noreply at bitbucket.org Mon Apr 11 15:22:34 2011 From: commits-noreply at bitbucket.org (tav) Date: Mon, 11 Apr 2011 15:22:34 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Testing write access to extradocs Message-ID: <20110411132234.BD928282BE9@codespeak.net> Author: tav Branch: extradoc Changeset: r3492:97a2625b587a Date: 2011-04-11 14:22 +0100 http://bitbucket.org/pypy/extradoc/changeset/97a2625b587a/ Log: Testing write access to extradocs diff --git a/pypy.org/source/README b/pypy.org/source/README --- a/pypy.org/source/README +++ b/pypy.org/source/README @@ -4,7 +4,7 @@ and then recreate the website in this directory by running -..../ampify/environ/yatiblog -o .. +ampify/environ/yatiblog -o .. you'll get html output in the parent directory. Then you can check it in, go to codespeak in /www/pypy.org/htdocs/ From commits-noreply at bitbucket.org Mon Apr 11 16:24:02 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Mon, 11 Apr 2011 16:24:02 +0200 (CEST) Subject: [pypy-svn] pypy default: fix for 2.5 Message-ID: <20110411142402.483A6282BE9@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43296:349c163fc8aa Date: 2011-04-11 10:23 -0400 http://bitbucket.org/pypy/pypy/changeset/349c163fc8aa/ Log: fix for 2.5 diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,3 +1,5 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype @@ -509,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), From commits-noreply at bitbucket.org Mon Apr 11 17:02:55 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: 
Mon, 11 Apr 2011 17:02:55 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: fix one of maciej's comment, ignore two Message-ID: <20110411150255.C10F0282BDF@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3493:fb8940151514 Date: 2011-04-11 17:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/fb8940151514/ Log: fix one of maciej's comment, ignore two diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -215,9 +215,6 @@ and less error prone than manually writing a JIT compiler. Similarly, writing in a high level language such as RPython is easier than writing in C. -XXX [fijal] yet another advantage is that JIT is by design supporting the whole - language - We call the code that runs on top of an interpreter implemented with PyPy the \emph{user code} or \emph{user program}. @@ -264,7 +261,8 @@ Tracing through the execution of an interpreter has many advantages. It makes the tracer, its optimizers and backends reusable for a variety of languages. The language semantics do not need to be encoded into the JIT. Instead the tracer -just picks them up from the interpreter. +just picks them up from the interpreter. This also means that the JIT by +construction supports the full language. While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the @@ -783,10 +781,6 @@ class did not change since the trace was produced. It will fail if somebody calls the \texttt{write\_method} method on the class. 
-XXX [fijal] maybe it's worth noting that those guards are removed out of - the loop by loop-invariant-code motion, unless our is so special - we want to write a special paper about it - %___________________________________________________________________________ \subsection{Real-World Considerations} @@ -880,8 +874,6 @@ all benchmarks, which is not surprising because CPython is a simple bytecode-based interpreter. -XXX [fijal] wouldn't a graph be better? - \begin{figure} \begin{center} {\footnotesize From commits-noreply at bitbucket.org Mon Apr 11 17:37:34 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 17:37:34 +0200 (CEST) Subject: [pypy-svn] pypy default: ignore keepalives in the jvm backend Message-ID: <20110411153734.63E89282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43297:87f6a16d8996 Date: 2011-04-11 14:04 +0200 http://bitbucket.org/pypy/pypy/changeset/87f6a16d8996/ Log: ignore keepalives in the jvm backend diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -109,6 +109,7 @@ 'debug_start': Ignore, 'debug_stop': Ignore, 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ From commits-noreply at bitbucket.org Mon Apr 11 17:37:34 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 17:37:34 +0200 (CEST) Subject: [pypy-svn] pypy default: teach the jvm backend how to cast between signed and unsigned Message-ID: <20110411153734.EAFCC282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43298:d6d408608ff5 Date: 2011-04-11 14:07 +0200 http://bitbucket.org/pypy/pypy/changeset/d6d408608ff5/ Log: teach the jvm backend how to cast between signed and unsigned diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -99,6 +99,8 @@ (ootype.Unsigned, 
ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): From commits-noreply at bitbucket.org Mon Apr 11 17:37:35 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 17:37:35 +0200 (CEST) Subject: [pypy-svn] pypy default: comment out broken logic ("how could it have ever worked?") Message-ID: <20110411153735.73DB3282BDF@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43299:a8dcb66a4a5e Date: 2011-04-11 15:50 +0200 http://bitbucket.org/pypy/pypy/changeset/a8dcb66a4a5e/ Log: comment out broken logic ("how could it have ever worked?") diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging From commits-noreply at bitbucket.org Mon Apr 11 17:37:36 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 17:37:36 +0200 (CEST) Subject: [pypy-svn] pypy default: this class needs to be public Message-ID: <20110411153736.1C2C0282BDF@codespeak.net> Author: Antonio Cuni 
Branch: Changeset: r43300:58e4bb9eb0ed Date: 2011-04-11 15:51 +0200 http://bitbucket.org/pypy/pypy/changeset/58e4bb9eb0ed/ Log: this class needs to be public diff --git a/pypy/translator/jvm/src/pypy/StatResult.java b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; From commits-noreply at bitbucket.org Mon Apr 11 17:37:39 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 17:37:39 +0200 (CEST) Subject: [pypy-svn] pypy default: implement os.fstat for the jvm backend Message-ID: <20110411153739.802E52A202E@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43301:f70561d31d68 Date: 2011-04-11 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/f70561d31d68/ Log: implement os.fstat for the jvm backend diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, 
boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): From commits-noreply at bitbucket.org Mon Apr 11 17:37:42 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 11 Apr 2011 17:37:42 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110411153742.C8DFE282BDF@codespeak.net> 
Author: Antonio Cuni Branch: Changeset: r43302:865006b23964 Date: 2011-04-11 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/865006b23964/ Log: merge heads diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,3 +1,5 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype @@ -509,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -119,6 +119,9 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. 
License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' ============================================================== diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -94,7 +94,7 @@ return y != y def ll_math_isinf(y): - return not isnan(y) and isnan(y - y) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign From commits-noreply at bitbucket.org Mon Apr 11 19:40:08 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 19:40:08 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: This were "investigated" in a sense relevant commits were reverted Message-ID: <20110411174008.9271736C20E@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3494:b7e0ae0c5aee Date: 2011-04-11 19:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/b7e0ae0c5aee/ Log: This were "investigated" in a sense relevant commits were reverted (but the investigation might continue once smallints are on again) diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -4,16 +4,6 @@ * a CALL that may release the GIL needs to have effectinfo=None, because random other code can run at that point. 
-INVESTIGATIONS --------------- - -* 25% slowdown on pyflate fast (Jan 29) - - pyflate_fast uses python longs on 32bit - - some places don't create SmallLongs even when they should (like consts) - - we end up with comparison of Longs and SmallLongs - -* 10% slowdown on spitfire (Feb 01) - NEW TASKS --------- From commits-noreply at bitbucket.org Mon Apr 11 19:40:10 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 19:40:10 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: merge Message-ID: <20110411174010.CC89C282BAA@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3495:4082a4379c58 Date: 2011-04-11 19:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/4082a4379c58/ Log: merge diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -17,5 +17,6 @@ Antonio Cuni 26-30 Hotel Poseidon his own diet :) Armin Rigo 23-02 SGS Veckobostader Hakan Ardo 24-27 ??? +Romain Guillebert 23-03 ??? ==================== ============== ===================== ================== diff --git a/pypy.org/source/download.txt b/pypy.org/source/download.txt --- a/pypy.org/source/download.txt +++ b/pypy.org/source/download.txt @@ -6,6 +6,12 @@ Download ============================================================ +.. class:: download_menu + + There are `nightly binary builds`_ available. Those builds are not always + as stable as the release, but they contain Python 2.7 compatibility, + numerous bugfixes and performance improvements. + Here are the various binaries of **PyPy 1.4.1** that we provide for x86 Linux, Mac OS/X or Windows. This is mostly a bugfix release, although the performance over the previous release 1.4 has improved in some cases. @@ -160,6 +166,7 @@ .. _`stackless`: http://www.stackless.com/ .. _`greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt .. 
_Mercurial: http://mercurial.selenic.com/ +.. _`nightly binary builds`: http://buildbot.pypy.org/nightly/trunk/ Checksums --------- diff --git a/pypy.org/source/README b/pypy.org/source/README --- a/pypy.org/source/README +++ b/pypy.org/source/README @@ -1,14 +1,14 @@ -You generate this website by using yatiblog from here: +You can get necessary software by doing: -http://github.com/tav/ampify/blob/master/environ/yatiblog +git clone https://github.com/tav/ampify.git -by running +and then recreate the website in this directory by running -..../ampify/environ/yatiblog -o .. +ampify/environ/yatiblog -o .. you'll get html output in the parent directory. Then you can check it in, go to codespeak in /www/pypy.org/htdocs/ -and type "svn up". +and type "hg pull -u". Other required dependencies: * "docutils" from "easy_install docutils" diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -215,9 +215,6 @@ and less error prone than manually writing a JIT compiler. Similarly, writing in a high level language such as RPython is easier than writing in C. -XXX [fijal] yet another advantage is that JIT is by design supporting the whole - language - We call the code that runs on top of an interpreter implemented with PyPy the \emph{user code} or \emph{user program}. @@ -264,7 +261,8 @@ Tracing through the execution of an interpreter has many advantages. It makes the tracer, its optimizers and backends reusable for a variety of languages. The language semantics do not need to be encoded into the JIT. Instead the tracer -just picks them up from the interpreter. +just picks them up from the interpreter. This also means that the JIT by +construction supports the full language. While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the @@ -783,10 +781,6 @@ class did not change since the trace was produced. 
It will fail if somebody calls the \texttt{write\_method} method on the class. -XXX [fijal] maybe it's worth noting that those guards are removed out of - the loop by loop-invariant-code motion, unless our is so special - we want to write a special paper about it - %___________________________________________________________________________ \subsection{Real-World Considerations} @@ -880,8 +874,6 @@ all benchmarks, which is not surprising because CPython is a simple bytecode-based interpreter. -XXX [fijal] wouldn't a graph be better? - \begin{figure} \begin{center} {\footnotesize diff --git a/pypy.org/download.html b/pypy.org/download.html --- a/pypy.org/download.html +++ b/pypy.org/download.html @@ -47,6 +47,9 @@

Download and install

+

There are nightly binary builds available. Those builds are not always +as stable as the release, but they contain Python 2.7 compatibility, +numerous bugfixes and performance improvements.

Here are the various binaries of PyPy 1.4.1 that we provide for x86 Linux, Mac OS/X or Windows. This is mostly a bugfix release, although the performance over the previous release 1.4 has improved in some cases.

From commits-noreply at bitbucket.org Mon Apr 11 19:41:19 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 11 Apr 2011 19:41:19 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: I failed to find an actual python code that does it. I swear it was somewhere Message-ID: <20110411174119.3E5AC282BA1@codespeak.net> Author: Maciej Fijalkowski Branch: extradoc Changeset: r3496:2ca3043ec4e0 Date: 2011-04-11 19:41 +0200 http://bitbucket.org/pypy/extradoc/changeset/2ca3043ec4e0/ Log: I failed to find an actual python code that does it. I swear it was somewhere in loops from translate, but I'll put it back when I find it again. It's kind of pointless to keep it without an example diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -23,14 +23,6 @@ is a compile time constant (and call unrolled version of string formatting loop in this case). -- generators are still fairly inefficient. We get a lot of: - i = ptr_eq(frame, some_other_frame) - guard_value(i, 0) - every second instruction. - - there is also manipulating of valuestackdepth and such. - XXX find precise python code - - consider how much old style classes in stdlib hurt us. - support raw mallocs From commits-noreply at bitbucket.org Mon Apr 11 21:21:44 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 11 Apr 2011 21:21:44 +0200 (CEST) Subject: [pypy-svn] pypy default: Patch by ???@pocketnix.org. Message-ID: <20110411192144.05B6436C201@codespeak.net> Author: Armin Rigo Branch: Changeset: r43303:3e12c40ef735 Date: 2011-04-11 21:18 +0200 http://bitbucket.org/pypy/pypy/changeset/3e12c40ef735/ Log: Patch by ???@pocketnix.org. 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,7 +39,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "md5", "cStringIO", "array"])) + "struct", "_md5", "cStringIO", "array"])) working_oo_modules = default_modules.copy() working_oo_modules.update(dict.fromkeys( From commits-noreply at bitbucket.org Tue Apr 12 11:46:25 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 11:46:25 +0200 (CEST) Subject: [pypy-svn] pypy default: Patch modified from a patch submitted by Da_Blitz: Message-ID: <20110412094625.567C536C203@codespeak.net> Author: Armin Rigo Branch: Changeset: r43304:99db330d6f9c Date: 2011-04-12 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/99db330d6f9c/ Log: Patch modified from a patch submitted by Da_Blitz: double check to ensure we are not overwriting the current interpreter diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) From commits-noreply at bitbucket.org Tue Apr 12 12:28:12 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 12:28:12 +0200 (CEST) Subject: [pypy-svn] pypy default: Remove dead import. 
Message-ID: <20110412102812.F25A62A202C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43305:e552cfef6388 Date: 2011-04-12 12:11 +0200 http://bitbucket.org/pypy/pypy/changeset/e552cfef6388/ Log: Remove dead import. diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,7 +5,7 @@ soon as possible (at least in a simple case). """ -import weakref, random +import weakref import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc From commits-noreply at bitbucket.org Tue Apr 12 12:30:06 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 12:30:06 +0200 (CEST) Subject: [pypy-svn] pypy jitypes2: hg merge default Message-ID: <20110412103006.0A05F2A202C@codespeak.net> Author: Armin Rigo Branch: jitypes2 Changeset: r43306:2b755cc2b438 Date: 2011-04-12 12:22 +0200 http://bitbucket.org/pypy/pypy/changeset/2b755cc2b438/ Log: hg merge default diff --git a/pypy/jit/metainterp/optimizeopt/string.py b/pypy/jit/metainterp/optimizeopt/string.py --- a/pypy/jit/metainterp/optimizeopt/string.py +++ b/pypy/jit/metainterp/optimizeopt/string.py @@ -47,7 +47,7 @@ class __extend__(optimizer.OptValue): """New methods added to the base class OptValue for this file.""" - def getstrlen(self, newoperations, mode): + def getstrlen(self, optimization, mode): if mode is mode_string: s = self.get_constant_string_spec(mode_string) if s is not None: @@ -56,12 +56,12 @@ s = self.get_constant_string_spec(mode_unicode) if s is not None: return ConstInt(len(s)) - if newoperations is None: + if optimization is None: return None self.ensure_nonnull() box = self.force_box() lengthbox = BoxInt() - newoperations.append(ResOperation(mode.STRLEN, [box], lengthbox)) + optimization.emit_operation(ResOperation(mode.STRLEN, [box], lengthbox)) return lengthbox @specialize.arg(1) @@ -72,13 +72,13 @@ else: return None 
- def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): + def string_copy_parts(self, optimization, targetbox, offsetbox, mode): # Copies the pointer-to-string 'self' into the target string # given by 'targetbox', at the specified offset. Returns the offset # at the end of the copy. - lengthbox = self.getstrlen(newoperations, mode) + lengthbox = self.getstrlen(optimization, mode) srcbox = self.force_box() - return copy_str_content(newoperations, srcbox, targetbox, + return copy_str_content(optimization, srcbox, targetbox, CONST_0, offsetbox, lengthbox, mode) @@ -105,13 +105,12 @@ return assert self.source_op is not None self.box = box = self.source_op.result - newoperations = self.optimizer.newoperations - lengthbox = self.getstrlen(newoperations, self.mode) + lengthbox = self.getstrlen(self.optimizer, self.mode) op = ResOperation(self.mode.NEWSTR, [lengthbox], box) if not we_are_translated(): op.name = 'FORCE' - newoperations.append(op) - self.string_copy_parts(newoperations, box, CONST_0, self.mode) + self.optimizer.emit_operation(op) + self.string_copy_parts(self.optimizer, box, CONST_0, self.mode) class VStringPlainValue(VAbstractStringValue): @@ -145,14 +144,14 @@ return mode.emptystr.join([mode.chr(c.box.getint()) for c in self._chars]) - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): for i in range(len(self._chars)): charbox = self._chars[i].force_box() - newoperations.append(ResOperation(mode.STRSETITEM, [targetbox, + optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) + offsetbox = _int_add(optimizer, offsetbox, CONST_1) return offsetbox def get_args_for_fail(self, modifier): @@ -186,16 +185,16 @@ self.left = left self.right = right - def getstrlen(self, newoperations, mode): + def getstrlen(self, optimizer, mode): if self.lengthbox is None: - 
len1box = self.left.getstrlen(newoperations, mode) + len1box = self.left.getstrlen(optimizer, mode) if len1box is None: return None - len2box = self.right.getstrlen(newoperations, mode) + len2box = self.right.getstrlen(optimizer, mode) if len2box is None: return None - self.lengthbox = _int_add(newoperations, len1box, len2box) - # ^^^ may still be None, if newoperations is None + self.lengthbox = _int_add(optimizer, len1box, len2box) + # ^^^ may still be None, if optimizer is None return self.lengthbox @specialize.arg(1) @@ -208,10 +207,10 @@ return None return s1 + s2 - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): - offsetbox = self.left.string_copy_parts(newoperations, targetbox, + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + offsetbox = self.left.string_copy_parts(optimizer, targetbox, offsetbox, mode) - offsetbox = self.right.string_copy_parts(newoperations, targetbox, + offsetbox = self.right.string_copy_parts(optimizer, targetbox, offsetbox, mode) return offsetbox @@ -266,9 +265,9 @@ return s1[start : start + length] return None - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): - lengthbox = self.getstrlen(newoperations, mode) - return copy_str_content(newoperations, + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + lengthbox = self.getstrlen(optimizer, mode) + return copy_str_content(optimizer, self.vstr.force_box(), targetbox, self.vstart.force_box(), offsetbox, lengthbox, mode) @@ -299,7 +298,7 @@ return modifier.make_vstrslice(self.mode is mode_unicode) -def copy_str_content(newoperations, srcbox, targetbox, +def copy_str_content(optimizer, srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox, mode): if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): M = 5 @@ -309,23 +308,23 @@ # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM # instead of just a COPYSTRCONTENT. 
for i in range(lengthbox.value): - charbox = _strgetitem(newoperations, srcbox, srcoffsetbox, mode) - srcoffsetbox = _int_add(newoperations, srcoffsetbox, CONST_1) - newoperations.append(ResOperation(mode.STRSETITEM, [targetbox, + charbox = _strgetitem(optimizer, srcbox, srcoffsetbox, mode) + srcoffsetbox = _int_add(optimizer, srcoffsetbox, CONST_1) + optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) + offsetbox = _int_add(optimizer, offsetbox, CONST_1) else: - nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) + nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox) op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox], None) - newoperations.append(op) + optimizer.emit_operation(op) offsetbox = nextoffsetbox return offsetbox -def _int_add(newoperations, box1, box2): +def _int_add(optimizer, box1, box2): if isinstance(box1, ConstInt): if box1.value == 0: return box2 @@ -333,23 +332,23 @@ return ConstInt(box1.value + box2.value) elif isinstance(box2, ConstInt) and box2.value == 0: return box1 - if newoperations is None: + if optimizer is None: return None resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) + optimizer.emit_operation(ResOperation(rop.INT_ADD, [box1, box2], resbox)) return resbox -def _int_sub(newoperations, box1, box2): +def _int_sub(optimizer, box1, box2): if isinstance(box2, ConstInt): if box2.value == 0: return box1 if isinstance(box1, ConstInt): return ConstInt(box1.value - box2.value) resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) + optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox)) return resbox -def _strgetitem(newoperations, strbox, indexbox, mode): +def _strgetitem(optimizer, strbox, indexbox, mode): if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): if mode is 
mode_string: s = strbox.getref(lltype.Ptr(rstr.STR)) @@ -358,7 +357,7 @@ s = strbox.getref(lltype.Ptr(rstr.UNICODE)) return ConstInt(ord(s.chars[indexbox.getint()])) resbox = BoxInt() - newoperations.append(ResOperation(mode.STRGETITEM, [strbox, indexbox], + optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox], resbox)) return resbox @@ -370,7 +369,7 @@ def reconstruct_for_next_iteration(self, optimizer, valuemap): self.enabled = True return self - + def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(self.optimizer, box, source_op, mode) self.make_equal_to(box, vvalue) @@ -431,7 +430,7 @@ value.ensure_nonnull() # if value.is_virtual() and isinstance(value, VStringSliceValue): - fullindexbox = _int_add(self.optimizer.newoperations, + fullindexbox = _int_add(self.optimizer, value.vstart.force_box(), vindex.force_box()) value = value.vstr @@ -441,7 +440,7 @@ if vindex.is_constant(): return value.getitem(vindex.box.getint()) # - resbox = _strgetitem(self.optimizer.newoperations, + resbox = _strgetitem(self.optimizer, value.force_box(),vindex.force_box(), mode) return self.getvalue(resbox) @@ -452,7 +451,7 @@ def _optimize_STRLEN(self, op, mode): value = self.getvalue(op.getarg(0)) - lengthbox = value.getstrlen(self.optimizer.newoperations, mode) + lengthbox = value.getstrlen(self, mode) self.make_equal_to(op.result, self.getvalue(lengthbox)) def optimize_CALL(self, op): @@ -498,13 +497,11 @@ vright = self.getvalue(op.getarg(2)) vleft.ensure_nonnull() vright.ensure_nonnull() - newoperations = self.optimizer.newoperations value = self.make_vstring_concat(op.result, op, mode) value.setup(vleft, vright) return True def opt_call_stroruni_STR_SLICE(self, op, mode): - newoperations = self.optimizer.newoperations vstr = self.getvalue(op.getarg(1)) vstart = self.getvalue(op.getarg(2)) vstop = self.getvalue(op.getarg(3)) @@ -518,14 +515,14 @@ return True # vstr.ensure_nonnull() - lengthbox = _int_sub(newoperations, 
vstop.force_box(), + lengthbox = _int_sub(self.optimizer, vstop.force_box(), vstart.force_box()) # if isinstance(vstr, VStringSliceValue): # double slicing s[i:j][k:l] vintermediate = vstr vstr = vintermediate.vstr - startbox = _int_add(newoperations, + startbox = _int_add(self.optimizer, vintermediate.vstart.force_box(), vstart.force_box()) vstart = self.getvalue(startbox) @@ -574,7 +571,7 @@ l2box = v2.getstrlen(None, mode) if isinstance(l2box, ConstInt): if l2box.value == 0: - lengthbox = v1.getstrlen(self.optimizer.newoperations, mode) + lengthbox = v1.getstrlen(self.optimizer, mode) seo = self.optimizer.send_extra_operation seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox)) return True @@ -609,7 +606,7 @@ op = ResOperation(rop.PTR_EQ, [v1.force_box(), llhelper.CONST_NULL], resultbox) - self.optimizer.newoperations.append(op) + self.optimizer.emit_operation(op) return True # return False @@ -646,7 +643,7 @@ calldescr, func = cic.callinfo_for_oopspec(oopspecindex) op = ResOperation(rop.CALL, [ConstInt(func)] + args, result, descr=calldescr) - self.optimizer.newoperations.append(op) + self.optimizer.emit_operation(op) def propagate_forward(self, op): if not self.enabled: diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise 
NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/module/cpyext/include/abstract.h b/pypy/module/cpyext/include/abstract.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/abstract.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -108,6 +108,7 @@ Anders Qvist Alan McIntyre Bert Freudenberg + Tav Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -118,6 +119,9 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' ============================================================== diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -39,6 +39,10 @@ state = space.fromcache(State) state.clear_exception() + at cpython_api([PyObject], PyObject) +def PyExceptionInstance_Class(space, w_obj): + return space.type(w_obj) + @cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void) def PyErr_Fetch(space, ptype, pvalue, ptraceback): """Retrieve the error indicator into three variables whose addresses are passed. 
@@ -75,6 +79,9 @@ error indicator temporarily; use PyErr_Fetch() to save the current exception state.""" state = space.fromcache(State) + if w_type is None: + state.clear_exception() + return state.set_exception(OperationError(w_type, w_value)) Py_DecRef(space, w_type) Py_DecRef(space, w_value) @@ -300,3 +307,11 @@ operror = state.clear_exception() if operror: operror.write_unraisable(space, space.str_w(space.repr(w_where))) + + at cpython_api([], lltype.Void) +def PyErr_SetInterrupt(space): + """This function simulates the effect of a SIGINT signal arriving --- the + next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. + It may be called without holding the interpreter lock.""" + space.check_signal_action.set_interrupt() + diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py @@ -91,19 +91,22 @@ class AppTestDistributedTasklets(object): spaceconfig = {"objspace.std.withtproxy": True, "objspace.usemodules._stackless": True} + reclimit = sys.getrecursionlimit() + def setup_class(cls): + import py.test + py.test.importorskip('greenlet') #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, # "usemodules":("_stackless",)}) cls.w_test_env_ = cls.space.appexec([], """(): from distributed import test_env return (test_env,) """) - cls.reclimit = sys.getrecursionlimit() sys.setrecursionlimit(100000) def teardown_class(cls): sys.setrecursionlimit(cls.reclimit) - + def test_remote_protocol_call(self): def f(x, y): return x + y diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,6 +103,7 @@ except KeyError: subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, weakrefable) + assert key not in _subclass_cache 
_subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, 
OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including 
AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. - - Longer tasks ------------ diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -6,6 +6,7 @@ from pypy.tool.udir import udir from pypy.rlib import streamio from pypy.conftest import gettestobjspace +import pytest import sys, os import tempfile, marshal @@ -109,6 +110,14 @@ p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), mode='wb') + # create a .pyw file + p = setuppkg("windows", x = "x = 78") + try: + p.join('x.pyw').remove() + except py.error.ENOENT: + pass + p.join('x.py').rename(p.join('x.pyw')) + return str(root) @@ -177,6 +186,14 @@ import a assert a == a0 + def test_trailing_slash(self): + import sys + try: + sys.path[0] += '/' + import a + finally: + sys.path[0] = sys.path[0].rstrip('/') + def test_import_pkg(self): import sys import pkg @@ -325,6 +342,11 @@ import compiled.x assert compiled.x == sys.modules.get('compiled.x') + @pytest.mark.skipif("sys.platform != 'win32'") + def test_pyw(self): + import windows.x + assert windows.x.__file__.endswith('x.pyw') + def test_cannot_write_pyc(self): import sys, os p = os.path.join(sys.path[-1], 'readonly') @@ -985,7 +1007,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): diff --git a/lib_pypy/pyrepl/unicodedata_.py b/lib_pypy/pyrepl/unicodedata_.py new file mode 100644 --- /dev/null +++ 
b/lib_pypy/pyrepl/unicodedata_.py @@ -0,0 +1,59 @@ +try: + from unicodedata import * +except ImportError: + + def category(ch): + """ + ASCII only implementation + """ + if type(ch) is not unicode: + raise TypeError + if len(ch) != 1: + raise TypeError + return _categories.get(ord(ch), 'Co') # "Other, private use" + + _categories = { + 0: 'Cc', 1: 'Cc', 2: 'Cc', 3: 'Cc', 4: 'Cc', 5: 'Cc', + 6: 'Cc', 7: 'Cc', 8: 'Cc', 9: 'Cc', 10: 'Cc', 11: 'Cc', + 12: 'Cc', 13: 'Cc', 14: 'Cc', 15: 'Cc', 16: 'Cc', 17: 'Cc', + 18: 'Cc', 19: 'Cc', 20: 'Cc', 21: 'Cc', 22: 'Cc', 23: 'Cc', + 24: 'Cc', 25: 'Cc', 26: 'Cc', 27: 'Cc', 28: 'Cc', 29: 'Cc', + 30: 'Cc', 31: 'Cc', 32: 'Zs', 33: 'Po', 34: 'Po', 35: 'Po', + 36: 'Sc', 37: 'Po', 38: 'Po', 39: 'Po', 40: 'Ps', 41: 'Pe', + 42: 'Po', 43: 'Sm', 44: 'Po', 45: 'Pd', 46: 'Po', 47: 'Po', + 48: 'Nd', 49: 'Nd', 50: 'Nd', 51: 'Nd', 52: 'Nd', 53: 'Nd', + 54: 'Nd', 55: 'Nd', 56: 'Nd', 57: 'Nd', 58: 'Po', 59: 'Po', + 60: 'Sm', 61: 'Sm', 62: 'Sm', 63: 'Po', 64: 'Po', 65: 'Lu', + 66: 'Lu', 67: 'Lu', 68: 'Lu', 69: 'Lu', 70: 'Lu', 71: 'Lu', + 72: 'Lu', 73: 'Lu', 74: 'Lu', 75: 'Lu', 76: 'Lu', 77: 'Lu', + 78: 'Lu', 79: 'Lu', 80: 'Lu', 81: 'Lu', 82: 'Lu', 83: 'Lu', + 84: 'Lu', 85: 'Lu', 86: 'Lu', 87: 'Lu', 88: 'Lu', 89: 'Lu', + 90: 'Lu', 91: 'Ps', 92: 'Po', 93: 'Pe', 94: 'Sk', 95: 'Pc', + 96: 'Sk', 97: 'Ll', 98: 'Ll', 99: 'Ll', 100: 'Ll', 101: 'Ll', + 102: 'Ll', 103: 'Ll', 104: 'Ll', 105: 'Ll', 106: 'Ll', 107: 'Ll', + 108: 'Ll', 109: 'Ll', 110: 'Ll', 111: 'Ll', 112: 'Ll', 113: 'Ll', + 114: 'Ll', 115: 'Ll', 116: 'Ll', 117: 'Ll', 118: 'Ll', 119: 'Ll', + 120: 'Ll', 121: 'Ll', 122: 'Ll', 123: 'Ps', 124: 'Sm', 125: 'Pe', + 126: 'Sm', 127: 'Cc', 128: 'Cc', 129: 'Cc', 130: 'Cc', 131: 'Cc', + 132: 'Cc', 133: 'Cc', 134: 'Cc', 135: 'Cc', 136: 'Cc', 137: 'Cc', + 138: 'Cc', 139: 'Cc', 140: 'Cc', 141: 'Cc', 142: 'Cc', 143: 'Cc', + 144: 'Cc', 145: 'Cc', 146: 'Cc', 147: 'Cc', 148: 'Cc', 149: 'Cc', + 150: 'Cc', 151: 'Cc', 152: 'Cc', 153: 'Cc', 154: 'Cc', 155: 'Cc', + 
156: 'Cc', 157: 'Cc', 158: 'Cc', 159: 'Cc', 160: 'Zs', 161: 'Po', + 162: 'Sc', 163: 'Sc', 164: 'Sc', 165: 'Sc', 166: 'So', 167: 'So', + 168: 'Sk', 169: 'So', 170: 'Ll', 171: 'Pi', 172: 'Sm', 173: 'Cf', + 174: 'So', 175: 'Sk', 176: 'So', 177: 'Sm', 178: 'No', 179: 'No', + 180: 'Sk', 181: 'Ll', 182: 'So', 183: 'Po', 184: 'Sk', 185: 'No', + 186: 'Ll', 187: 'Pf', 188: 'No', 189: 'No', 190: 'No', 191: 'Po', + 192: 'Lu', 193: 'Lu', 194: 'Lu', 195: 'Lu', 196: 'Lu', 197: 'Lu', + 198: 'Lu', 199: 'Lu', 200: 'Lu', 201: 'Lu', 202: 'Lu', 203: 'Lu', + 204: 'Lu', 205: 'Lu', 206: 'Lu', 207: 'Lu', 208: 'Lu', 209: 'Lu', + 210: 'Lu', 211: 'Lu', 212: 'Lu', 213: 'Lu', 214: 'Lu', 215: 'Sm', + 216: 'Lu', 217: 'Lu', 218: 'Lu', 219: 'Lu', 220: 'Lu', 221: 'Lu', + 222: 'Lu', 223: 'Ll', 224: 'Ll', 225: 'Ll', 226: 'Ll', 227: 'Ll', + 228: 'Ll', 229: 'Ll', 230: 'Ll', 231: 'Ll', 232: 'Ll', 233: 'Ll', + 234: 'Ll', 235: 'Ll', 236: 'Ll', 237: 'Ll', 238: 'Ll', 239: 'Ll', + 240: 'Ll', 241: 'Ll', 242: 'Ll', 243: 'Ll', 244: 'Ll', 245: 'Ll', + 246: 'Ll', 247: 'Sm', 248: 'Ll', 249: 'Ll', 250: 'Ll', 251: 'Ll', + 252: 'Ll', 253: 'Ll', 254: 'Ll' + } diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. 
@@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/lib_pypy/pyrepl/keymaps.py b/lib_pypy/pyrepl/keymaps.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/keymaps.py @@ -0,0 +1,140 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +reader_emacs_keymap = tuple( + [(r'\C-a', 'beginning-of-line'), + (r'\C-b', 'left'), + (r'\C-c', 'interrupt'), + (r'\C-d', 'delete'), + (r'\C-e', 'end-of-line'), + (r'\C-f', 'right'), + (r'\C-g', 'cancel'), + (r'\C-h', 'backspace'), + (r'\C-j', 'self-insert'), + (r'\', 'accept'), + (r'\C-k', 'kill-line'), + (r'\C-l', 'clear-screen'), +# (r'\C-m', 'accept'), + (r'\C-q', 'quoted-insert'), + (r'\C-t', 'transpose-characters'), + (r'\C-u', 'unix-line-discard'), + (r'\C-v', 'quoted-insert'), + (r'\C-w', 'unix-word-rubout'), + (r'\C-x\C-u', 'upcase-region'), + (r'\C-y', 'yank'), + (r'\C-z', 'suspend'), + + (r'\M-b', 'backward-word'), + (r'\M-c', 'capitalize-word'), + (r'\M-d', 'kill-word'), + (r'\M-f', 'forward-word'), + (r'\M-l', 'downcase-word'), + (r'\M-t', 'transpose-words'), + (r'\M-u', 'upcase-word'), + (r'\M-y', 'yank-pop'), + (r'\M--', 'digit-arg'), + (r'\M-0', 'digit-arg'), + (r'\M-1', 'digit-arg'), + (r'\M-2', 'digit-arg'), + (r'\M-3', 'digit-arg'), + (r'\M-4', 'digit-arg'), + (r'\M-5', 'digit-arg'), + (r'\M-6', 'digit-arg'), + (r'\M-7', 'digit-arg'), + (r'\M-8', 'digit-arg'), + (r'\M-9', 'digit-arg'), + (r'\M-\n', 'self-insert'), + (r'\', 'self-insert')] + \ + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\', 'up'), + (r'\', 'down'), + (r'\', 'left'), + (r'\', 'right'), + (r'\', 'quoted-insert'), + (r'\', 'delete'), + (r'\', 'backspace'), + (r'\M-\', 'backward-kill-word'), + 
(r'\', 'end'), + (r'\', 'home'), + (r'\', 'help'), + (r'\EOF', 'end'), # the entries in the terminfo database for xterms + (r'\EOH', 'home'), # seem to be wrong. this is a less than ideal + # workaround + ]) + +hist_emacs_keymap = reader_emacs_keymap + ( + (r'\C-n', 'next-history'), + (r'\C-p', 'previous-history'), + (r'\C-o', 'operate-and-get-next'), + (r'\C-r', 'reverse-history-isearch'), + (r'\C-s', 'forward-history-isearch'), + (r'\M-r', 'restore-history'), + (r'\M-.', 'yank-arg'), + (r'\', 'last-history'), + (r'\', 'first-history')) + +comp_emacs_keymap = hist_emacs_keymap + ( + (r'\t', 'complete'),) + +python_emacs_keymap = comp_emacs_keymap + ( + (r'\n', 'maybe-accept'), + (r'\M-\n', 'self-insert')) + +reader_vi_insert_keymap = tuple( + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\C-d', 'delete'), + (r'\', 'backspace'), + ('')]) + +reader_vi_command_keymap = tuple( + [ + ('E', 'enter-emacs-mode'), + ('R', 'enter-replace-mode'), + ('dw', 'delete-word'), + ('dd', 'delete-line'), + + ('h', 'left'), + ('i', 'enter-insert-mode'), + ('j', 'down'), + ('k', 'up'), + ('l', 'right'), + ('r', 'replace-char'), + ('w', 'forward-word'), + ('x', 'delete'), + ('.', 'repeat-edit'), # argh! 
+ (r'\', 'enter-insert-mode'), + ] + + [(c, 'digit-arg') for c in '01234567689'] + + []) + + +reader_keymaps = { + 'emacs' : reader_emacs_keymap, + 'vi-insert' : reader_vi_insert_keymap, + 'vi-command' : reader_vi_command_keymap + } + +del c # from the listcomps + diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,6 +1,7 @@ import py from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver +from pypy.rlib import objectmodel class DictTests: @@ -69,6 +70,66 @@ res = self.meta_interp(f, [10], listops=True) assert res == expected + def test_dict_trace_hash(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + if total not in dct: + dct[total] = [] + dct[total].append(total) + total -= 1 + return len(dct[0]) + + res1 = f(100) + res2 = self.meta_interp(f, [100], listops=True) + assert res1 == res2 + self.check_loops(int_mod=1) # the hash was traced + + def test_dict_setdefault(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def f(n): + dct = {} + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct.setdefault(total % 2, []).append(total) + total -= 1 + return len(dct[0]) + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(new=0, new_with_vtable=0) + + def test_dict_as_counter(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = 
dct.get(total, 0) + 1 + total -= 1 + return dct[0] + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(int_mod=1) + class TestOOtype(DictTests, OOJitMixin): pass diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. 
pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ 
b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -147,7 +157,6 @@ FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token - virtualrefindexdescr = vrefinfo.descr_virtualref_index virtualforceddescr = vrefinfo.descr_forced jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) @@ -156,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,8 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from 
pypy.rpython.lltypesystem.lloperation import llop @@ -20,6 +22,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -33,6 +37,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -211,10 +217,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -223,7 +231,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. # The addresses are actually grouped in pairs: @@ -236,6 +244,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... @@ -365,7 +380,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +403,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. 
+ """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. + # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. 
+ + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= 
self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw') + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +572,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +581,9 @@ # where it can be fished and reused by the FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) @@ -461,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() 
self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -539,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -562,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -710,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = 
arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -39,13 +39,15 @@ [user at debian-box ~]$ sudo apt-get install \ gcc make python-dev libffi-dev pkg-config \ - libz-dev libbz2-dev libncurses-dev libexpat1-dev libssl-dev libgc-dev python-sphinx + libz-dev libbz2-dev libncurses-dev libexpat1-dev \ + libssl-dev libgc-dev python-sphinx python-greenlet On a Fedora box these are:: [user at fedora-or-rh-box ~]$ sudo yum install \ gcc make python-devel libffi-devel pkg-config \ - zlib-devel bzip2-devel ncurses-devel expat-devel openssl-devel gc-devel python-sphinx + zlib-devel bzip2-devel ncurses-devel expat-devel \ + openssl-devel gc-devel python-sphinx python-greenlet The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. @@ -57,6 +59,7 @@ * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) * ``python-sphinx`` (for the optional documentation build) + * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 2. Translation is somewhat time-consuming (30 min to over one hour) and RAM-hungry. If you have less than 1.5 GB of @@ -76,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. 
As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html @@ -229,6 +233,12 @@ ../../.. etc. +If the executable fails to find suitable libraries, it will report +``debug: WARNING: library path not found, using compiled-in sys.path`` +and then attempt to continue normally. If the default path is usable, +most code will be fine. However, the ``sys.prefix`` will be unset +and some existing libraries assume that this is never the case. + In order to use ``distutils`` or ``setuptools`` a directory ``PREFIX/site-packages`` needs to be created. Here's an example session setting up and using ``easy_install``:: $ cd PREFIX diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + 
+register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): 
S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -127,12 +127,15 @@ checks[2], checks[3])) subclasses = {} for key, subcls in typedef._subclass_cache.items(): + if key[0] is not space.config: + continue cls = key[1] subclasses.setdefault(cls, {}) - subclasses[cls][subcls] = True + prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls) + assert subcls is prevsubcls for cls, set in subclasses.items(): assert len(set) <= 6, "%s has %d subclasses:\n%r" % ( - cls, len(set), [subcls.__name__ for subcls in set]) + cls, len(set), list(set)) def test_getsetproperty(self): class W_SomeType(Wrappable): diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ pypy/doc/*.html pypy/doc/config/*.html pypy/doc/discussion/*.html +pypy/module/cpyext/src/*.o +pypy/module/cpyext/test/*.o pypy/module/test_lib_pypy/ctypes_tests/*.o pypy/translator/c/src/dtoa.o pypy/translator/goal/pypy-c diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/completing_reader.py @@ -0,0 +1,280 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice 
and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl import commands, reader +from pyrepl.reader import Reader + +def uniqify(l): + d = {} + for i in l: + d[i] = 1 + r = d.keys() + r.sort() + return r + +def prefix(wordlist, j = 0): + d = {} + i = j + try: + while 1: + for word in wordlist: + d[word[i]] = 1 + if len(d) > 1: + return wordlist[0][j:i] + i += 1 + d = {} + except IndexError: + return wordlist[0][j:i] + +import re +def stripcolor(s): + return stripcolor.regexp.sub('', s) +stripcolor.regexp = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[m|K]") + +def real_len(s): + return len(stripcolor(s)) + +def left_align(s, maxlen): + stripped = stripcolor(s) + if len(stripped) > maxlen: + # too bad, we remove the color + return stripped[:maxlen] + padding = maxlen - len(stripped) + return s + ' '*padding + +def build_menu(cons, wordlist, start, use_brackets, sort_in_column): + if use_brackets: + item = "[ %s ]" + padding = 4 + else: + item = "%s " + padding = 2 + maxlen = min(max(map(real_len, wordlist)), cons.width - padding) + cols = cons.width / (maxlen + padding) + rows = (len(wordlist) - 1)/cols + 1 + + if sort_in_column: + # sort_in_column=False (default) sort_in_column=True + # A B C A D G + # D E F B E + # G C F + # + # "fill" the table with empty words, so we always have the same amout + # of rows for each column + missing = cols*rows - len(wordlist) + wordlist = wordlist + ['']*missing + indexes = [(i%cols)*rows + i//cols for i in range(len(wordlist))] + 
wordlist = [wordlist[i] for i in indexes] + menu = [] + i = start + for r in range(rows): + row = [] + for col in range(cols): + row.append(item % left_align(wordlist[i], maxlen)) + i += 1 + if i >= len(wordlist): + break + menu.append( ''.join(row) ) + if i >= len(wordlist): + i = 0 + break + if r + 5 > cons.height: + menu.append(" %d more... "%(len(wordlist) - i)) + break + return menu, i + +# this gets somewhat user interface-y, and as a result the logic gets +# very convoluted. +# +# To summarise the summary of the summary:- people are a problem. +# -- The Hitch-Hikers Guide to the Galaxy, Episode 12 + +#### Desired behaviour of the completions commands. +# the considerations are: +# (1) how many completions are possible +# (2) whether the last command was a completion +# (3) if we can assume that the completer is going to return the same set of +# completions: this is controlled by the ``assume_immutable_completions`` +# variable on the reader, which is True by default to match the historical +# behaviour of pyrepl, but e.g. False in the ReadlineAlikeReader to match +# more closely readline's semantics (this is needed e.g. by +# fancycompleter) +# +# if there's no possible completion, beep at the user and point this out. +# this is easy. +# +# if there's only one possible completion, stick it in. if the last thing +# user did was a completion, point out that he isn't getting anywhere, but +# only if the ``assume_immutable_completions`` is True. +# +# now it gets complicated. +# +# for the first press of a completion key: +# if there's a common prefix, stick it in. + +# irrespective of whether anything got stuck in, if the word is now +# complete, show the "complete but not unique" message + +# if there's no common prefix and if the word is not now complete, +# beep. 
+ +# common prefix -> yes no +# word complete \/ +# yes "cbnu" "cbnu" +# no - beep + +# for the second bang on the completion key +# there will necessarily be no common prefix +# show a menu of the choices. + +# for subsequent bangs, rotate the menu around (if there are sufficient +# choices). + +class complete(commands.Command): + def do(self): + r = self.reader + stem = r.get_stem() + if r.assume_immutable_completions and \ + r.last_command_is(self.__class__): + completions = r.cmpltn_menu_choices + else: + r.cmpltn_menu_choices = completions = \ + r.get_completions(stem) + if len(completions) == 0: + r.error("no matches") + elif len(completions) == 1: + if r.assume_immutable_completions and \ + len(completions[0]) == len(stem) and \ + r.last_command_is(self.__class__): + r.msg = "[ sole completion ]" + r.dirty = 1 + r.insert(completions[0][len(stem):]) + else: + p = prefix(completions, len(stem)) + if p <> '': + r.insert(p) + if r.last_command_is(self.__class__): + if not r.cmpltn_menu_vis: + r.cmpltn_menu_vis = 1 + r.cmpltn_menu, r.cmpltn_menu_end = build_menu( + r.console, completions, r.cmpltn_menu_end, + r.use_brackets, r.sort_in_column) + r.dirty = 1 + elif stem + p in completions: + r.msg = "[ complete but not unique ]" + r.dirty = 1 + else: + r.msg = "[ not unique ]" + r.dirty = 1 + +class self_insert(commands.self_insert): + def do(self): + commands.self_insert.do(self) + r = self.reader + if r.cmpltn_menu_vis: + stem = r.get_stem() + if len(stem) < 1: + r.cmpltn_reset() + else: + completions = [w for w in r.cmpltn_menu_choices + if w.startswith(stem)] + if completions: + r.cmpltn_menu, r.cmpltn_menu_end = build_menu( + r.console, completions, 0, + r.use_brackets, r.sort_in_column) + else: + r.cmpltn_reset() + +class CompletingReader(Reader): + """Adds completion support + + Adds instance variables: + * cmpltn_menu, cmpltn_menu_vis, cmpltn_menu_end, cmpltn_choices: + * + """ + # see the comment for the complete command + assume_immutable_completions = 
True + use_brackets = True # display completions inside [] + sort_in_column = False + + def collect_keymap(self): + return super(CompletingReader, self).collect_keymap() + ( + (r'\t', 'complete'),) + + def __init__(self, console): + super(CompletingReader, self).__init__(console) + self.cmpltn_menu = ["[ menu 1 ]", "[ menu 2 ]"] + self.cmpltn_menu_vis = 0 + self.cmpltn_menu_end = 0 + for c in [complete, self_insert]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + + def after_command(self, cmd): + super(CompletingReader, self).after_command(cmd) + if not isinstance(cmd, complete) and not isinstance(cmd, self_insert): + self.cmpltn_reset() + + def calc_screen(self): + screen = super(CompletingReader, self).calc_screen() + if self.cmpltn_menu_vis: + ly = self.lxy[1] + screen[ly:ly] = self.cmpltn_menu + self.screeninfo[ly:ly] = [(0, [])]*len(self.cmpltn_menu) + self.cxy = self.cxy[0], self.cxy[1] + len(self.cmpltn_menu) + return screen + + def finish(self): + super(CompletingReader, self).finish() + self.cmpltn_reset() + + def cmpltn_reset(self): + self.cmpltn_menu = [] + self.cmpltn_menu_vis = 0 + self.cmpltn_menu_end = 0 + self.cmpltn_menu_choices = [] + + def get_stem(self): + st = self.syntax_table + SW = reader.SYNTAX_WORD + b = self.buffer + p = self.pos - 1 + while p >= 0 and st.get(b[p], SW) == SW: + p -= 1 + return u''.join(b[p+1:self.pos]) + + def get_completions(self, stem): + return [] + +def test(): + class TestReader(CompletingReader): + def get_completions(self, stem): + return [s for l in map(lambda x:x.split(),self.history) + for s in l if s and s.startswith(stem)] + reader = TestReader() + reader.ps1 = "c**> " + reader.ps2 = "c/*> " + reader.ps3 = "c|*> " + reader.ps4 = "c\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/.hgsubstate b/.hgsubstate deleted file mode 100644 --- a/.hgsubstate +++ /dev/null @@ -1,2 +0,0 @@ -80037 greenlet -80409 lib_pypy/pyrepl diff --git 
a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + gcrootmap = 
GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/rlib/rdtoa.py b/pypy/rlib/rdtoa.py --- a/pypy/rlib/rdtoa.py +++ b/pypy/rlib/rdtoa.py @@ -5,16 +5,33 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder -import py +import py, sys cdir = py.path.local(pypydir) / 'translator' / 'c' include_dirs = [cdir] +# set the word endianness based on the host's endianness +# and the C double's endianness (which should be equal) +if hasattr(float, '__getformat__'): + assert float.__getformat__('double') == 'IEEE, %s-endian' % sys.byteorder +if sys.byteorder == 'little': + source_file = ['#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754'] +elif sys.byteorder == 'big': + source_file = ['#define WORDS_BIGENDIAN', + '#define DOUBLE_IS_BIG_ENDIAN_IEEE754'] +else: + raise AssertionError(sys.byteorder) + +source_file.append('#include "src/dtoa.c"') +source_file = '\n\n'.join(source_file) + +# ____________________________________________________________ + eci = ExternalCompilationInfo( include_dirs = [cdir], includes = ['src/dtoa.h'], libraries = [], - separate_module_files = [cdir / 'src' / 'dtoa.c'], + 
separate_module_sources = [source_file], export_symbols = ['_PyPy_dg_strtod', '_PyPy_dg_dtoa', '_PyPy_dg_freedtoa', diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_wait.py @@ -0,0 +1,51 @@ +from ctypes import CDLL, c_int, POINTER, byref +from ctypes.util import find_library +from resource import _struct_rusage, struct_rusage + +__all__ = ["wait3", "wait4"] + +libc = CDLL(find_library("c")) +c_wait3 = libc.wait3 + +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] + +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + 
c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + +def wait3(options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage + +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -34,11 +34,7 @@ @jit.purefunction def _getcell_makenew(self, key): - res = self.content.get(key, None) - if res is not None: - return res - result = self.content[key] = ModuleCell() - return result + return self.content.setdefault(key, ModuleCell()) def impl_setitem(self, w_key, w_value): space = self.space @@ -50,6 +46,16 @@ def impl_setitem_str(self, name, w_value): self.getcell(name, True).w_value = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + cell = self.getcell(space.str_w(w_key), True) + if cell.w_value is None: + cell.w_value = w_default + return cell.w_value + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -7,6 +7,7 @@ CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject +from pypy.objspace.std.objectobject import W_ObjectObject from pypy.rlib.objectmodel import 
specialize, we_are_translated from pypy.rlib.rweakref import RWeakKeyDictionary from pypy.rpython.annlowlevel import llhelper @@ -370,6 +371,15 @@ @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): obj.c_ob_refcnt = 1 + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + assert isinstance(w_type, W_TypeObject) + if w_type.is_cpytype(): + w_obj = space.allocate_instance(W_ObjectObject, w_type) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + else: + assert False, "Please add more cases in _Py_NewReference()" def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unix_console.py @@ -0,0 +1,567 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import termios, select, os, struct, errno +import signal, re, time, sys +from fcntl import ioctl +from pyrepl import curses +from pyrepl.fancy_termios import tcgetattr, tcsetattr +from pyrepl.console import Console, Event +from pyrepl import unix_eventqueue + +class InvalidTerminal(RuntimeError): + pass + +_error = (termios.error, curses.error, InvalidTerminal) + +# there are arguments for changing this to "refresh" +SIGWINCH_EVENT = 'repaint' + +FIONREAD = getattr(termios, "FIONREAD", None) +TIOCGWINSZ = getattr(termios, "TIOCGWINSZ", None) + +def _my_getstr(cap, optional=0): + r = curses.tigetstr(cap) + if not optional and r is None: + raise InvalidTerminal, \ + "terminal doesn't have the required '%s' capability"%cap + return r + +# at this point, can we say: AAAAAAAAAAAAAAAAAAAAAARGH! +def maybe_add_baudrate(dict, rate): + name = 'B%d'%rate + if hasattr(termios, name): + dict[getattr(termios, name)] = rate + +ratedict = {} +for r in [0, 110, 115200, 1200, 134, 150, 1800, 19200, 200, 230400, + 2400, 300, 38400, 460800, 4800, 50, 57600, 600, 75, 9600]: + maybe_add_baudrate(ratedict, r) + +del r, maybe_add_baudrate + +delayprog = re.compile("\\$<([0-9]+)((?:/|\\*){0,2})>") + +try: + poll = select.poll +except AttributeError: + # this is exactly the minumum necessary to support what we + # do with poll objects + class poll: + def __init__(self): + pass + def register(self, fd, flag): + self.fd = fd + def poll(self, timeout=None): + r,w,e = select.select([self.fd],[],[],timeout) + return r + +POLLIN = getattr(select, "POLLIN", None) + +class UnixConsole(Console): + def __init__(self, f_in=0, f_out=1, term=None, encoding=None): + if encoding is None: + encoding = sys.getdefaultencoding() + + self.encoding = encoding + + if isinstance(f_in, int): + self.input_fd = f_in + else: + self.input_fd = f_in.fileno() + + if isinstance(f_out, int): + self.output_fd = f_out + else: + self.output_fd = f_out.fileno() + + self.pollob = poll() + 
self.pollob.register(self.input_fd, POLLIN) + curses.setupterm(term, self.output_fd) + self.term = term + + self._bel = _my_getstr("bel") + self._civis = _my_getstr("civis", optional=1) + self._clear = _my_getstr("clear") + self._cnorm = _my_getstr("cnorm", optional=1) + self._cub = _my_getstr("cub", optional=1) + self._cub1 = _my_getstr("cub1", 1) + self._cud = _my_getstr("cud", 1) + self._cud1 = _my_getstr("cud1", 1) + self._cuf = _my_getstr("cuf", 1) + self._cuf1 = _my_getstr("cuf1", 1) + self._cup = _my_getstr("cup") + self._cuu = _my_getstr("cuu", 1) + self._cuu1 = _my_getstr("cuu1", 1) + self._dch1 = _my_getstr("dch1", 1) + self._dch = _my_getstr("dch", 1) + self._el = _my_getstr("el") + self._hpa = _my_getstr("hpa", 1) + self._ich = _my_getstr("ich", 1) + self._ich1 = _my_getstr("ich1", 1) + self._ind = _my_getstr("ind", 1) + self._pad = _my_getstr("pad", 1) + self._ri = _my_getstr("ri", 1) + self._rmkx = _my_getstr("rmkx", 1) + self._smkx = _my_getstr("smkx", 1) + + ## work out how we're going to sling the cursor around + if 0 and self._hpa: # hpa don't work in windows telnet :-( + self.__move_x = self.__move_x_hpa + elif self._cub and self._cuf: + self.__move_x = self.__move_x_cub_cuf + elif self._cub1 and self._cuf1: + self.__move_x = self.__move_x_cub1_cuf1 + else: + raise RuntimeError, "insufficient terminal (horizontal)" + + if self._cuu and self._cud: + self.__move_y = self.__move_y_cuu_cud + elif self._cuu1 and self._cud1: + self.__move_y = self.__move_y_cuu1_cud1 + else: + raise RuntimeError, "insufficient terminal (vertical)" + + if self._dch1: + self.dch1 = self._dch1 + elif self._dch: + self.dch1 = curses.tparm(self._dch, 1) + else: + self.dch1 = None + + if self._ich1: + self.ich1 = self._ich1 + elif self._ich: + self.ich1 = curses.tparm(self._ich, 1) + else: + self.ich1 = None + + self.__move = self.__move_short + + self.event_queue = unix_eventqueue.EventQueue(self.input_fd) + self.partial_char = '' + self.cursor_visible = 1 + + def 
change_encoding(self, encoding): + self.encoding = encoding + + def refresh(self, screen, (cx, cy)): + # this function is still too long (over 90 lines) + + if not self.__gone_tall: + while len(self.screen) < min(len(screen), self.height): + self.__hide_cursor() + self.__move(0, len(self.screen) - 1) + self.__write("\n") + self.__posxy = 0, len(self.screen) + self.screen.append("") + else: + while len(self.screen) < len(screen): + self.screen.append("") + + if len(screen) > self.height: + self.__gone_tall = 1 + self.__move = self.__move_tall + + px, py = self.__posxy + old_offset = offset = self.__offset + height = self.height + + if 0: + global counter + try: + counter + except NameError: + counter = 0 + self.__write_code(curses.tigetstr("setaf"), counter) + counter += 1 + if counter > 8: + counter = 0 + + # we make sure the cursor is on the screen, and that we're + # using all of the screen if we can + if cy < offset: + offset = cy + elif cy >= offset + height: + offset = cy - height + 1 + elif offset > 0 and len(screen) < offset + height: + offset = max(len(screen) - height, 0) + screen.append("") + + oldscr = self.screen[old_offset:old_offset + height] + newscr = screen[offset:offset + height] + + # use hardware scrolling if we have it. 
+ if old_offset > offset and self._ri: + self.__hide_cursor() + self.__write_code(self._cup, 0, 0) + self.__posxy = 0, old_offset + for i in range(old_offset - offset): + self.__write_code(self._ri) + oldscr.pop(-1) + oldscr.insert(0, "") + elif old_offset < offset and self._ind: + self.__hide_cursor() + self.__write_code(self._cup, self.height - 1, 0) + self.__posxy = 0, old_offset + self.height - 1 + for i in range(offset - old_offset): + self.__write_code(self._ind) + oldscr.pop(0) + oldscr.append("") + + self.__offset = offset + + for y, oldline, newline, in zip(range(offset, offset + height), + oldscr, + newscr): + if oldline != newline: + self.__write_changed_line(y, oldline, newline, px) + + y = len(newscr) + while y < len(oldscr): + self.__hide_cursor() + self.__move(0, y) + self.__posxy = 0, y + self.__write_code(self._el) + y += 1 + + self.__show_cursor() + + self.screen = screen + self.move_cursor(cx, cy) + self.flushoutput() + + def __write_changed_line(self, y, oldline, newline, px): + # this is frustrating; there's no reason to test (say) + # self.dch1 inside the loop -- but alternative ways of + # structuring this function are equally painful (I'm trying to + # avoid writing code generators these days...) 
+ x = 0 + minlen = min(len(oldline), len(newline)) + # + # reuse the oldline as much as possible, but stop as soon as we + # encounter an ESCAPE, because it might be the start of an escape + # sequene + while x < minlen and oldline[x] == newline[x] and newline[x] != '\x1b': + x += 1 + if oldline[x:] == newline[x+1:] and self.ich1: + if ( y == self.__posxy[1] and x > self.__posxy[0] + and oldline[px:x] == newline[px+1:x+1] ): + x = px + self.__move(x, y) + self.__write_code(self.ich1) + self.__write(newline[x]) + self.__posxy = x + 1, y + elif x < minlen and oldline[x + 1:] == newline[x + 1:]: + self.__move(x, y) + self.__write(newline[x]) + self.__posxy = x + 1, y + elif (self.dch1 and self.ich1 and len(newline) == self.width + and x < len(newline) - 2 + and newline[x+1:-1] == oldline[x:-2]): + self.__hide_cursor() + self.__move(self.width - 2, y) + self.__posxy = self.width - 2, y + self.__write_code(self.dch1) + self.__move(x, y) + self.__write_code(self.ich1) + self.__write(newline[x]) + self.__posxy = x + 1, y + else: + self.__hide_cursor() + self.__move(x, y) + if len(oldline) > len(newline): + self.__write_code(self._el) + self.__write(newline[x:]) + self.__posxy = len(newline), y + + if '\x1b' in newline: + # ANSI escape characters are present, so we can't assume + # anything about the position of the cursor. Moving the cursor + # to the left margin should work to get to a known position. 
+ self.move_cursor(0, y) + + def __write(self, text): + self.__buffer.append((text, 0)) + + def __write_code(self, fmt, *args): + self.__buffer.append((curses.tparm(fmt, *args), 1)) + + def __maybe_write_code(self, fmt, *args): + if fmt: + self.__write_code(fmt, *args) + + def __move_y_cuu1_cud1(self, y): + dy = y - self.__posxy[1] + if dy > 0: + self.__write_code(dy*self._cud1) + elif dy < 0: + self.__write_code((-dy)*self._cuu1) + + def __move_y_cuu_cud(self, y): + dy = y - self.__posxy[1] + if dy > 0: + self.__write_code(self._cud, dy) + elif dy < 0: + self.__write_code(self._cuu, -dy) + + def __move_x_hpa(self, x): + if x != self.__posxy[0]: + self.__write_code(self._hpa, x) + + def __move_x_cub1_cuf1(self, x): + dx = x - self.__posxy[0] + if dx > 0: + self.__write_code(self._cuf1*dx) + elif dx < 0: + self.__write_code(self._cub1*(-dx)) + + def __move_x_cub_cuf(self, x): + dx = x - self.__posxy[0] + if dx > 0: + self.__write_code(self._cuf, dx) + elif dx < 0: + self.__write_code(self._cub, -dx) + + def __move_short(self, x, y): + self.__move_x(x) + self.__move_y(y) + + def __move_tall(self, x, y): + assert 0 <= y - self.__offset < self.height, y - self.__offset + self.__write_code(self._cup, y - self.__offset, x) + + def move_cursor(self, x, y): + if y < self.__offset or y >= self.__offset + self.height: + self.event_queue.insert(Event('scroll', None)) + else: + self.__move(x, y) + self.__posxy = x, y + self.flushoutput() + + def prepare(self): + # per-readline preparations: + self.__svtermstate = tcgetattr(self.input_fd) + raw = self.__svtermstate.copy() + raw.iflag &=~ (termios.BRKINT | termios.INPCK | + termios.ISTRIP | termios.IXON) + raw.oflag &=~ (termios.OPOST) + raw.cflag &=~ (termios.CSIZE|termios.PARENB) + raw.cflag |= (termios.CS8) + raw.lflag &=~ (termios.ICANON|termios.ECHO| + termios.IEXTEN|(termios.ISIG*1)) + raw.cc[termios.VMIN] = 1 + raw.cc[termios.VTIME] = 0 + tcsetattr(self.input_fd, termios.TCSADRAIN, raw) + + self.screen = [] + self.height, 
self.width = self.getheightwidth() + + self.__buffer = [] + + self.__posxy = 0, 0 + self.__gone_tall = 0 + self.__move = self.__move_short + self.__offset = 0 + + self.__maybe_write_code(self._smkx) + + self.old_sigwinch = signal.signal( + signal.SIGWINCH, self.__sigwinch) + + def restore(self): + self.__maybe_write_code(self._rmkx) + self.flushoutput() + tcsetattr(self.input_fd, termios.TCSADRAIN, self.__svtermstate) + + signal.signal(signal.SIGWINCH, self.old_sigwinch) + + def __sigwinch(self, signum, frame): + self.height, self.width = self.getheightwidth() + self.event_queue.insert(Event('resize', None)) + + def push_char(self, char): + self.partial_char += char + try: + c = unicode(self.partial_char, self.encoding) + except UnicodeError, e: + if len(e.args) > 4 and \ + e.args[4] == 'unexpected end of data': + pass + else: + raise + else: + self.partial_char = '' + self.event_queue.push(c) + + def get_event(self, block=1): + while self.event_queue.empty(): + while 1: # All hail Unix! 
+ try: + self.push_char(os.read(self.input_fd, 1)) + except (IOError, OSError), err: + if err.errno == errno.EINTR: + if not self.event_queue.empty(): + return self.event_queue.get() + else: + continue + else: + raise + else: + break + if not block: + break + return self.event_queue.get() + + def wait(self): + self.pollob.poll() + + def set_cursor_vis(self, vis): + if vis: + self.__show_cursor() + else: + self.__hide_cursor() + + def __hide_cursor(self): + if self.cursor_visible: + self.__maybe_write_code(self._civis) + self.cursor_visible = 0 + + def __show_cursor(self): + if not self.cursor_visible: + self.__maybe_write_code(self._cnorm) + self.cursor_visible = 1 + + def repaint_prep(self): + if not self.__gone_tall: + self.__posxy = 0, self.__posxy[1] + self.__write("\r") + ns = len(self.screen)*['\000'*self.width] + self.screen = ns + else: + self.__posxy = 0, self.__offset + self.__move(0, self.__offset) + ns = self.height*['\000'*self.width] + self.screen = ns + + if TIOCGWINSZ: + def getheightwidth(self): + try: + return int(os.environ["LINES"]), int(os.environ["COLUMNS"]) + except KeyError: + height, width = struct.unpack( + "hhhh", ioctl(self.input_fd, TIOCGWINSZ, "\000"*8))[0:2] + if not height: return 25, 80 + return height, width + else: + def getheightwidth(self): + try: + return int(os.environ["LINES"]), int(os.environ["COLUMNS"]) + except KeyError: + return 25, 80 + + def forgetinput(self): + termios.tcflush(self.input_fd, termios.TCIFLUSH) + + def flushoutput(self): + for text, iscode in self.__buffer: + if iscode: + self.__tputs(text) + else: + os.write(self.output_fd, text.encode(self.encoding)) + del self.__buffer[:] + + def __tputs(self, fmt, prog=delayprog): + """A Python implementation of the curses tputs function; the + curses one can't really be wrapped in a sane manner. 
+ + I have the strong suspicion that this is complexity that + will never do anyone any good.""" + # using .get() means that things will blow up + # only if the bps is actually needed (which I'm + # betting is pretty unlkely) + bps = ratedict.get(self.__svtermstate.ospeed) + while 1: + m = prog.search(fmt) + if not m: + os.write(self.output_fd, fmt) + break + x, y = m.span() + os.write(self.output_fd, fmt[:x]) + fmt = fmt[y:] + delay = int(m.group(1)) + if '*' in m.group(2): + delay *= self.height + if self._pad: + nchars = (bps*delay)/1000 + os.write(self.output_fd, self._pad*nchars) + else: + time.sleep(float(delay)/1000.0) + + def finish(self): + y = len(self.screen) - 1 + while y >= 0 and not self.screen[y]: + y -= 1 + self.__move(0, min(y, self.height + self.__offset - 1)) + self.__write("\n\r") + self.flushoutput() + + def beep(self): + self.__maybe_write_code(self._bel) + self.flushoutput() + + if FIONREAD: + def getpending(self): + e = Event('key', '', '') + + while not self.event_queue.empty(): + e2 = self.event_queue.get() + e.data += e2.data + e.raw += e.raw + + amount = struct.unpack( + "i", ioctl(self.input_fd, FIONREAD, "\0\0\0\0"))[0] + raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace') + e.data += raw + e.raw += raw + return e + else: + def getpending(self): + e = Event('key', '', '') + + while not self.event_queue.empty(): + e2 = self.event_queue.get() + e.data += e2.data + e.raw += e.raw + + amount = 10000 + raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace') + e.data += raw + e.raw += raw + return e + + def clear(self): + self.__write_code(self._clear) + self.__gone_tall = 1 + self.__move = self.__move_tall + self.__posxy = 0, 0 + self.screen = [] + diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -3,9 +3,8 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from 
pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import Py_LT, Py_LE, Py_NE, Py_EQ,\ - Py_GE, Py_GT, fopen, fclose, fwrite -from pypy.tool.udir import udir +from pypy.module.cpyext.api import ( + Py_LT, Py_LE, Py_NE, Py_EQ, Py_GE, Py_GT) class TestObject(BaseApiTest): def test_IsTrue(self, space, api): @@ -175,58 +174,23 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" assert api.PyObject_Unicode(space.wrap("\xe9")) is None api.PyErr_Clear() - def test_file_fromstring(self, space, api): - filename = rffi.str2charp(str(udir / "_test_file")) - mode = rffi.str2charp("wb") - w_file = api.PyFile_FromString(filename, mode) - rffi.free_charp(filename) - rffi.free_charp(mode) - - assert api.PyFile_Check(w_file) - assert api.PyFile_CheckExact(w_file) - assert not api.PyFile_Check(space.wrap("text")) - - space.call_method(w_file, "write", space.wrap("text")) - space.call_method(w_file, "close") - assert (udir / "_test_file").read() == "text" - - def test_file_getline(self, space, api): - filename = rffi.str2charp(str(udir / "_test_file")) - - mode = rffi.str2charp("w") - w_file = api.PyFile_FromString(filename, mode) - space.call_method(w_file, "write", - space.wrap("line1\nline2\nline3\nline4")) - space.call_method(w_file, "close") - - rffi.free_charp(mode) - mode = 
rffi.str2charp("r") - w_file = api.PyFile_FromString(filename, mode) - rffi.free_charp(filename) - rffi.free_charp(mode) - - w_line = api.PyFile_GetLine(w_file, 0) - assert space.str_w(w_line) == "line1\n" - - w_line = api.PyFile_GetLine(w_file, 4) - assert space.str_w(w_line) == "line" - - w_line = api.PyFile_GetLine(w_file, 0) - assert space.str_w(w_line) == "2\n" - - # XXX We ought to raise an EOFError here, but don't - w_line = api.PyFile_GetLine(w_file, -1) - # assert api.PyErr_Occurred() is space.w_EOFError - assert space.str_w(w_line) == "line3\n" - - space.call_method(w_file, "close") - class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/lib_pypy/pyrepl/tests/wishes.py b/lib_pypy/pyrepl/tests/wishes.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/wishes.py @@ -0,0 +1,38 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +# this test case should contain as-verbatim-as-possible versions of +# (applicable) feature requests + +class WishesTestCase(ReaderTestCase): + + def test_quoted_insert_repeat(self): + self.run_test([(('digit-arg', '3'), ['']), + ( 'quoted-insert', ['']), + (('self-insert', '\033'), ['^[^[^[']), + ( 'accept', None)]) + +def test(): + run_testcase(WishesTestCase) + +if __name__ == '__main__': + test() diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitely encoded as well, +# with immediate(argnum)). + +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,7 +364,9 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +384,8 @@ INSN_bi32(mc, 
offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,23 +463,25 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) CMP_ji = select_8_or_32_bit_immed(CMP_ji8, 
CMP_ji32) - CMP_jr = insn(rex_w, '\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -509,7 +530,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -535,12 +556,15 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion @@ -640,7 +664,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -681,7 +705,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/metainterp/virtualref.py b/pypy/jit/metainterp/virtualref.py --- 
a/pypy/jit/metainterp/virtualref.py +++ b/pypy/jit/metainterp/virtualref.py @@ -13,7 +13,6 @@ self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef', ('super', rclass.OBJECT), ('virtual_token', lltype.Signed), - ('virtualref_index', lltype.Signed), ('forced', rclass.OBJECTPTR)) self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', @@ -27,8 +26,6 @@ fielddescrof = self.cpu.fielddescrof self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF, 'virtual_token') - self.descr_virtualref_index = fielddescrof(self.JIT_VIRTUAL_REF, - 'virtualref_index') self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced') # # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. 
executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - 
first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? 
- m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def 
translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,14 +190,30 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. 
""" return posix.waitpid(-1, 0) + def wait3(options): + """ wait3(options) -> (pid, status, rusage) + + Wait for completion of a child process and provides resource usage informations + """ + from _pypy_wait import wait3 + return wait3(options) + + def wait4(pid, options): + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -285,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == 
pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/doc/config/confrest.py b/pypy/doc/config/confrest.py --- a/pypy/doc/config/confrest.py +++ b/pypy/doc/config/confrest.py @@ -7,7 +7,6 @@ all_optiondescrs = [pypyoption.pypy_optiondescription, translationoption.translation_optiondescription, ] - start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) class PyPyPage(PyPyPage): @@ -29,7 +28,7 @@ Page = PyPyPage def get_content(self, txtpath, encoding): - if txtpath.basename == "commandline.txt": + if txtpath.basename == "commandline.rst": result = [] for line in txtpath.read().splitlines(): if line.startswith('.. GENERATE:'): diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -12,12 +12,13 @@ 'get_ident': 'os_thread.get_ident', 'exit': 'os_thread.exit', 'exit_thread': 'os_thread.exit', # obsolete synonym + 'interrupt_main': 'os_thread.interrupt_main', 'stack_size': 'os_thread.stack_size', '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def 
_clone_if_mutable(self): res = ResumeGuardDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res diff --git a/pypy/jit/metainterp/test/test_optimizebasic.py b/pypy/jit/metainterp/test/test_optimizebasic.py --- a/pypy/jit/metainterp/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/test/test_optimizebasic.py @@ -256,7 +256,7 @@ loop.call_pure_results = args_dict() if call_pure_results is not None: for k, v in call_pure_results.items(): - loop.call_pure_results[list(k)] = v + loop.call_pure_results[list(k)] = v metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo @@ -2889,7 +2889,7 @@ # the result of the call, recorded as the first arg), or turned into # a regular CALL. 
arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)] - call_pure_results = {tuple(arg_consts): ConstInt(42)} + call_pure_results = {tuple(arg_consts): ConstInt(42)} ops = ''' [i0, i1, i2] escape(i1) @@ -2934,7 +2934,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -2967,7 +2966,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3008,7 +3006,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3065,7 +3062,7 @@ self.loop.inputargs[0].value = self.nodeobjvalue self.check_expanded_fail_descr('''p2, p1 p0.refdescr = p2 - where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 where p1 is a node_vtable, nextdescr=p1b where p1b is a node_vtable, valuedescr=i1 ''', rop.GUARD_NO_EXCEPTION) @@ -3087,7 +3084,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3114,7 +3110,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3363,7 
+3358,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) i3 = int_lt(i2, -5) guard_true(i3) [] @@ -3374,7 +3369,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) jump(i0) """ diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py --- a/pypy/rpython/lltypesystem/rdict.py +++ b/pypy/rpython/lltypesystem/rdict.py @@ -4,12 +4,15 @@ from pypy.rpython.rdict import AbstractDictRepr, AbstractDictIteratorRepr,\ rtype_newdict from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rarithmetic import r_uint, intmask +from pypy.rlib.rarithmetic import r_uint, intmask, LONG_BIT from pypy.rlib.objectmodel import hlinvoke from pypy.rpython import robject -from pypy.rlib import objectmodel +from pypy.rlib import objectmodel, jit from pypy.rpython import rmodel +HIGHEST_BIT = intmask(1 << (LONG_BIT - 1)) +MASK = intmask(HIGHEST_BIT - 1) + # ____________________________________________________________ # # generic implementation of RPython dictionary, with parametric DICTKEY and @@ -405,6 +408,10 @@ ENTRIES = lltype.typeOf(entries).TO return ENTRIES.fasthashfn(entries[i].key) + at jit.dont_look_inside +def ll_get_value(d, i): + return d.entries[i].value + def ll_keyhash_custom(d, key): DICT = lltype.typeOf(d).TO return hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key) @@ 
-422,18 +429,21 @@ def ll_dict_getitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - entries = d.entries - if entries.valid(i): - return entries[i].value - else: - raise KeyError -ll_dict_getitem.oopspec = 'dict.getitem(d, key)' + if not i & HIGHEST_BIT: + return ll_get_value(d, i) + else: + raise KeyError def ll_dict_setitem(d, key, value): hash = d.keyhash(key) i = ll_dict_lookup(d, key, hash) + return _ll_dict_setitem_lookup_done(d, key, value, hash, i) + + at jit.dont_look_inside +def _ll_dict_setitem_lookup_done(d, key, value, hash, i): + valid = (i & HIGHEST_BIT) == 0 + i = i & MASK everused = d.entries.everused(i) - valid = d.entries.valid(i) # set up the new entry ENTRY = lltype.typeOf(d.entries).TO.OF entry = d.entries[i] @@ -449,7 +459,6 @@ d.num_pristine_entries -= 1 if d.num_pristine_entries <= len(d.entries) / 3: ll_dict_resize(d) -ll_dict_setitem.oopspec = 'dict.setitem(d, key, value)' def ll_dict_insertclean(d, key, value, hash): # Internal routine used by ll_dict_resize() to insert an item which is @@ -470,7 +479,7 @@ def ll_dict_delitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - if not d.entries.valid(i): + if i & HIGHEST_BIT: raise KeyError _ll_dict_del(d, i) ll_dict_delitem.oopspec = 'dict.delitem(d, key)' @@ -542,7 +551,7 @@ elif entries.everused(i): freeslot = i else: - return i # pristine entry -- lookup failed + return i | HIGHEST_BIT # pristine entry -- lookup failed # In the loop, a deleted entry (everused and not valid) is by far # (factor of 100s) the least likely outcome, so test for that last. 
@@ -557,7 +566,7 @@ if not entries.everused(i): if freeslot == -1: freeslot = i - return freeslot + return freeslot | HIGHEST_BIT elif entries.valid(i): checkingkey = entries[i].key if direct_compare and checkingkey == key: @@ -711,22 +720,19 @@ def ll_get(dict, key, default): i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value - else: + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) + else: return default -ll_get.oopspec = 'dict.get(dict, key, default)' def ll_setdefault(dict, key, default): - i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value + hash = dict.keyhash(key) + i = ll_dict_lookup(dict, key, hash) + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) else: - ll_dict_setitem(dict, key, default) + _ll_dict_setitem_lookup_done(dict, key, default, hash, i) return default -ll_setdefault.oopspec = 'dict.setdefault(dict, key, default)' def ll_copy(dict): DICT = lltype.typeOf(dict).TO @@ -768,7 +774,10 @@ while i < d2len: if entries.valid(i): entry = entries[i] - ll_dict_setitem(dic1, entry.key, entry.value) + hash = entries.hash(i) + key = entry.key + j = ll_dict_lookup(dic1, key, hash) + _ll_dict_setitem_lookup_done(dic1, key, entry.value, hash, j) i += 1 ll_update.oopspec = 'dict.update(dic1, dic2)' @@ -818,8 +827,7 @@ def ll_contains(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - return d.entries.valid(i) -ll_contains.oopspec = 'dict.contains(d, key)' + return not i & HIGHEST_BIT POPITEMINDEX = lltype.Struct('PopItemIndex', ('nextindex', lltype.Signed)) global_popitem_index = lltype.malloc(POPITEMINDEX, zero=True, immortal=True) diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf 
assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -15,7 +15,7 @@ ## The problem ## ----------- ## -## PyString_AsString() must returns a (non-movable) pointer to the underlying +## PyString_AsString() must return a (non-movable) pointer to the underlying ## buffer, whereas pypy strings are movable. C code may temporarily store ## this address and use it, as long as it owns a reference to the PyObject. ## There is no "release" function to specify that the pointer is not needed diff --git a/lib_pypy/pyrepl/copy_code.py b/lib_pypy/pyrepl/copy_code.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/copy_code.py @@ -0,0 +1,73 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import new + +def copy_code_with_changes(codeobject, + argcount=None, + nlocals=None, + stacksize=None, + flags=None, + code=None, + consts=None, + names=None, + varnames=None, + filename=None, + name=None, + firstlineno=None, + lnotab=None): + if argcount is None: argcount = codeobject.co_argcount + if nlocals is None: nlocals = codeobject.co_nlocals + if stacksize is None: stacksize = codeobject.co_stacksize + if flags is None: flags = codeobject.co_flags + if code is None: code = codeobject.co_code + if consts is None: consts = codeobject.co_consts + if names is None: names = codeobject.co_names + if varnames is None: varnames = codeobject.co_varnames + if filename is None: filename = codeobject.co_filename + if name is None: name = codeobject.co_name + if firstlineno is None: firstlineno = codeobject.co_firstlineno + if lnotab is None: lnotab = codeobject.co_lnotab + return new.code(argcount, + nlocals, + stacksize, + flags, + code, + consts, + names, + varnames, + filename, + name, + firstlineno, + lnotab) + +code_attrs=['argcount', + 'nlocals', + 'stacksize', + 'flags', + 'code', + 'consts', + 'names', + 'varnames', + 'filename', + 'name', + 'firstlineno', + 'lnotab'] + + diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } @@ -44,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True 
gcrootmap = MockGcRootMap() @@ -166,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -204,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -220,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -254,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + 
(WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -274,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -289,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. 
+ ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. 
diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() @@ -245,6 +262,11 @@ obj = foo.new() assert module.read_tp_dict(obj) == foo.fooType.copy + def test_custom_allocation(self): + foo = self.import_module("foo") + obj = foo.newCustom() + assert type(obj) is 
foo.Custom + assert type(foo.Custom) is foo.MetaType class TestTypes(BaseApiTest): def test_type_attributes(self, space, api): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,17 +29,22 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) + + def test_basic_threadstate_dance(self, space, api): + # Let extension modules call these functions, + # Not sure of the semantics in pypy though. + # (cpyext always acquires and releases the GIL around calls) + tstate = api.PyThreadState_Swap(None) + assert tstate is not None + assert not api.PyThreadState_Swap(tstate) + + api.PyEval_AcquireThread(tstate) + api.PyEval_ReleaseThread(tstate) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. 
Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." + raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -399,12 +399,7 @@ return ll_rdict.ll_newdict(DICT) _ll_0_newdict.need_result_type = True - _ll_2_dict_getitem = ll_rdict.ll_dict_getitem - _ll_3_dict_setitem = ll_rdict.ll_dict_setitem _ll_2_dict_delitem = ll_rdict.ll_dict_delitem - _ll_3_dict_setdefault = ll_rdict.ll_setdefault - _ll_2_dict_contains = ll_rdict.ll_contains - _ll_3_dict_get = ll_rdict.ll_get _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def 
rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that needs to go via its slow path. 
+ import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): @@ -153,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - 
# string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. 
For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. + halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert 
buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each 
safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -314,6 +314,7 @@ 'Py_BuildValue', 'Py_VaBuildValue', 'PyTuple_Pack', 'PyErr_Format', 'PyErr_NewException', 'PyErr_NewExceptionWithDoc', + 'PySys_WriteStdout', 'PySys_WriteStderr', 'PyEval_CallFunction', 'PyEval_CallMethod', 'PyObject_CallFunction', 'PyObject_CallMethod', 'PyObject_CallFunctionObjArgs', 'PyObject_CallMethodObjArgs', @@ -399,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, 
rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -538,7 +527,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): @@ -883,6 +873,7 @@ source_dir / "stringobject.c", source_dir / "mysnprintf.c", source_dir / "pythonrun.c", + source_dir / "sysmodule.c", source_dir / "bufferobject.c", source_dir / "object.c", source_dir / "cobject.c", diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) 
!= 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import \ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ @@ -151,6 +152,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -259,7 +262,29 @@ d[33] = 99 assert d == dd assert x == 99 - + + def test_setdefault_fast(self): + class Key(object): + calls = 0 + def __hash__(self): + self.calls += 1 + return object.__hash__(self) + + k = Key() + d = {} + d.setdefault(k, []) + if self.on_pypy: + assert k.calls == 1 + + d.setdefault(k, 1) + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 + def test_update(self): d = {1:2, 3:4} dd = d.copy() @@ -704,13 +729,20 @@ class FakeString(str): + hash_count = 0 def unwrap(self, space): self.unwrapped = True return str(self) + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: + hash_count = 0 def hash_w(self, 
obj): + self.hash_count += 1 return hash(obj) def unwrap(self, x): return x @@ -726,6 +758,8 @@ return [] DictObjectCls = W_DictMultiObject def type(self, w_obj): + if isinstance(w_obj, FakeString): + return str return type(w_obj) w_str = str def str_w(self, string): @@ -890,6 +924,19 @@ impl.setitem(x, x) assert impl.r_dict_content is not None + def test_setdefault_fast(self): + on_pypy = "__pypy__" in sys.builtin_module_names + impl = self.impl + key = FakeString(self.string) + x = impl.setdefault(key, 1) + assert x == 1 + if on_pypy: + assert key.hash_count == 1 + x = impl.setdefault(key, 2) + assert x == 1 + if on_pypy: + assert key.hash_count == 2 + class TestStrDictImplementation(BaseTestRDictImplementation): ImplementionClass = StrDictImplementation diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -96,6 +96,10 @@ out, err = capfd.readouterr() assert "Exception ValueError: 'message' in 'location' ignored" == err.strip() + def test_ExceptionInstance_Class(self, space, api): + instance = space.call_function(space.w_ValueError) + assert api.PyExceptionInstance_Class(instance) is space.w_ValueError + class AppTestFetch(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -221,14 +221,33 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO except AttributeError: list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) 
hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr) + return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + + def rtype_method_rsplit(self, hop): + rstr = hop.args_r[0].repr + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) + try: + list_type = hop.r_result.lowleveltype.TO + except AttributeError: + list_type = hop.r_result.lowleveltype + cLIST = hop.inputconst(Void, list_type) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,20 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, - getattrfunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, - ssizeobjargproc, iternextfunc, initproc, richcmpfunc, hashfunc, - descrgetfunc, descrsetfunc, objobjproc) + getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, + ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from 
pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -65,6 +69,12 @@ finally: rffi.free_charp(name_ptr) +def wrap_getattro(space, w_self, w_args, func): + func_target = rffi.cast(getattrofunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + return generic_cpy_call(space, func_target, w_self, args_w[0]) + def wrap_setattr(space, w_self, w_args, func): func_target = rffi.cast(setattrofunc, func) check_num_args(space, w_args, 2) @@ -187,18 +197,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, 
func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -289,7 +340,12 @@ # irregular interface, because of tp_getattr/tp_getattro confusion if NAME == "__getattr__": - wrapper = wrap_getattr + if SLOT == "tp_getattro": + wrapper = wrap_getattro + elif SLOT == "tp_getattr": + wrapper = wrap_getattr + else: + assert False function = globals().get(FUNCTION, None) assert FLAGS == 0 or FLAGS == PyWrapperFlag_KEYWORDS @@ -455,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), @@ -560,12 +616,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff 
--git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -5,9 +5,16 @@ cpython_api, build_type_checkers, PyObject, CONST_STRING, CANNOT_FAIL, Py_ssize_t) from pypy.rlib.rarithmetic import r_uint +import sys PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") + at cpython_api([], lltype.Signed, error=CANNOT_FAIL) +def PyInt_GetMax(space): + """Return the system's idea of the largest integer it can handle (LONG_MAX, + as defined in the system header files).""" + return sys.maxint + @cpython_api([lltype.Signed], PyObject) def PyInt_FromLong(space, ival): """Create a new integer object with a value of ival. 
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/lib-python/modified-2.7.0/distutils/command/build_ext.py b/lib-python/modified-2.7.0/distutils/command/build_ext.py --- a/lib-python/modified-2.7.0/distutils/command/build_ext.py +++ b/lib-python/modified-2.7.0/distutils/command/build_ext.py @@ -184,7 +184,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. 
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -192,8 +192,13 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) - if MSVC_VERSION == 9: + if 0: + # pypy has no PC directory + self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) + if 1: + # pypy has no PCBuild directory + pass + elif MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -695,24 +700,14 @@ shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. + # The python library is always needed on Windows. 
if sys.platform == "win32": - from distutils.msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - return ext.libraries + template = "python%d%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + # don't extend ext.libraries, it may be shared with other + # extensions, it is a reference to the original list + return ext.libraries + [pythonlib] elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -816,6 +816,52 @@ """ self.optimize_loop(ops, expected, preamble) + def test_compare_with_itself(self): + ops = """ + [] + i0 = escape() + i1 = int_lt(i0, i0) + guard_false(i1) [] + i2 = int_le(i0, i0) + guard_true(i2) [] + i3 = int_eq(i0, i0) + guard_true(i3) [] + i4 = int_ne(i0, i0) + guard_false(i4) [] + i5 = int_gt(i0, i0) + guard_false(i5) [] + i6 = int_ge(i0, i0) + guard_true(i6) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) + + def test_compare_with_itself_uint(self): + py.test.skip("implement me") + ops = """ + [] + i0 = escape() + i7 = uint_lt(i0, i0) + guard_false(i7) [] + 
i8 = uint_le(i0, i0) + guard_true(i8) [] + i9 = uint_gt(i0, i0) + guard_false(i9) [] + i10 = uint_ge(i0, i0) + guard_true(i10) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) @@ -1791,7 +1837,7 @@ """ self.optimize_loop(ops, ops) - def test_duplicate_setfield_1(self): + def test_duplicate_setfield_0(self): ops = """ [p1, i1, i2] setfield_gc(p1, i1, descr=valuedescr) @@ -1800,8 +1846,27 @@ """ expected = """ [p1, i1, i2] + jump(p1, i1, i2) + """ + # in this case, all setfields are removed, because we can prove + # that in the loop it will always have the same value + self.optimize_loop(ops, expected) + + def test_duplicate_setfield_1(self): + ops = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i1, descr=valuedescr) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2) + jump(p1) + """ + expected = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i2, descr=valuedescr) + jump(p1) """ self.optimize_loop(ops, expected) @@ -1848,6 +1913,7 @@ setfield_gc(p1, i4, descr=nextdescr) # setfield_gc(p1, i2, descr=valuedescr) + escape() jump(p1, i1, i2, p3) """ preamble = """ @@ -1860,6 +1926,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ expected = """ @@ -1871,6 +1938,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ self.optimize_loop(ops, expected, preamble) @@ -1943,6 +2011,7 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1950,12 +2019,14 @@ guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected, preamble) @@ -1969,6 +2040,7 @@ guard_true(i3) [] i4 = int_neg(i2) 
setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1976,12 +2048,14 @@ guard_true(i3) [i2, p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [i2, p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected) @@ -2027,15 +2101,34 @@ guard_value(p1, ConstPtr(myptr)) [] setfield_gc(p1, i1, descr=valuedescr) setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(p1, i1, i2) """ expected = """ [i1, i2] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(i1, i2) """ self.optimize_loop(ops, expected) + def test_dont_force_setfield_around_copystrcontent(self): + ops = """ + [p0, i0, p1, i1, i2] + setfield_gc(p0, i1, descr=valuedescr) + copystrcontent(p0, i0, p1, i1, i2) + escape() + jump(p0, i0, p1, i1, i2) + """ + expected = """ + [p0, i0, p1, i1, i2] + copystrcontent(p0, i0, p1, i1, i2) + setfield_gc(p0, i1, descr=valuedescr) + escape() + jump(p0, i0, p1, i1, i2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getarrayitem_1(self): ops = """ [p1] @@ -2356,6 +2449,33 @@ """ self.optimize_loop(ops, expected, preamble) + def test_bug_5(self): + ops = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + i6 = getfield_gc(p0, descr=valuedescr) + i8 = int_sub(i6, 1) + setfield_gc(p0, i8, descr=valuedescr) + escape() + jump(p0) + """ + expected = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + setfield_gc(p0, i2, descr=valuedescr) + escape() + jump(p0) + """ + self.optimize_loop(ops, expected) + def test_invalid_loop_1(self): ops = """ [p1] @@ -2637,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def 
test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2671,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2708,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): @@ -2992,7 +3127,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3025,7 +3159,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3065,7 +3198,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3103,6 +3235,7 @@ guard_no_exception(descr=fdescr) [p2, p1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ preamble = """ @@ -3111,6 +3244,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ expected = """ @@ -3119,6 +3253,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr2) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ 
self.optimize_loop(ops, expected, preamble) @@ -3129,7 +3264,7 @@ #self.loop.inputargs[0].value = self.nodeobjvalue #self.check_expanded_fail_descr('''p2, p1 # p0.refdescr = p2 - # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 # where p1 is a node_vtable, nextdescr=p1b # where p1b is a node_vtable, valuedescr=i1 # ''', rop.GUARD_NO_EXCEPTION) @@ -3150,7 +3285,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3176,7 +3310,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3693,13 +3826,16 @@ guard_true(i1) [] jump(p0) """ - # The dead strlen will be eliminated be the backend. 
- expected = """ + preamble = """ [p0] i0 = strlen(p0) jump(p0) """ - self.optimize_strunicode_loop(ops, expected, expected) + expected = """ + [p0] + jump(p0) + """ + self.optimize_strunicode_loop(ops, expected, preamble) def test_addsub_const(self): ops = """ @@ -4839,6 +4975,58 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): @@ -5150,7 +5338,21 @@ """ expected = """ [p0] + jump(p0) + """ + self.optimize_loop(ops, expected) + + def test_strlen_repeated(self): + ops = """ + [p0] i0 = strlen(p0) + i1 = strlen(p0) + i2 = int_eq(i0, i1) + guard_true(i2) [] + jump(p0) + """ + expected = """ + [p0] jump(p0) """ self.optimize_loop(ops, expected) diff --git a/pypy/rpython/test/test_rfloat.py 
b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -191,7 +191,7 @@ def test_emittable(self, op): return self.is_emittable(op) - + def is_emittable(self, op): return self.next_optimization.test_emittable(op) @@ -247,7 +247,7 @@ def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): #return self.__class__() raise NotImplementedError - + class Optimizer(Optimization): @@ -283,20 +283,20 @@ else: optimizations = [] self.first_optimization = self - - self.optimizations = optimizations + + self.optimizations = optimizations def force_at_end_of_preamble(self): self.resumedata_memo = resume.ResumeDataLoopMemo(self.metainterp_sd) for o in self.optimizations: o.force_at_end_of_preamble() - + def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): assert optimizer is None assert valuemap is None valuemap = {} new = Optimizer(self.metainterp_sd, self.loop) - 
optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in + optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in self.optimizations] new.set_optimizations(optimizations) @@ -313,7 +313,7 @@ for key, value in self.loop_invariant_results.items(): new.loop_invariant_results[key] = \ value.get_reconstructed(new, valuemap) - + new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None @@ -439,7 +439,7 @@ def test_emittable(self, op): return True - + def emit_operation(self, op): ###self.heap_op_optimizer.emitting_operation(op) self._emit_operation(op) @@ -517,19 +517,17 @@ canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW else: nextop = None - + if canfold: for i in range(op.numargs()): if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? - self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? 
@@ -548,6 +546,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -213,7 +213,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_ge(v2.intbound): + elif v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -223,7 +223,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_le(v2.intbound): + elif v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -231,7 +231,7 @@ def optimize_INT_LE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_le(v2.intbound): + if 
v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) @@ -241,7 +241,7 @@ def optimize_INT_GE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_ge(v2.intbound): + if v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = 
self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 
#__________________________________________________________ def test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 
@@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.tool.autopath import pypydir -from pypy.rlib import rposix +from pypy.rlib import jit, rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,8 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +88,13 @@ # # 
Custom implementations - def ll_math_isnan(y): - return bool(math_isnan(y)) - + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. + return y != y def ll_math_isinf(y): - return bool(math_isinf(y)) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign diff --git a/lib_pypy/pyrepl/test/test_functional.py b/lib_pypy/pyrepl/test/test_functional.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/test/test_functional.py @@ -0,0 +1,50 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +# some functional tests, to see if this is really working + +import py +import sys + +class TestTerminal(object): + def _spawn(self, *args, **kwds): + try: + import pexpect + except ImportError, e: + py.test.skip(str(e)) + kwds.setdefault('timeout', 10) + child = pexpect.spawn(*args, **kwds) + child.logfile = sys.stdout + return child + + def spawn(self, argv=[]): + # avoid running start.py, cause it might contain + # things like readline or rlcompleter(2) included + child = self._spawn(sys.executable, ['-S'] + argv) + child.sendline('from pyrepl.python_reader import main') + child.sendline('main()') + return child + + def test_basic(self): + child = self.spawn() + child.sendline('a = 3') + child.sendline('a') + child.expect('3') + diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = 
remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/rlib/_rweakvaldict.py b/pypy/rlib/_rweakvaldict.py --- a/pypy/rlib/_rweakvaldict.py +++ b/pypy/rlib/_rweakvaldict.py @@ -113,7 +113,7 @@ @jit.dont_look_inside def ll_get(self, d, llkey): hash = 
self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get') valueref = d.entries[i].value if valueref: @@ -132,7 +132,7 @@ def ll_set_nonnull(self, d, llkey, llvalue): hash = self.ll_keyhash(llkey) valueref = weakref_create(llvalue) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = llkey d.entries[i].value = valueref @@ -146,7 +146,7 @@ @jit.dont_look_inside def ll_set_null(self, d, llkey): hash = self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. # We don't store a NULL value, but a dead weakref, because diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -151,8 +152,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = 
choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -500,6 +506,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = self.gcdata.gc.gcheaderbuilder.HDR @@ -786,6 +796,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1327,6 +1346,14 @@ return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1336,10 +1363,7 @@ return top.address[0] def allocate_stack(self): - result = llmemory.raw_malloc(self.rootstacksize) - if result: - 
llmemory.raw_memclear(result, self.rootstacksize) - return result + return llmemory.raw_malloc(self.rootstacksize) def setup_root_walker(self): stackbase = self.allocate_stack() @@ -1351,12 +1375,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1463,12 +1486,11 @@ # collect all valid stacks from the dict (the entry # corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr = end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/curses.py @@ -0,0 +1,39 @@ + +# Copyright 2000-2010 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Some try-import logic for two purposes: avoiding to bring in the whole +# pure Python curses package if possible; and, in _curses is not actually +# present, falling back to _minimal_curses (which is either a ctypes-based +# pure Python module or a PyPy built-in module). +try: + import _curses +except ImportError: + try: + import _minimal_curses as _curses + except ImportError: + # Who knows, maybe some environment has "curses" but not "_curses". + # If not, at least the following import gives a clean ImportError. 
+ import _curses + +setupterm = _curses.setupterm +tigetstr = _curses.tigetstr +tparm = _curses.tparm +error = _curses.error diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,10 +46,12 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs +import pypy.module.cpyext.pyfile # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -102,6 +102,7 @@ #include "modsupport.h" #include "pythonrun.h" #include "pyerrors.h" 
+#include "sysmodule.h" #include "stringobject.h" #include "descrobject.h" #include "tupleobject.h" @@ -109,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] @@ -385,6 +390,19 @@ assert module.__doc__ == "docstring" assert module.return_cookie() == 3.14 + def test_load_dynamic(self): + import sys + init = """ + if (Py_IsInitialized()) + Py_InitModule("foo", NULL); + """ + foo = self.import_module(name='foo', init=init) + assert 'foo' in sys.modules + del sys.modules['foo'] + import imp + foo2 = imp.load_dynamic('foo', foo.__file__) + assert 'foo' in sys.modules + assert foo.__dict__ == foo2.__dict__ def test_InitModule4_dotted(self): """ diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.extmodules.rst @@ -0,0 +1,12 @@ +You can pass a comma-separated list of third-party builtin modules +which should be translated along with the standard modules within +``pypy.module``. + +The module names need to be fully qualified (i.e. have a ``.`` in them), +be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. +``mypkg.somemod``. 
+ +Once translated, the module will be accessible with a simple:: + + import somemod + diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -306,6 +306,15 @@ stackcounter = StackCounter() stackcounter._freeze_() +def llexternal_use_eci(compilation_info): + """Return a dummy function that, if called in a RPython program, + adds the given ExternalCompilationInfo to it.""" + eci = ExternalCompilationInfo(post_include_bits=['#define PYPY_NO_OP()']) + eci = eci.merge(compilation_info) + return llexternal('PYPY_NO_OP', [], lltype.Void, + compilation_info=eci, sandboxsafe=True, _nowrapper=True, + _callable=lambda: None) + # ____________________________________________________________ # Few helpers for keeping callback arguments alive # this makes passing opaque objects possible (they don't even pass @@ -738,6 +747,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: 
- TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. 
but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/lib_pypy/pyrepl/tests/__init__.py b/lib_pypy/pyrepl/tests/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and 
+# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# moo diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/lib_pypy/pyrepl/cmdrepl.py b/lib_pypy/pyrepl/cmdrepl.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/cmdrepl.py @@ -0,0 +1,118 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Wedge pyrepl behaviour into cmd.Cmd-derived classes. + +replize, when given a subclass of cmd.Cmd, returns a class that +behaves almost identically to the supplied class, except that it uses +pyrepl instead if raw_input. 
+ +It was designed to let you do this: + +>>> import pdb +>>> from pyrepl import replize +>>> pdb.Pdb = replize(pdb.Pdb) + +which is in fact done by the `pythoni' script that comes with +pyrepl.""" + +from __future__ import nested_scopes + +from pyrepl import completing_reader as cr, reader, completer +from pyrepl.completing_reader import CompletingReader as CR +import cmd + +class CmdReader(CR): + def collect_keymap(self): + return super(CmdReader, self).collect_keymap() + ( + ("\\M-\\n", "invalid-key"), + ("\\n", "accept")) + + CR_init = CR.__init__ + def __init__(self, completions): + self.CR_init(self) + self.completions = completions + + def get_completions(self, stem): + if len(stem) != self.pos: + return [] + return cr.uniqify([s for s in self.completions + if s.startswith(stem)]) + +def replize(klass, history_across_invocations=1): + + """Return a subclass of the cmd.Cmd-derived klass that uses + pyrepl instead of readline. + + Raises a ValueError if klass does not derive from cmd.Cmd. 
+ + The optional history_across_invocations parameter (default 1) + controls whether instances of the returned class share + histories.""" + + completions = [s[3:] + for s in completer.get_class_members(klass) + if s.startswith("do_")] + + if not issubclass(klass, cmd.Cmd): + raise Exception +# if klass.cmdloop.im_class is not cmd.Cmd: +# print "this may not work" + + class CmdRepl(klass): + k_init = klass.__init__ + + if history_across_invocations: + _CmdRepl__history = [] + def __init__(self, *args, **kw): + self.k_init(*args, **kw) + self.__reader = CmdReader(completions) + self.__reader.history = CmdRepl._CmdRepl__history + self.__reader.historyi = len(CmdRepl._CmdRepl__history) + else: + def __init__(self, *args, **kw): + self.k_init(*args, **kw) + self.__reader = CmdReader(completions) + + def cmdloop(self, intro=None): + self.preloop() + if intro is not None: + self.intro = intro + if self.intro: + print self.intro + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue[0] + del self.cmdqueue[0] + else: + try: + self.__reader.ps1 = self.prompt + line = self.__reader.readline() + except EOFError: + line = "EOF" + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + + CmdRepl.__name__ = "replize(%s.%s)"%(klass.__module__, klass.__name__) + return CmdRepl + diff --git a/lib_pypy/pyrepl/historical_reader.py b/lib_pypy/pyrepl/historical_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/historical_reader.py @@ -0,0 +1,311 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl import reader, commands +from pyrepl.reader import Reader as R + +isearch_keymap = tuple( + [('\\%03o'%c, 'isearch-end') for c in range(256) if chr(c) != '\\'] + \ + [(c, 'isearch-add-character') + for c in map(chr, range(32, 127)) if c != '\\'] + \ + [('\\%03o'%c, 'isearch-add-character') + for c in range(256) if chr(c).isalpha() and chr(c) != '\\'] + \ + [('\\\\', 'self-insert'), + (r'\C-r', 'isearch-backwards'), + (r'\C-s', 'isearch-forwards'), + (r'\C-c', 'isearch-cancel'), + (r'\C-g', 'isearch-cancel'), + (r'\', 'isearch-backspace')]) + +del c + +ISEARCH_DIRECTION_NONE = '' +ISEARCH_DIRECTION_BACKWARDS = 'r' +ISEARCH_DIRECTION_FORWARDS = 'f' + +class next_history(commands.Command): + def do(self): + r = self.reader + if r.historyi == len(r.history): + r.error("end of history list") + return + r.select_item(r.historyi + 1) + +class previous_history(commands.Command): + def do(self): + r = self.reader + if r.historyi == 0: + r.error("start of history list") + return + r.select_item(r.historyi - 1) + +class restore_history(commands.Command): + def do(self): + r = self.reader + if r.historyi != len(r.history): + if r.get_unicode() != r.history[r.historyi]: + r.buffer = list(r.history[r.historyi]) + r.pos = len(r.buffer) + r.dirty = 1 + +class first_history(commands.Command): + def do(self): + self.reader.select_item(0) + +class last_history(commands.Command): + def do(self): + self.reader.select_item(len(self.reader.history)) + +class 
operate_and_get_next(commands.FinishCommand): + def do(self): + self.reader.next_history = self.reader.historyi + 1 + +class yank_arg(commands.Command): + def do(self): + r = self.reader + if r.last_command is self.__class__: + r.yank_arg_i += 1 + else: + r.yank_arg_i = 0 + if r.historyi < r.yank_arg_i: + r.error("beginning of history list") + return + a = r.get_arg(-1) + # XXX how to split? + words = r.get_item(r.historyi - r.yank_arg_i - 1).split() + if a < -len(words) or a >= len(words): + r.error("no such arg") + return + w = words[a] + b = r.buffer + if r.yank_arg_i > 0: + o = len(r.yank_arg_yanked) + else: + o = 0 + b[r.pos - o:r.pos] = list(w) + r.yank_arg_yanked = w + r.pos += len(w) - o + r.dirty = 1 + +class forward_history_isearch(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_FORWARDS + r.isearch_start = r.historyi, r.pos + r.isearch_term = '' + r.dirty = 1 + r.push_input_trans(r.isearch_trans) + + +class reverse_history_isearch(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS + r.dirty = 1 + r.isearch_term = '' + r.push_input_trans(r.isearch_trans) + r.isearch_start = r.historyi, r.pos + +class isearch_cancel(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_NONE + r.pop_input_trans() + r.select_item(r.isearch_start[0]) + r.pos = r.isearch_start[1] + r.dirty = 1 + +class isearch_add_character(commands.Command): + def do(self): + r = self.reader + b = r.buffer + r.isearch_term += self.event[-1] + r.dirty = 1 + p = r.pos + len(r.isearch_term) - 1 + if b[p:p+1] != [r.isearch_term[-1]]: + r.isearch_next() + +class isearch_backspace(commands.Command): + def do(self): + r = self.reader + if len(r.isearch_term) > 0: + r.isearch_term = r.isearch_term[:-1] + r.dirty = 1 + else: + r.error("nothing to rubout") + +class isearch_forwards(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = 
ISEARCH_DIRECTION_FORWARDS + r.isearch_next() + +class isearch_backwards(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS + r.isearch_next() + +class isearch_end(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_NONE + r.console.forgetinput() + r.pop_input_trans() + r.dirty = 1 + +class HistoricalReader(R): + """Adds history support (with incremental history searching) to the + Reader class. + + Adds the following instance variables: + * history: + a list of strings + * historyi: + * transient_history: + * next_history: + * isearch_direction, isearch_term, isearch_start: + * yank_arg_i, yank_arg_yanked: + used by the yank-arg command; not actually manipulated by any + HistoricalReader instance methods. + """ + + def collect_keymap(self): + return super(HistoricalReader, self).collect_keymap() + ( + (r'\C-n', 'next-history'), + (r'\C-p', 'previous-history'), + (r'\C-o', 'operate-and-get-next'), + (r'\C-r', 'reverse-history-isearch'), + (r'\C-s', 'forward-history-isearch'), + (r'\M-r', 'restore-history'), + (r'\M-.', 'yank-arg'), + (r'\', 'last-history'), + (r'\', 'first-history')) + + + def __init__(self, console): + super(HistoricalReader, self).__init__(console) + self.history = [] + self.historyi = 0 + self.transient_history = {} + self.next_history = None + self.isearch_direction = ISEARCH_DIRECTION_NONE + for c in [next_history, previous_history, restore_history, + first_history, last_history, yank_arg, + forward_history_isearch, reverse_history_isearch, + isearch_end, isearch_add_character, isearch_cancel, + isearch_add_character, isearch_backspace, + isearch_forwards, isearch_backwards, operate_and_get_next]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + from pyrepl import input + self.isearch_trans = input.KeymapTranslator( + isearch_keymap, invalid_cls=isearch_end, + character_cls=isearch_add_character) + + def 
select_item(self, i): + self.transient_history[self.historyi] = self.get_unicode() + buf = self.transient_history.get(i) + if buf is None: + buf = self.history[i] + self.buffer = list(buf) + self.historyi = i + self.pos = len(self.buffer) + self.dirty = 1 + + def get_item(self, i): + if i <> len(self.history): + return self.transient_history.get(i, self.history[i]) + else: + return self.transient_history.get(i, self.get_unicode()) + + def prepare(self): + super(HistoricalReader, self).prepare() + try: + self.transient_history = {} + if self.next_history is not None \ + and self.next_history < len(self.history): + self.historyi = self.next_history + self.buffer[:] = list(self.history[self.next_history]) + self.pos = len(self.buffer) + self.transient_history[len(self.history)] = '' + else: + self.historyi = len(self.history) + self.next_history = None + except: + self.restore() + raise + + def get_prompt(self, lineno, cursor_on_line): + if cursor_on_line and self.isearch_direction <> ISEARCH_DIRECTION_NONE: + d = 'rf'[self.isearch_direction == ISEARCH_DIRECTION_FORWARDS] + return "(%s-search `%s') "%(d, self.isearch_term) + else: + return super(HistoricalReader, self).get_prompt(lineno, cursor_on_line) + + def isearch_next(self): + st = self.isearch_term + p = self.pos + i = self.historyi + s = self.get_unicode() + forwards = self.isearch_direction == ISEARCH_DIRECTION_FORWARDS + while 1: + if forwards: + p = s.find(st, p + 1) + else: + p = s.rfind(st, 0, p + len(st) - 1) + if p != -1: + self.select_item(i) + self.pos = p + return + elif ((forwards and i == len(self.history) - 1) + or (not forwards and i == 0)): + self.error("not found") + return + else: + if forwards: + i += 1 + s = self.get_item(i) + p = -1 + else: + i -= 1 + s = self.get_item(i) + p = len(s) + + def finish(self): + super(HistoricalReader, self).finish() + ret = self.get_unicode() + for i, t in self.transient_history.items(): + if i < len(self.history) and i != self.historyi: + self.history[i] = t 
+ if ret: + self.history.append(ret) + +def test(): + from pyrepl.unix_console import UnixConsole + reader = HistoricalReader(UnixConsole()) + reader.ps1 = "h**> " + reader.ps2 = "h/*> " + reader.ps3 = "h|*> " + reader.ps4 = "h\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/pypy/module/cpyext/src/modsupport.c b/pypy/module/cpyext/src/modsupport.c --- a/pypy/module/cpyext/src/modsupport.c +++ b/pypy/module/cpyext/src/modsupport.c @@ -241,13 +241,12 @@ case 'I': { - Py_FatalError("I unsupported so far"); - //unsigned int n; - //n = va_arg(*p_va, unsigned int); - //if (n > (unsigned long)PyInt_GetMax()) - // return PyLong_FromUnsignedLong((unsigned long)n); - //else - // return PyInt_FromLong(n); + unsigned int n; + n = va_arg(*p_va, unsigned int); + if (n > (unsigned long)PyInt_GetMax()) + return PyLong_FromUnsignedLong((unsigned long)n); + else + return PyInt_FromLong(n); } case 'n': @@ -260,23 +259,20 @@ case 'k': { - Py_FatalError("Py_BuildValue k unsupported so far\n"); - /* unsigned long n; */ - /* n = va_arg(*p_va, unsigned long); */ - /* if (n > (unsigned long)PyInt_GetMax()) */ - /* return PyLong_FromUnsignedLong(n); */ - /* else */ - /* return PyInt_FromLong(n); */ + unsigned long n; + n = va_arg(*p_va, unsigned long); + if (n > (unsigned long)PyInt_GetMax()) + return PyLong_FromUnsignedLong(n); + else + return PyInt_FromLong(n); } #ifdef HAVE_LONG_LONG case 'L': - Py_FatalError("Py_BuildValue L unsupported for now\n"); - //return PyLong_FromLongLong((PY_LONG_LONG)va_arg(*p_va, PY_LONG_LONG)); + return PyLong_FromLongLong((PY_LONG_LONG)va_arg(*p_va, PY_LONG_LONG)); case 'K': - Py_FatalError("Py_BuildValue K unsupported for now\n"); - //return PyLong_FromUnsignedLongLong((PY_LONG_LONG)va_arg(*p_va, unsigned PY_LONG_LONG)); + return PyLong_FromUnsignedLongLong((PY_LONG_LONG)va_arg(*p_va, unsigned PY_LONG_LONG)); #endif #ifdef Py_USING_UNICODE case 'u': diff --git a/pypy/interpreter/test/test_interpreter.py 
b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -1,3 +1,4 @@ +from __future__ import with_statement MARKER = 42 class AppTestImpModule: @@ -34,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): @@ -138,3 +140,58 @@ ) # Doesn't end up in there when run with -A assert sys.path_importer_cache.get(lib_pypy) is None + + def test_rewrite_pyc_check_code_name(self): + # This one is adapted from cpython's Lib/test/test_import.py + from os import chmod + from os.path import join + from sys import modules, path + from shutil import rmtree + from tempfile import mkdtemp + code = """if 1: + import sys + code_filename = sys._getframe().f_code.co_filename + module_filename = __file__ + constant = 1 + def func(): + pass + func_filename = func.func_code.co_filename + """ + + module_name = "unlikely_module_name" + dir_name = mkdtemp(prefix='pypy_test') + file_name = join(dir_name, module_name + '.py') + with open(file_name, "wb") as f: + f.write(code) + compiled_name = file_name + ("c" if __debug__ else "o") + chmod(file_name, 0777) + + # Setup + 
sys_path = path[:] + orig_module = modules.pop(module_name, None) + assert modules.get(module_name) == None + path.insert(0, dir_name) + + # Test + import py_compile + py_compile.compile(file_name, dfile="another_module.py") + __import__(module_name, globals(), locals()) + mod = modules.get(module_name) + + try: + # Ensure proper results + assert mod != orig_module + assert mod.module_filename == compiled_name + assert mod.code_filename == file_name + assert mod.func_filename == file_name + finally: + # TearDown + path[:] = sys_path + if orig_module is not None: + modules[module_name] = orig_module + else: + try: + del modules[module_name] + except KeyError: + pass + rmtree(dir_name, True) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -46,4 +46,5 @@ return PyBuffer_New(150); """), ]) - module.buffer_new() + b = module.buffer_new() + raises(AttributeError, getattr, b, 'x') diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, 
rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -10,6 +10,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.eval import Code +from pypy.interpreter.pycode import PyCode from pypy.rlib import streamio, jit, rposix from pypy.rlib.streamio import StreamErrors from pypy.rlib.rarithmetic import intmask @@ -31,6 +32,7 @@ else: SO = ".so" DEFAULT_SOABI = 'pypy-14' +CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() def get_so_extension(space): @@ -57,6 +59,12 @@ if os.path.exists(pyfile) and case_ok(pyfile): return PY_SOURCE, ".py", "U" + # on Windows, also check for a .pyw file + if CHECK_FOR_PYW: + pyfile = filepart + ".pyw" + if os.path.exists(pyfile) and case_ok(pyfile): + return PY_SOURCE, ".pyw", "U" + # The .py file does not exist. By default on PyPy, lonepycfiles # is False: if a .py file does not exist, we don't even try to # look for a lone .pyc file. 
@@ -84,6 +92,9 @@ # XXX that's slow def case_ok(filename): index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: @@ -774,10 +785,24 @@ if space.config.objspace.usepycfiles and write_pyc: write_compiled_module(space, code_w, cpathname, mode, mtime) + update_code_filenames(space, code_w, pathname) exec_code_module(space, w_mod, code_w) return w_mod +def update_code_filenames(space, code_w, pathname, oldname=None): + assert isinstance(code_w, PyCode) + if oldname is None: + oldname = code_w.co_filename + elif code_w.co_filename != oldname: + return + + code_w.co_filename = pathname + constants = code_w.co_consts_w + for const in constants: + if const is not None and isinstance(const, PyCode): + update_code_filenames(space, const, pathname, oldname) + def _get_long(s): a = ord(s[0]) b = ord(s[1]) diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. 
- def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -745,13 +737,6 @@ described there.""" raise NotImplementedError - at cpython_api([], lltype.Void) -def PyErr_SetInterrupt(space): - """This function simulates the effect of a SIGINT signal arriving --- the - next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. 
- It may be called without holding the interpreter lock.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) def PySignal_SetWakeupFd(space, fd): """This utility function specifies a file descriptor to which a '\0' byte will @@ -850,13 +835,6 @@ successful invocation of Py_EnterRecursiveCall().""" raise NotImplementedError - at cpython_api([FILE, rffi.CCHARP, rffi.CCHARP, rffi.INT_real], PyObject) -def PyFile_FromFile(space, fp, name, mode, close): - """Create a new PyFileObject from the already-open standard C file - pointer, fp. The function close will be called when the file should be - closed. Return NULL on failure.""" - raise NotImplementedError - @cpython_api([PyFileObject], lltype.Void) def PyFile_IncUseCount(space, p): """Increments the PyFileObject's internal use count to indicate @@ -899,12 +877,6 @@ borrow_from() raise NotImplementedError - at cpython_api([PyFileObject, rffi.INT_real], lltype.Void) -def PyFile_SetBufSize(space, p, n): - """Available on systems with setvbuf() only. This should only be called - immediately after file object creation.""" - raise NotImplementedError - @cpython_api([PyFileObject, rffi.CCHARP], rffi.INT_real, error=0) def PyFile_SetEncoding(space, p, enc): """Set the file's encoding for Unicode output to enc. Return 1 on success and 0 @@ -941,12 +913,6 @@ appropriate exception will be set.""" raise NotImplementedError - at cpython_api([rffi.CCHARP, PyObject], rffi.INT_real, error=-1) -def PyFile_WriteString(space, s, p): - """Write string s to file object p. 
Return 0 on success or -1 on - failure; the appropriate exception will be set.""" - raise NotImplementedError - @cpython_api([], PyObject) def PyFloat_GetInfo(space): """Return a structseq instance which contains information about the @@ -1142,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1770,12 +1722,6 @@ """ raise NotImplementedError - at cpython_api([], lltype.Signed, error=CANNOT_FAIL) -def PyInt_GetMax(space): - """Return the system's idea of the largest integer it can handle (LONG_MAX, - as defined in the system header files).""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyInt_ClearFreeList(space): """Clear the integer free list. Return the number of items that could not @@ -1997,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. 
The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. In 2.x, this is just a alias @@ -2336,28 +2274,6 @@ (: on Unix, ; on Windows).""" raise NotImplementedError - at cpython_api([rffi.CCHARP, ], lltype.Void) -def PySys_WriteStdout(space, format): - """Write the output string described by format to sys.stdout. No - exceptions are raised, even if truncation occurs (see below). - - format should limit the total size of the formatted output string to - 1000 bytes or less -- after 1000 bytes, the output string is truncated. - In particular, this means that no unrestricted "%s" formats should occur; - these should be limited using "%.s" where is a decimal number - calculated so that plus the maximum size of other formatted text does not - exceed 1000 bytes. Also watch out for "%f", which can print hundreds of - digits for very large numbers. - - If a problem occurs, or sys.stdout is unset, the formatted message - is written to the real (C level) stdout.""" - raise NotImplementedError - - at cpython_api([rffi.CCHARP, ], lltype.Void) -def PySys_WriteStderr(space, format): - """As above, but write to sys.stderr or stderr instead.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], lltype.Void) def Py_Exit(space, status): """Exit the current process. 
This calls Py_Finalize() and then calls the diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -722,31 +722,75 @@ newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) - def ll_split_chr(LIST, s, c): + def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) count = 1 i = 0 + if max == 0: + i = strlen while i < strlen: if chars[i] == c: count += 1 + if max >= 0 and count > max: + break i += 1 res = LIST.ll_newlist(count) items = res.ll_items() i = 0 j = 0 resindex = 0 + if max == 0: + j = strlen while j < strlen: if chars[j] == c: item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) resindex += 1 i = j + 1 + if max >= 0 and resindex >= max: + j = strlen + break j += 1 item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) return res + def ll_rsplit_chr(LIST, s, c, max): + chars = s.chars + strlen = len(chars) + count = 1 + i = 0 + if max == 0: + i = strlen + while i < strlen: + if chars[i] == c: + count += 1 + if max >= 0 and count > max: + break + i += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + i = strlen + j = strlen + resindex = count - 1 + assert resindex >= 0 + if max == 0: + j = 0 + while j > 0: + j -= 1 + if chars[j] == c: + item = items[resindex] = s.malloc(i - j - 1) + item.copy_contents(s, item, j + 1, 0, i - j - 1) + resindex -= 1 + i = j + if resindex == 0: + j = 0 + break + item = items[resindex] = s.malloc(i - j) + item.copy_contents(s, item, j, 0, i - j) + return res + @purefunction def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = 
self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not 
rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. # Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... 
assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): 
import signal diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -260,13 +259,13 @@ @classmethod def is_const(cls, v1): return isinstance(v1, str) and v1.startswith('ConstClass(') - + def match_var(self, v1, exp_v2): assert v1 != '_' if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 @@ -285,9 +284,9 @@ self.match_var(op.res, exp_res) self._assert(len(op.args) == len(exp_args), "wrong number of arguments") for arg, exp_arg in zip(op.args, exp_args): - self._assert(self.match_var(arg, exp_arg), "variable mismatch") + self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) self.match_descr(op.descr, exp_descr) - + def _next_op(self, iter_ops, assert_raises=False): try: diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from 
pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... + at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) @@ -113,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -318,11 +318,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). 
""" for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -330,9 +331,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -118,13 +119,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) @@ -1454,6 +1454,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. 
self.run_source(''' def main(a, b): i = sa = 0 @@ -1461,11 +1463,10 @@ %s i += 1 return sa - ''' % code, 179, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_mod(self): avalues = ('a', 'b', 7, -42, 8) bvalues = ['b'] + range(-10, 0) + range(1,10) @@ -1486,6 +1487,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. self.run_source(''' def main(a, b): i = sa = 0 @@ -1495,11 +1498,10 @@ %s i += 1 return sa - ''' % code, 450, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_dont_trace_every_iteration(self): self.run_source(''' def main(a, b): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + 
hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -266,6 +274,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(d, [x, y]) assert res == d(x, y) @@ -276,6 +286,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(m, [x, y]) assert res == m(x, y) @@ -384,6 +396,18 @@ else: assert res == 123456789012345678 + def 
test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -746,11 +757,13 @@ return str.substring(start, end); } - public static Object[] ll_split_chr(String str, char c) { + public static Object[] ll_split_chr(String str, char c, int max) { ArrayList list = new ArrayList(); int lastidx = 0, idx = 0; while ((idx = str.indexOf(c, lastidx)) != -1) { + if (max >= 0 && list.size() >= max) + break; String sub = str.substring(lastidx, idx); list.add(sub); lastidx = idx+1; @@ -759,6 +772,21 @@ return list.toArray(new String[list.size()]); } + public static Object[] ll_rsplit_chr(String str, char c, int max) { + ArrayList list = new ArrayList(); + int lastidx = str.length(), idx = 0; + while ((idx = str.lastIndexOf(c, lastidx - 1)) != -1) + { + if (max >= 0 && list.size() >= max) + break; + String sub = str.substring(idx + 1, lastidx); + list.add(0, 
sub); + lastidx = idx; + } + list.add(0, str.substring(0, lastidx)); + return list.toArray(new String[list.size()]); + } + public static String ll_substring(String str, int start, int cnt) { return str.substring(start,start+cnt); } @@ -1158,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1170,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -38,11 +38,7 @@ if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: - _sse2_eci = ExternalCompilationInfo( + ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = ['-msse2', '-mfpmath=sse', '-DPYPY_CPU_HAS_STANDARD_PRECISION'], - separate_module_sources = ['void PYPY_NO_OP(void) {}'], - ) - ensure_sse2_floats = rffi.llexternal('PYPY_NO_OP', [], lltype.Void, - 
compilation_info=_sse2_eci, - sandboxsafe=True) + )) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/objspace/flow/model.py 
b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,7 +17,6 @@ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ -^pypy/translator/c/src/dtoa.o$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ diff --git 
a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -0,0 +1,72 @@ +from pypy.module.cpyext.api import fopen, fclose, fwrite +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.tool.udir import udir +import pytest + +class TestFile(BaseApiTest): + + def test_file_fromstring(self, space, api): + filename = rffi.str2charp(str(udir / "_test_file")) + mode = rffi.str2charp("wb") + w_file = api.PyFile_FromString(filename, mode) + rffi.free_charp(filename) + rffi.free_charp(mode) + + assert api.PyFile_Check(w_file) + assert api.PyFile_CheckExact(w_file) + assert not api.PyFile_Check(space.wrap("text")) + + space.call_method(w_file, "write", space.wrap("text")) + space.call_method(w_file, "close") + assert (udir / "_test_file").read() == "text" + + def test_file_getline(self, space, api): + filename = rffi.str2charp(str(udir / "_test_file")) + + mode = rffi.str2charp("w") + w_file = api.PyFile_FromString(filename, mode) + space.call_method(w_file, "write", + space.wrap("line1\nline2\nline3\nline4")) + space.call_method(w_file, "close") + + rffi.free_charp(mode) + mode = rffi.str2charp("r") + w_file = api.PyFile_FromString(filename, mode) + rffi.free_charp(filename) + rffi.free_charp(mode) + + w_line = api.PyFile_GetLine(w_file, 0) + assert space.str_w(w_line) == "line1\n" + + w_line = api.PyFile_GetLine(w_file, 4) + assert space.str_w(w_line) == "line" + + w_line = api.PyFile_GetLine(w_file, 0) + assert space.str_w(w_line) == "2\n" + + # XXX We ought to raise an EOFError here, but don't + w_line = api.PyFile_GetLine(w_file, -1) + # assert api.PyErr_Occurred() is space.w_EOFError + assert space.str_w(w_line) == "line3\n" + + space.call_method(w_file, "close") + + @pytest.mark.xfail + def test_file_fromfile(self, space, api): + api.PyFile_Fromfile() + + @pytest.mark.xfail + def 
test_file_setbufsize(self, space, api): + api.PyFile_SetBufSize() + + def test_file_writestring(self, space, api, capfd): + s = rffi.str2charp("test\n") + try: + api.PyFile_WriteString(s, space.sys.get("stdout")) + finally: + rffi.free_charp(s) + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + assert out == "test\n" + diff --git a/lib_pypy/pyrepl/completer.py b/lib_pypy/pyrepl/completer.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/completer.py @@ -0,0 +1,87 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import __builtin__ + +class Completer: + def __init__(self, ns): + self.ns = ns + + def complete(self, text): + if "." in text: + return self.attr_matches(text) + else: + return self.global_matches(text) + + def global_matches(self, text): + """Compute matches when text is a simple name. + + Return a list of all keywords, built-in functions and names + currently defines in __main__ that match. 
+ + """ + import keyword + matches = [] + n = len(text) + for list in [keyword.kwlist, + __builtin__.__dict__.keys(), + self.ns.keys()]: + for word in list: + if word[:n] == text and word != "__builtins__": + matches.append(word) + return matches + + def attr_matches(self, text): + """Compute matches when text contains a dot. + + Assuming the text is of the form NAME.NAME....[NAME], and is + evaluatable in the globals of __main__, it will be evaluated + and its attributes (as revealed by dir()) are used as possible + completions. (For class instances, class members are are also + considered.) + + WARNING: this can still invoke arbitrary C code, if an object + with a __getattr__ hook is evaluated. + + """ + import re + m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) + if not m: + return [] + expr, attr = m.group(1, 3) + object = eval(expr, self.ns) + words = dir(object) + if hasattr(object, '__class__'): + words.append('__class__') + words = words + get_class_members(object.__class__) + matches = [] + n = len(attr) + for word in words: + if word[:n] == attr and word != "__builtins__": + matches.append("%s.%s" % (expr, word)) + return matches + +def get_class_members(klass): + ret = dir(klass) + if hasattr(klass, '__bases__'): + for base in klass.__bases__: + ret = ret + get_class_members(base) + return ret + + diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() @@ -40,8 +40,9 @@ translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", "struct", "md5", "cStringIO", "array", "_ffi", - # the following are needed for pyrepl (and hence for the interactive prompt/pdb) - "termios", "_minimal_curses", 
"fcntl", "signal", + # the following are needed for pyrepl (and hence for the + # interactive prompt/pdb) + "termios", "_minimal_curses", ])) working_oo_modules = default_modules.copy() @@ -162,6 +163,11 @@ cmdline="--allworkingmodules", negation=True), + StrOption("extmodules", + "Comma-separated list of third-party builtin modules", + cmdline="--ext", + default=None), + BoolOption("translationmodules", "use only those modules that are needed to run translate.py on pypy", default=False, @@ -355,8 +361,8 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) - if not IS_64_BITS: - config.objspace.std.suggest(withsmalllong=True) + #if not IS_64_BITS: + # config.objspace.std.suggest(withsmalllong=True) # extra costly optimizations only go in level 3 if level == '3': diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/module/posix/__init__.py 
b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -31,6 +50,10 @@ if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' + if hasattr(os, 'wait3'): + appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { 'open' : 'interp_posix.open', @@ -156,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' 
+ name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/lib_pypy/pyrepl/input.py b/lib_pypy/pyrepl/input.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/input.py @@ -0,0 +1,97 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# (naming modules after builtin functions is not such a hot idea...) + +# an KeyTrans instance translates Event objects into Command objects + +# hmm, at what level do we want [C-i] and [tab] to be equivalent? +# [meta-a] and [esc a]? obviously, these are going to be equivalent +# for the UnixConsole, but should they be for PygameConsole? 
+ +# it would in any situation seem to be a bad idea to bind, say, [tab] +# and [C-i] to *different* things... but should binding one bind the +# other? + +# executive, temporary decision: [tab] and [C-i] are distinct, but +# [meta-key] is identified with [esc key]. We demand that any console +# class does quite a lot towards emulating a unix terminal. + +from pyrepl import unicodedata_ + +class InputTranslator(object): + def push(self, evt): + pass + def get(self): + pass + def empty(self): + pass + +class KeymapTranslator(InputTranslator): + def __init__(self, keymap, verbose=0, + invalid_cls=None, character_cls=None): + self.verbose = verbose + from pyrepl.keymap import compile_keymap, parse_keys + self.keymap = keymap + self.invalid_cls = invalid_cls + self.character_cls = character_cls + d = {} + for keyspec, command in keymap: + keyseq = tuple(parse_keys(keyspec)) + d[keyseq] = command + if self.verbose: + print d + self.k = self.ck = compile_keymap(d, ()) + self.results = [] + self.stack = [] + def push(self, evt): + if self.verbose: + print "pushed", evt.data, + key = evt.data + d = self.k.get(key) + if isinstance(d, dict): + if self.verbose: + print "transition" + self.stack.append(key) + self.k = d + else: + if d is None: + if self.verbose: + print "invalid" + if self.stack or len(key) > 1 or unicodedata_.category(key) == 'C': + self.results.append( + (self.invalid_cls, self.stack + [key])) + else: + # small optimization: + self.k[key] = self.character_cls + self.results.append( + (self.character_cls, [key])) + else: + if self.verbose: + print "matched", d + self.results.append((d, self.stack + [key])) + self.stack = [] + self.k = self.ck + def get(self): + if self.results: + return self.results.pop(0) + else: + return None + def empty(self): + return not self.results diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -604,6 +604,18 @@ else: 
self._as_rdict().impl_fallback_setitem(w_key, w_value) + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + key = space.str_w(w_key) + w_result = self.impl_getitem_str(key) + if w_result is not None: + return w_result + self.impl_setitem_str(key, w_default) + return w_default + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/rlib/test/test_rcoroutine.py b/pypy/rlib/test/test_rcoroutine.py --- a/pypy/rlib/test/test_rcoroutine.py +++ b/pypy/rlib/test/test_rcoroutine.py @@ -1,13 +1,16 @@ """ testing coroutines at interprepter level """ - +import py import os from pypy import conftest; conftest.translation_test_so_skip_if_appdirect() from pypy.rlib.rcoroutine import make_coroutine_classes from pypy.translator.c.test.test_stackless import StacklessTest from pypy.translator.c import gc +def setup_module(mod): + py.test.importorskip('greenlet') + d = make_coroutine_classes(object) syncstate = d['syncstate'] Coroutine = d['Coroutine'] diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git 
a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/module/cpyext/test/test_sysmodule.py b/pypy/module/cpyext/test/test_sysmodule.py --- a/pypy/module/cpyext/test/test_sysmodule.py +++ b/pypy/module/cpyext/test/test_sysmodule.py @@ -14,6 +14,21 @@ assert module.get("excepthook") assert not module.get("spam_spam_spam") + def test_writestdout(self): + module = self.import_extension('foo', [ + ("writestdout", "METH_NOARGS", + """ + PySys_WriteStdout("format: %d\\n", 42); + Py_RETURN_NONE; + """)]) + import sys, StringIO + sys.stdout = StringIO.StringIO() + try: + module.writestdout() + assert sys.stdout.getvalue() == "format: 42\n" + finally: + sys.stdout = sys.__stdout__ + class TestSysModule(BaseApiTest): def test_sysmodule(self, space, api): buf = rffi.str2charp("last_tb") diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -81,6 +81,9 @@ 
oldcolors = GetConsoleInfo(handle).wAttributes attr |= (oldcolors & 0x0f0) SetConsoleTextAttribute(handle, attr) + while len(text) > 32768: + file.write(text[:32768]) + text = text[32768:] file.write(text) SetConsoleTextAttribute(handle, oldcolors) else: diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/module/cpyext/include/pyerrors.h b/pypy/module/cpyext/include/pyerrors.h --- a/pypy/module/cpyext/include/pyerrors.h +++ b/pypy/module/cpyext/include/pyerrors.h @@ -15,6 +15,20 @@ PyObject *PyErr_NewExceptionWithDoc(char *name, char *doc, PyObject *base, PyObject *dict); PyObject *PyErr_Format(PyObject *exception, const char *format, ...); +/* These APIs aren't really part of the error implementation, but + often needed to format error messages; the native C lib APIs are + not available on all platforms, which is why we provide emulations + for those platforms in Python/mysnprintf.c, + WARNING: The return value of snprintf varies across platforms; do + not rely on any particular behavior; eventually the C99 defn may + be reliable. 
+*/ +#if defined(MS_WIN32) && !defined(HAVE_SNPRINTF) +# define HAVE_SNPRINTF +# define snprintf _snprintf +# define vsnprintf _vsnprintf +#endif + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/module/cpyext/include/sysmodule.h b/pypy/module/cpyext/include/sysmodule.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/sysmodule.h @@ -0,0 +1,13 @@ +#ifndef Py_SYSMODULE_H +#define Py_SYSMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...); +PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SYSMODULE_H */ diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass 
= JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) 
PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -27,7 +27,7 @@ def optimize_loop_1(metainterp_sd, loop, enable_opts, inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. + """Optimize loop.operations to remove internal overheadish operations. 
""" optimizations = [] unroll = 'unroll' in enable_opts @@ -43,7 +43,7 @@ if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: optimizations.append(OptSimplify()) - + if inline_short_preamble: optimizations = [OptInlineShortPreamble(retraced)] + optimizations diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git 
a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,8 +8,8 @@ for descr in all_optiondescrs: prefix = descr._name c = 
config.Config(descr) - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." + p + ".rst" f = thisdir.join(basename) f.ensure() diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -1421,9 +1423,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- 
a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/lib_pypy/pyrepl/unix_eventqueue.py 
b/lib_pypy/pyrepl/unix_eventqueue.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unix_eventqueue.py @@ -0,0 +1,86 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Bah, this would be easier to test if curses/terminfo didn't have so +# much non-introspectable global state. 
+ +from pyrepl import keymap +from pyrepl.console import Event +from pyrepl import curses +from termios import tcgetattr, VERASE +import os + +_keynames = { + "delete" : "kdch1", + "down" : "kcud1", + "end" : "kend", + "enter" : "kent", + "f1" : "kf1", "f2" : "kf2", "f3" : "kf3", "f4" : "kf4", + "f5" : "kf5", "f6" : "kf6", "f7" : "kf7", "f8" : "kf8", + "f9" : "kf9", "f10" : "kf10", "f11" : "kf11", "f12" : "kf12", + "f13" : "kf13", "f14" : "kf14", "f15" : "kf15", "f16" : "kf16", + "f17" : "kf17", "f18" : "kf18", "f19" : "kf19", "f20" : "kf20", + "home" : "khome", + "insert" : "kich1", + "left" : "kcub1", + "page down" : "knp", + "page up" : "kpp", + "right" : "kcuf1", + "up" : "kcuu1", + } + +class EventQueue(object): + def __init__(self, fd): + our_keycodes = {} + for key, tiname in _keynames.items(): + keycode = curses.tigetstr(tiname) + if keycode: + our_keycodes[keycode] = unicode(key) + if os.isatty(fd): + our_keycodes[tcgetattr(fd)[6][VERASE]] = u'backspace' + self.k = self.ck = keymap.compile_keymap(our_keycodes) + self.events = [] + self.buf = [] + def get(self): + if self.events: + return self.events.pop(0) + else: + return None + def empty(self): + return not self.events + def insert(self, event): + self.events.append(event) + def push(self, char): + if char in self.k: + k = self.k[char] + if isinstance(k, dict): + self.buf.append(char) + self.k = k + else: + self.events.append(Event('key', k, ''.join(self.buf) + char)) + self.buf = [] + self.k = self.ck + elif self.buf: + self.events.extend([Event('key', c, c) for c in self.buf]) + self.buf = [] + self.k = self.ck + self.push(char) + else: + self.events.append(Event('key', char, char)) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/simple_interact.py @@ -0,0 +1,64 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this 
software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""This is an alternative to python_reader which tries to emulate +the CPython prompt as closely as possible, with the exception of +allowing multiline input and multiline history entries. +""" + +import sys +from pyrepl.readline import multiline_input, _error, _get_reader + +def check(): # returns False if there is a problem initializing the state + try: + _get_reader() + except _error: + return False + return True + +def run_multiline_interactive_console(mainmodule=None): + import code + if mainmodule is None: + import __main__ as mainmodule + console = code.InteractiveConsole(mainmodule.__dict__) + + def more_lines(unicodetext): + # ooh, look at the hack: + src = "#coding:utf-8\n"+unicodetext.encode('utf-8') + try: + code = console.compile(src, '', 'single') + except (OverflowError, SyntaxError, ValueError): + return False + else: + return code is None + + while 1: + try: + ps1 = getattr(sys, 'ps1', '>>> ') + ps2 = getattr(sys, 'ps2', '... 
') + try: + statement = multiline_input(more_lines, ps1, ps2) + except EOFError: + break + more = console.push(statement) + assert not more + except KeyboardInterrupt: + console.write("\nKeyboardInterrupt\n") + console.resetbuffer() diff --git a/lib_pypy/pyrepl/__init__.py b/lib_pypy/pyrepl/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -26,7 +26,10 @@ else { string res = ""; foreach(char ch in x) - res+= string.Format("\\x{0:X2}", (int)ch); + if (ch >= 32 && ch < 128) + res+= ch; + else + res+= string.Format("\\x{0:X2}", (int)ch); return string.Format("'{0}'", res); } } @@ -498,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -717,9 +725,31 @@ return s.Substring(start, count); } - public static string[] ll_split_chr(string s, char ch) + public static string[] ll_split_chr(string s, char ch, int max) { - return s.Split(ch); + if (max < 0) + return s.Split(ch); + else + return s.Split(new Char[] {ch}, max + 1); + } + + public static string[] ll_rsplit_chr(string s, char ch, int max) + { + string[] splits = s.Split(ch); + if (max < 0 || splits.Length <= max + 1) + return splits; + else { + /* XXX not very efficient */ + string first = splits[0]; + // join the first (length - max - 1) items + int i; + for (i = 1; i < splits.Length - max; i++) + first += ch + splits[i]; + splits[0] = first; + Array.Copy(splits, i, splits, 1, max); + Array.Resize(ref splits, max + 1); + return splits; + } } public static bool ll_contains(string s, char ch) @@ -1123,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return 
Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. 
Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). 
""" - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/jvm/src/pypy/StatResult.java 
b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + 
super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -38,7 +38,9 @@ PyObject * Py_BuildValue(const char *, ...); +PyObject * Py_VaBuildValue(const char *, va_list); PyObject * _Py_BuildValue_SizeT(const char *, ...); +PyObject * _Py_VaBuildValue_SizeT(const char *, va_list); int _PyArg_NoKeywords(const char *funcname, PyObject *kw); int PyArg_UnpackTuple(PyObject *args, const char 
*name, Py_ssize_t min, Py_ssize_t max, ...); diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -499,10 +499,14 @@ def getanyitem(str): return str.basecharclass() - def method_split(str, patt): # XXX + def method_split(str, patt, max=-1): getbookkeeper().count("str_split", str, patt) return getbookkeeper().newlist(str.basestringclass()) + def method_rsplit(str, patt, max=-1): + getbookkeeper().count("str_rsplit", str, patt) + return getbookkeeper().newlist(str.basestringclass()) + def method_replace(str, s1, s2): return str.basestringclass() diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py --- a/pypy/module/_stackless/test/conftest.py +++ 
b/pypy/module/_stackless/test/conftest.py @@ -2,6 +2,7 @@ import py.test def pytest_runtest_setup(item): + py.test.importorskip('greenlet') if sys.platform == 'win32': py.test.skip("stackless tests segfault on Windows") diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,7 +4,6 @@ .. contents:: - .. _`try out the translator`: Trying out the translator @@ -18,9 +17,7 @@ * Download and install Pygame_. - * Download and install `Dot Graphviz`_ (optional if you have an internet - connection: the flowgraph viewer then connects to - codespeak.net and lets it convert the flowgraph by a graphviz server). + * Download and install `Dot Graphviz`_ To start the interactive translator shell do:: diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken 
intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. 
- # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -857,6 +857,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -576,20 +576,56 @@ res = self.interpret(f, [i, newlines]) assert res == f(i, newlines) - def test_split(self): + def _make_split_test(self, split_fn): const = self.const def fn(i): s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = s.split(const('.')) + l = getattr(s, split_fn)(const('.')) sum = 0 for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) return sum + len(l) * 100 + return fn + + def test_split(self): + fn = self._make_split_test('split') for i in range(5): res = self.interpret(fn, [i]) assert res == fn(i) + def test_rsplit(self): + fn = self._make_split_test('rsplit') + for i in range(5): + res = self.interpret(fn, [i]) + assert res == fn(i) + + def _make_split_limit_test(self, split_fn): + const = self.const + def fn(i, j): + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.'), j) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - 
ord(const('0')[0]) + return sum + len(l) * 100 + return fn + + def test_split_limit(self): + fn = self._make_split_limit_test('split') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + + def test_rsplit_limit(self): + fn = self._make_split_limit_test('rsplit') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + def test_contains(self): const = self.const constchar = self.constchar diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x 
= interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/dotviewer/conftest.py b/dotviewer/conftest.py --- a/dotviewer/conftest.py +++ b/dotviewer/conftest.py @@ -6,4 +6,6 @@ dest="pygame", default=False, help="allow interactive tests using Pygame") -option = py.test.config.option +def pytest_configure(config): + global option + option = config.option diff --git a/.hgsub b/.hgsub deleted file mode 100644 --- a/.hgsub +++ /dev/null @@ -1,2 +0,0 @@ -greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c -lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc /* %rdi is the 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. 
gcc there seems not to support .cfi_... + # hack it out... + s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. better than default of 131k for most cases # in case it didn't work diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. 
- self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. 
self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 
@@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1126,7 +1126,7 @@ """ if not isinstance(source, str): source = py.std.inspect.getsource(source).lstrip() - while source.startswith('@py.test.mark.'): + while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function # object, we may ignore them assert '\n' in source diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,8 +5,8 @@ soon as possible (at least in a simple case). """ -import weakref -import py +import weakref, random +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -67,6 +67,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -82,8 +96,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts=enable_opts) + patch = get_functions_to_patch() + old_value = {} + try: + 
for (obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts=enable_opts) + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES) cbuilder.compile() @@ -122,7 +149,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark GC. def setup_class(cls): funcs = [] @@ -173,15 +200,21 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True) + gcrootfinder=cls.gcrootfinder, jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -635,3 +668,10 @@ gcrootfinder="asmgcc", jit=True, enable_opts=ALL_OPTS_NAMES) assert int(res) == 20 + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -214,3 +214,21 @@ assert res == 1024*1024 res = thread.stack_size(0) assert res == 2*1024*1024 + + def test_interrupt_main(self): + import thread, time + import signal 
+ + def f(): + time.sleep(0.5) + thread.interrupt_main() + + def busy_wait(): + for x in range(1000): + time.sleep(0.01) + + # This is normally called by app_main.py + signal.signal(signal.SIGINT, signal.default_int_handler) + + thread.start_new_thread(f, ()) + raises(KeyboardInterrupt, busy_wait) diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,13 +22,21 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - return jit.hint(self, promote=True).items + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items + return self.items def getitem(self, idx): return self.getitems()[idx] @@ -44,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -620,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, 
func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/lib_pypy/pyrepl/module_lister.py b/lib_pypy/pyrepl/module_lister.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/module_lister.py @@ -0,0 +1,70 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl.completing_reader import uniqify +import os, sys + +# for the completion support. +# this is all quite nastily written. +_packages = {} + +def _make_module_list_dir(dir, suffs, prefix=''): + l = [] + for fname in os.listdir(dir): + file = os.path.join(dir, fname) + if os.path.isfile(file): + for suff in suffs: + if fname.endswith(suff): + l.append( prefix + fname[:-len(suff)] ) + break + elif os.path.isdir(file) \ + and os.path.exists(os.path.join(file, "__init__.py")): + l.append( prefix + fname ) + _packages[prefix + fname] = _make_module_list_dir( + file, suffs, prefix + fname + '.' 
) + l = uniqify(l) + l.sort() + return l + +def _make_module_list(): + import imp + suffs = [x[0] for x in imp.get_suffixes() if x[0] != '.pyc'] + def compare(x, y): + c = -cmp(len(x), len(y)) + if c: + return c + else: + return -cmp(x, y) + suffs.sort(compare) + _packages[''] = list(sys.builtin_module_names) + for dir in sys.path: + if dir == '': + dir = '.' + if os.path.isdir(dir): + _packages[''] += _make_module_list_dir(dir, suffs) + _packages[''].sort() + +def find_modules(stem): + l = stem.split('.') + pack = '.'.join(l[:-1]) + try: + mods = _packages[pack] + except KeyError: + raise ImportError, "can't find \"%s\" package"%pack + return [mod for mod in mods if mod.startswith(stem)] diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -7,15 +7,16 @@ bootstrap_function, PyObjectFields, cpython_struct, CONST_STRING, CONST_WSTRING) from pypy.module.cpyext.pyerrors import PyErr_BadArgument -from pypy.module.cpyext.pyobject import PyObject, from_ref, make_typedescr +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.objspace.std import unicodeobject, unicodetype from pypy.rlib import runicode import sys -## See comment in stringobject.py. PyUnicode_FromUnicode(NULL, size) is not -## yet supported. +## See comment in stringobject.py. 
PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) @@ -28,7 +29,8 @@ make_typedescr(space.w_unicode.instancetypedef, basestruct=PyUnicodeObject.TO, attach=unicode_attach, - dealloc=unicode_dealloc) + dealloc=unicode_dealloc, + realize=unicode_realize) # Buffer for the default encoding (used by PyUnicde_GetDefaultEncoding) DEFAULT_ENCODING_SIZE = 100 @@ -39,12 +41,39 @@ Py_UNICODE = lltype.UniChar +def new_empty_unicode(space, length): + """ + Allocatse a PyUnicodeObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until unicode_realize() is + called. + """ + typedescr = get_typedescr(space.w_unicode.instancetypedef) + py_obj = typedescr.allocate(space, space.w_unicode) + py_uni = rffi.cast(PyUnicodeObject, py_obj) + + buflen = length + 1 + py_uni.c_size = length + py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True) + return py_uni + def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) +def unicode_realize(space, py_obj): + """ + Creates the unicode in the interpreter. The PyUnicodeObject buffer must not + be modified after this call. 
+ """ + py_uni = rffi.cast(PyUnicodeObject, py_obj) + s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + @cpython_api([PyObject], lltype.Void, external=False) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) @@ -128,7 +157,9 @@ def PyUnicode_AsUnicode(space, ref): """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" - if not PyUnicode_Check(space, ref): + # Don't use PyUnicode_Check, it will realize the object :-( + w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) + if not space.is_true(space.issubtype(w_type, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @@ -237,10 +268,11 @@ object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" - if not wchar_p: - raise NotImplementedError - s = rffi.wcharpsize2unicode(wchar_p, length) - return space.wrap(s) + if wchar_p: + s = rffi.wcharpsize2unicode(wchar_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_unicode(space, length)) @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromWideChar(space, wchar_p, length): @@ -330,6 +362,29 @@ w_str = space.wrap(rffi.charpsize2str(s, size)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) + at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def PyUnicode_Resize(space, ref, newsize): + # XXX always create a new string so far + py_uni = rffi.cast(PyUnicodeObject, ref[0]) + if not py_uni.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "PyUnicode_Resize called on already created string")) + try: + py_newuni = new_empty_unicode(space, newsize) + except MemoryError: + 
Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_uni.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newuni.c_buffer[i] = py_uni.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newuni) + return 0 + @cpython_api([PyObject], PyObject) def PyUnicode_AsUTF8String(space, w_unicode): """Encode a Unicode object using UTF-8 and return the result as Python string diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -11,6 +11,8 @@ /* the -3 option will probably not be implemented */ #define Py_Py3kWarningFlag 0 +#define Py_FrozenFlag 0 + #ifdef __cplusplus } #endif diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -487,6 +487,7 @@ """) def test_range_iter(self): + py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = 
call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,10 +1010,11 @@ """) def test_func_defaults(self): + py.test.skip("skipped until we fix defaults") def main(n): i = 1 while i < n: - i += len(xrange(i)) / i + i += len(xrange(i+1)) - i return i log = self.run(main, [10000]) @@ -1023,17 +1025,49 @@ guard_true(i10, descr=) # This can be improved if the JIT realized the lookup of i5 produces # a constant and thus can be removed entirely - i12 = int_sub(i5, 1) - i13 = uint_floordiv(i12, i7) + i120 = int_add(i5, 1) + i140 = int_lt(0, i120) + guard_true(i140, descr=) + i13 = uint_floordiv(i5, i7) i15 = int_add(i13, 1) i17 = int_lt(i15, 0) - guard_false(i17, descr=) - i18 = int_floordiv(i15, i5) - i19 = int_xor(i15, i5) - i20 = int_mod(i15, i5) - i21 = int_is_true(i20) - i22 = int_add_ovf(i5, i18) - guard_no_overflow(descr=) + guard_false(i17, descr=) + i20 = int_sub(i15, i5) + i21 = int_add_ovf(i5, i20) + guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, p3, p4, i22, i6, i7, p8, p9, descr=) + jump(p0, p1, p2, p3, p4, i21, i6, i7, p8, p9, descr=) """) + + def test_unpack_iterable_non_list_tuple(self): + def main(n): + import array + + items = [array.array("i", [1])] * n + total = 0 + for a, in items: + total += a + return total + + log = self.run(main, [1000000]) + assert log.result == 1000000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i16 = int_ge(i12, i13) + guard_false(i16, descr=) + p17 = getarrayitem_gc(p15, i12, descr=) + i19 = int_add(i12, 1) + setfield_gc(p4, i19, descr=) + guard_nonnull_class(p17, 146982464, descr=) + i21 = getfield_gc(p17, descr=) + i23 = int_lt(0, i21) + guard_true(i23, descr=) + i24 = getfield_gc(p17, descr=) + i25 = getarrayitem_raw(i24, 0, descr=) + i27 = int_lt(1, i21) + guard_false(i27, descr=) + i28 = int_add_ovf(i10, i25) + 
guard_no_overflow(descr=) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) + """) diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) 
See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/lib_pypy/pyrepl/fancy_termios.py b/lib_pypy/pyrepl/fancy_termios.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/fancy_termios.py @@ -0,0 +1,52 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import termios + +class TermState: + def __init__(self, tuples): + self.iflag, self.oflag, self.cflag, self.lflag, \ + self.ispeed, self.ospeed, self.cc = tuples + def as_list(self): + return [self.iflag, self.oflag, self.cflag, self.lflag, + self.ispeed, self.ospeed, self.cc] + + def copy(self): + return self.__class__(self.as_list()) + +def tcgetattr(fd): + return TermState(termios.tcgetattr(fd)) + +def tcsetattr(fd, when, attrs): + termios.tcsetattr(fd, when, attrs.as_list()) + +class Term(TermState): + TS__init__ = TermState.__init__ + def __init__(self, fd=0): + self.TS__init__(termios.tcgetattr(fd)) + self.fd = fd + self.stack = [] + def save(self): + self.stack.append( self.as_list() ) + def set(self, when=termios.TCSANOW): + termios.tcsetattr(self.fd, when, self.as_list()) + def restore(self): + self.TS__init__(self.stack.pop()) + self.set() + diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git 
a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -45,3 +47,29 @@ space.warn('PyImport_ImportModuleNoBlock() is not non-blocking', space.w_RuntimeWarning) return PyImport_Import(space, space.wrap(rffi.charp2str(name))) + + at cpython_api([PyObject], PyObject) +def PyImport_ReloadModule(space, w_mod): + from pypy.module.imp.importing import reload + return reload(space, w_mod) + + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. 
First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. + Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -1,5 +1,5 @@ -from pypy.module.cpyext.api import cpython_api, generic_cpy_call, CANNOT_FAIL,\ - cpython_struct +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct) from pypy.rpython.lltypesystem import rffi, lltype PyInterpreterState = lltype.Ptr(cpython_struct("PyInterpreterState", ())) @@ -77,6 +77,52 @@ state = space.fromcache(InterpreterState) return state.get_thread_state(space) + at cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) +def PyThreadState_Swap(space, tstate): + """Swap the current thread state with the thread state given by the argument + tstate, 
which may be NULL. The global interpreter lock must be held.""" + # All cpyext calls release and acquire the GIL, so this function has no + # side-effects + if tstate: + return lltype.nullptr(PyThreadState.TO) + else: + state = space.fromcache(InterpreterState) + return state.get_thread_state(space) + + at cpython_api([PyThreadState], lltype.Void) +def PyEval_AcquireThread(space, tstate): + """Acquire the global interpreter lock and set the current thread state to + tstate, which should not be NULL. The lock must have been created earlier. + If this thread already has the lock, deadlock ensues. This function is not + available when thread support is disabled at compile time.""" + # All cpyext calls release and acquire the GIL, so this is not necessary. + pass + + at cpython_api([PyThreadState], lltype.Void) +def PyEval_ReleaseThread(space, tstate): + """Reset the current thread state to NULL and release the global interpreter + lock. The lock must have been created earlier and must be held by the current + thread. The tstate argument, which must not be NULL, is only used to check + that it represents the current thread state --- if it isn't, a fatal error is + reported. This function is not available when thread support is disabled at + compile time.""" + # All cpyext calls release and acquire the GIL, so this is not necessary. + pass + +PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', + typedef='PyGILState_STATE', + compilation_info=CConfig._compilation_info_) + + at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) +def PyGILState_Ensure(space): + # All cpyext calls release and acquire the GIL, so this is not necessary. + return 0 + + at cpython_api([PyGILState_STATE], lltype.Void) +def PyGILState_Release(space, state): + # All cpyext calls release and acquire the GIL, so this is not necessary. 
+ return + @cpython_api([], PyInterpreterState, error=CANNOT_FAIL) def PyInterpreterState_Head(space): """Return the interpreter state object at the head of the list of all such objects. diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). 
# if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return 
modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): @@ -286,7 +319,6 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class descr_virtual_token = vrefinfo.descr_virtual_token - descr_virtualref_index = vrefinfo.descr_virtualref_index # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, @@ -296,7 +328,6 @@ tokenbox = BoxInt() self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) - vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) def optimize_VIRTUAL_REF_FINISH(self, op): # Set the 'forced' field of the virtual_ref. diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -34,43 +34,7 @@ PyThreadState_Clear().""" raise NotImplementedError - at cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) -def PyThreadState_Swap(space, tstate): - """Swap the current thread state with the thread state given by the argument - tstate, which may be NULL. 
The global interpreter lock must be held.""" - raise NotImplementedError - - at cpython_api([PyThreadState], lltype.Void) -def PyEval_AcquireThread(space, tstate): - """Acquire the global interpreter lock and set the current thread state to - tstate, which should not be NULL. The lock must have been created earlier. - If this thread already has the lock, deadlock ensues. This function is not - available when thread support is disabled at compile time.""" - raise NotImplementedError - - at cpython_api([PyThreadState], lltype.Void) -def PyEval_ReleaseThread(space, tstate): - """Reset the current thread state to NULL and release the global interpreter - lock. The lock must have been created earlier and must be held by the current - thread. The tstate argument, which must not be NULL, is only used to check - that it represents the current thread state --- if it isn't, a fatal error is - reported. This function is not available when thread support is disabled at - compile time.""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def Py_MakePendingCalls(space): return 0 -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) - - at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) -def PyGILState_Ensure(space): - return 0 - - at cpython_api([PyGILState_STATE], lltype.Void) -def PyGILState_Release(space, state): - return - diff --git a/pypy/rlib/_rweakkeydict.py b/pypy/rlib/_rweakkeydict.py --- a/pypy/rlib/_rweakkeydict.py +++ b/pypy/rlib/_rweakkeydict.py @@ -123,7 +123,7 @@ @jit.dont_look_inside def ll_get(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get', hex(hash), # ll_debugrepr(d.entries[i].key), # ll_debugrepr(d.entries[i].value)) @@ -143,7 +143,7 @@ def ll_set_nonnull(d, llkey, llvalue): hash = 
compute_identity_hash(llkey) keyref = weakref_create(llkey) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = keyref d.entries[i].value = llvalue @@ -160,7 +160,7 @@ @jit.dont_look_inside def ll_set_null(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. # We don't store a NULL value, but a dead weakref, because diff --git a/pypy/module/cpyext/src/sysmodule.c b/pypy/module/cpyext/src/sysmodule.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/sysmodule.c @@ -0,0 +1,103 @@ +#include + +/* Reimplementation of PyFile_WriteString() no calling indirectly + PyErr_CheckSignals(): avoid the call to PyObject_Str(). */ + +static int +sys_pyfile_write_unicode(PyObject *unicode, PyObject *file) +{ + PyObject *writer = NULL, *args = NULL, *result = NULL; + int err; + + if (file == NULL) + return -1; + + writer = PyObject_GetAttrString(file, "write"); + if (writer == NULL) + goto error; + + args = PyTuple_Pack(1, unicode); + if (args == NULL) + goto error; + + result = PyEval_CallObject(writer, args); + if (result == NULL) { + goto error; + } else { + err = 0; + goto finally; + } + +error: + err = -1; +finally: + Py_XDECREF(writer); + Py_XDECREF(args); + Py_XDECREF(result); + return err; +} + +static int +sys_pyfile_write(const char *text, PyObject *file) +{ + PyObject *unicode = NULL; + int err; + + if (file == NULL) + return -1; + + unicode = PyUnicode_FromString(text); + if (unicode == NULL) + return -1; + + err = sys_pyfile_write_unicode(unicode, file); + Py_DECREF(unicode); + return err; +} + +/* APIs to write to sys.stdout or sys.stderr using a printf-like interface. 
+ */ + +static void +sys_write(char *name, FILE *fp, const char *format, va_list va) +{ + PyObject *file; + PyObject *error_type, *error_value, *error_traceback; + char buffer[1001]; + int written; + + PyErr_Fetch(&error_type, &error_value, &error_traceback); + file = PySys_GetObject(name); + written = vsnprintf(buffer, sizeof(buffer), format, va); + if (sys_pyfile_write(buffer, file) != 0) { + PyErr_Clear(); + fputs(buffer, fp); + } + if (written < 0 || (size_t)written >= sizeof(buffer)) { + const char *truncated = "... truncated"; + if (sys_pyfile_write(truncated, file) != 0) + fputs(truncated, fp); + } + PyErr_Restore(error_type, error_value, error_traceback); +} + +void +PySys_WriteStdout(const char *format, ...) +{ + va_list va; + + va_start(va, format); + sys_write("stdout", stdout, format, va); + va_end(va); +} + +void +PySys_WriteStderr(const char *format, ...) +{ + va_list va; + + va_start(va, format); + sys_write("stderr", stderr, format, va); + va_end(va); +} + diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/commands.py @@ -0,0 +1,385 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that 
copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import sys, os + +# Catgories of actions: +# killing +# yanking +# motion +# editing +# history +# finishing +# [completion] + +class Command(object): + finish = 0 + kills_digit_arg = 1 + def __init__(self, reader, (event_name, event)): + self.reader = reader + self.event = event + self.event_name = event_name + def do(self): + pass + +class KillCommand(Command): + def kill_range(self, start, end): + if start == end: + return + r = self.reader + b = r.buffer + text = b[start:end] + del b[start:end] + if is_kill(r.last_command): + if start < r.pos: + r.kill_ring[-1] = text + r.kill_ring[-1] + else: + r.kill_ring[-1] = r.kill_ring[-1] + text + else: + r.kill_ring.append(text) + r.pos = start + r.dirty = 1 + +class YankCommand(Command): + pass + +class MotionCommand(Command): + pass + +class EditCommand(Command): + pass + +class FinishCommand(Command): + finish = 1 + pass + +def is_kill(command): + return command and issubclass(command, KillCommand) + +def is_yank(command): + return command and issubclass(command, YankCommand) + +# etc + +class digit_arg(Command): + kills_digit_arg = 0 + def do(self): + r = self.reader + c = self.event[-1] + if c == "-": + if r.arg is not None: + r.arg = -r.arg + else: + r.arg = -1 + else: + d = int(c) + if r.arg is None: + r.arg = d + else: + if r.arg < 0: + r.arg = 10*r.arg - d + else: + r.arg = 10*r.arg + d + r.dirty = 1 + +class clear_screen(Command): + def do(self): + r = 
self.reader + r.console.clear() + r.dirty = 1 + +class refresh(Command): + def do(self): + self.reader.dirty = 1 + +class repaint(Command): + def do(self): + self.reader.dirty = 1 + self.reader.console.repaint_prep() + +class kill_line(KillCommand): + def do(self): + r = self.reader + b = r.buffer + eol = r.eol() + for c in b[r.pos:eol]: + if not c.isspace(): + self.kill_range(r.pos, eol) + return + else: + self.kill_range(r.pos, eol+1) + +class unix_line_discard(KillCommand): + def do(self): + r = self.reader + self.kill_range(r.bol(), r.pos) + +# XXX unix_word_rubout and backward_kill_word should actually +# do different things... + +class unix_word_rubout(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.bow(), r.pos) + +class kill_word(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.pos, r.eow()) + +class backward_kill_word(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.bow(), r.pos) + +class yank(YankCommand): + def do(self): + r = self.reader + if not r.kill_ring: + r.error("nothing to yank") + return + r.insert(r.kill_ring[-1]) + +class yank_pop(YankCommand): + def do(self): + r = self.reader + b = r.buffer + if not r.kill_ring: + r.error("nothing to yank") + return + if not is_yank(r.last_command): + r.error("previous command was not a yank") + return + repl = len(r.kill_ring[-1]) + r.kill_ring.insert(0, r.kill_ring.pop()) + t = r.kill_ring[-1] + b[r.pos - repl:r.pos] = t + r.pos = r.pos - repl + len(t) + r.dirty = 1 + +class interrupt(FinishCommand): + def do(self): + import signal + self.reader.console.finish() + os.kill(os.getpid(), signal.SIGINT) + +class suspend(Command): + def do(self): + import signal + r = self.reader + p = r.pos + r.console.finish() + os.kill(os.getpid(), signal.SIGSTOP) + ## this should probably be done + ## in a handler for SIGCONT? 
+ r.console.prepare() + r.pos = p + r.posxy = 0, 0 + r.dirty = 1 + r.console.screen = [] + +class up(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + bol1 = r.bol() + if bol1 == 0: + if r.historyi > 0: + r.select_item(r.historyi - 1) + return + r.pos = 0 + r.error("start of buffer") + return + bol2 = r.bol(bol1-1) + line_pos = r.pos - bol1 + if line_pos > bol1 - bol2 - 1: + r.sticky_y = line_pos + r.pos = bol1 - 1 + else: + r.pos = bol2 + line_pos + +class down(MotionCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + bol1 = r.bol() + eol1 = r.eol() + if eol1 == len(b): + if r.historyi < len(r.history): + r.select_item(r.historyi + 1) + r.pos = r.eol(0) + return + r.pos = len(b) + r.error("end of buffer") + return + eol2 = r.eol(eol1+1) + if r.pos - bol1 > eol2 - eol1 - 1: + r.pos = eol2 + else: + r.pos = eol1 + (r.pos - bol1) + 1 + +class left(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + p = r.pos - 1 + if p >= 0: + r.pos = p + else: + self.reader.error("start of buffer") + +class right(MotionCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + p = r.pos + 1 + if p <= len(b): + r.pos = p + else: + self.reader.error("end of buffer") + +class beginning_of_line(MotionCommand): + def do(self): + self.reader.pos = self.reader.bol() + +class end_of_line(MotionCommand): + def do(self): + r = self.reader + self.reader.pos = self.reader.eol() + +class home(MotionCommand): + def do(self): + self.reader.pos = 0 + +class end(MotionCommand): + def do(self): + self.reader.pos = len(self.reader.buffer) + +class forward_word(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + r.pos = r.eow() + +class backward_word(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + r.pos = r.bow() + +class self_insert(EditCommand): + def do(self): + r = self.reader + r.insert(self.event 
* r.get_arg()) + +class insert_nl(EditCommand): + def do(self): + r = self.reader + r.insert("\n" * r.get_arg()) + +class transpose_characters(EditCommand): + def do(self): + r = self.reader + b = r.buffer + s = r.pos - 1 + if s < 0: + r.error("cannot transpose at start of buffer") + else: + if s == len(b): + s -= 1 + t = min(s + r.get_arg(), len(b) - 1) + c = b[s] + del b[s] + b.insert(t, c) + r.pos = t + r.dirty = 1 + +class backspace(EditCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + if r.pos > 0: + r.pos -= 1 + del b[r.pos] + r.dirty = 1 + else: + self.reader.error("can't backspace at start") + +class delete(EditCommand): + def do(self): + r = self.reader + b = r.buffer + if ( r.pos == 0 and len(b) == 0 # this is something of a hack + and self.event[-1] == "\004"): + r.update_screen() + r.console.finish() + raise EOFError + for i in range(r.get_arg()): + if r.pos != len(b): + del b[r.pos] + r.dirty = 1 + else: + self.reader.error("end of buffer") + +class accept(FinishCommand): + def do(self): + pass + +class help(Command): + def do(self): + self.reader.msg = self.reader.help_text + self.reader.dirty = 1 + +class invalid_key(Command): + def do(self): + pending = self.reader.console.getpending() + s = ''.join(self.event) + pending.data + self.reader.error("`%r' not bound"%s) + +class invalid_command(Command): + def do(self): + s = self.event_name + self.reader.error("command `%s' not known"%s) + +class qIHelp(Command): + def do(self): + r = self.reader + r.insert((self.event + r.console.getpending().data) * r.get_arg()) + r.pop_input_trans() + +from pyrepl import input + +class QITrans(object): + def push(self, evt): + self.evt = evt + def get(self): + return ('qIHelp', self.evt.raw) + +class quoted_insert(Command): + kills_digit_arg = 0 + def do(self): + self.reader.push_input_trans(QITrans()) diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ 
b/pypy/module/cpyext/test/foo.c @@ -435,14 +435,6 @@ return (PyObject *)foop; } -/* List of functions exported by this module */ - -static PyMethodDef foo_functions[] = { - {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, - {NULL, NULL} /* Sentinel */ -}; - - static int initerrtype_init(PyObject *self, PyObject *args, PyObject *kwargs) { PyErr_SetString(PyExc_ValueError, "init raised an error!"); return -1; @@ -592,6 +584,41 @@ 0 /*tp_weaklist*/ }; +/* A type with a custom allocator */ +static void custom_dealloc(PyObject *ob) +{ + free(ob); +} + +static PyTypeObject CustomType; + +static PyObject *newCustom(PyObject *self, PyObject *args) +{ + PyObject *obj = calloc(1, sizeof(PyObject)); + obj->ob_type = &CustomType; + _Py_NewReference(obj); + return obj; +} + +static PyTypeObject CustomType = { + PyObject_HEAD_INIT(NULL) + 0, + "foo.Custom", /*tp_name*/ + sizeof(PyObject), /*tp_size*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)custom_dealloc, /*tp_dealloc*/ +}; + + +/* List of functions exported by this module */ + +static PyMethodDef foo_functions[] = { + {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, + {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, + {NULL, NULL} /* Sentinel */ +}; + /* Initialize this module. 
*/ @@ -616,7 +643,10 @@ if (PyType_Ready(&InitErrType) < 0) return; if (PyType_Ready(&SimplePropertyType) < 0) - return; + return; + CustomType.ob_type = &MetaType; + if (PyType_Ready(&CustomType) < 0) + return; m = Py_InitModule("foo", foo_functions); if (m == NULL) return; @@ -635,4 +665,6 @@ return; if (PyDict_SetItemString(d, "Property", (PyObject *) &SimplePropertyType) < 0) return; + if (PyDict_SetItemString(d, "Custom", (PyObject *) &CustomType) < 0) + return; } diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,14 +3,14 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.annlowlevel import llhelper -from pypy.interpreter.baseobjspace import DescrMismatch +from pypy.interpreter.baseobjspace import W_Root, DescrMismatch from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError @@ -287,11 +287,17 @@ 
W_TypeObject.__init__(self, space, extension_name, bases_w or [space.w_object], dict_w) - self.flag_cpytype = True + if not space.is_true(space.issubtype(self, space.w_type)): + self.flag_cpytype = True self.flag_heaptype = False @bootstrap_function def init_typeobject(space): + # Probably a hack + space.model.typeorder[W_PyCTypeObject] = [(W_PyCTypeObject, None), + (W_TypeObject, None), + (W_Root, None)] + make_typedescr(space.w_type.instancetypedef, basestruct=PyTypeObject, attach=type_attach, @@ -355,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -375,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -472,14 +478,19 @@ def PyType_Ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: return 0 + type_realize(space, rffi.cast(PyObject, pto)) + return 0 + +def type_realize(space, py_obj): + pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= Py_TPFLAGS_READYING try: - type_realize(space, rffi.cast(PyObject, pto)) - pto.c_tp_flags |= Py_TPFLAGS_READY + w_obj = _type_realize(space, py_obj) finally: 
pto.c_tp_flags &= ~Py_TPFLAGS_READYING - return 0 + pto.c_tp_flags |= Py_TPFLAGS_READY + return w_obj def solid_base(space, w_type): typedef = w_type.instancetypedef @@ -535,7 +546,7 @@ finally: Py_DecRef(space, base_pyo) -def type_realize(space, py_obj): +def _type_realize(space, py_obj): """ Creates an interpreter type from a PyTypeObject structure. """ @@ -554,7 +565,9 @@ finish_type_1(space, py_type) - w_obj = space.allocate_instance(W_PyCTypeObject, space.w_type) + w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) + + w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype) track_reference(space, py_obj, w_obj) w_obj.__init__(space, py_type) w_obj.ready() diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless.py --- a/pypy/module/test_lib_pypy/test_stackless.py +++ b/pypy/module/test_lib_pypy/test_stackless.py @@ -3,6 +3,8 @@ class AppTest_Stackless: def setup_class(cls): + import py.test + py.test.importorskip('greenlet') space = gettestobjspace(usemodules=('_stackless', '_socket')) cls.space = space # cannot test the unpickle part on top of py.py diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -205,7 +205,8 @@ if dirname == search: # not found! 
let's hope that the compiled-in path is ok print >> sys.stderr, ('debug: WARNING: library path not found, ' - 'using compiled-in sys.path') + 'using compiled-in sys.path ' + 'and sys.prefix will be unset') newpath = sys.path[:] break newpath = sys.pypy_initial_path(dirname) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -2,14 +2,12 @@ from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyVarObject, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, - Py_GE, CONST_STRING, FILEP, fwrite, build_type_checkers) + Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, - track_reference, get_typedescr, RefcountState) + track_reference, get_typedescr, _Py_NewReference, RefcountState) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall -from pypy.module._file.interp_file import W_File -from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.error import OperationError import pypy.module.__builtin__.operation as operation @@ -185,26 +183,17 @@ return 0 @cpython_api([PyObject, PyTypeObjectPtr], PyObject) -def PyObject_Init(space, py_obj, type): +def PyObject_Init(space, obj, type): """Initialize a newly-allocated object op with its type and initial reference. Returns the initialized object. If type indicates that the object participates in the cyclic garbage detector, it is added to the detector's set of observed objects. 
Other fields of the object are not affected.""" - if not py_obj: + if not obj: PyErr_NoMemory(space) - py_obj.c_ob_type = type - py_obj.c_ob_refcnt = 1 - w_type = from_ref(space, rffi.cast(PyObject, type)) - assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, py_obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, py_obj) - else: - assert False, "Please add more cases in PyObject_Init" - return py_obj + obj.c_ob_type = type + _Py_NewReference(space, obj) + return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) def PyObject_InitVar(space, py_obj, type, size): @@ -256,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. 
This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -396,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, @@ -429,40 +428,3 @@ rffi.free_nonmovingbuffer(data, buf) return 0 -PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File) - - at cpython_api([PyObject, rffi.INT_real], PyObject) -def PyFile_GetLine(space, w_obj, n): - """ - Equivalent to p.readline([n]), this function reads one line from the - object p. p may be a file object or any object with a readline() - method. If n is 0, exactly one line is read, regardless of the length of - the line. If n is greater than 0, no more than n bytes will be read - from the file; a partial line can be returned. In both cases, an empty string - is returned if the end of the file is reached immediately. 
If n is less than - 0, however, one line is read regardless of length, but EOFError is - raised if the end of the file is reached immediately.""" - try: - w_readline = space.getattr(w_obj, space.wrap('readline')) - except OperationError: - raise OperationError( - space.w_TypeError, space.wrap( - "argument must be a file, or have a readline() method.")) - - n = rffi.cast(lltype.Signed, n) - if space.is_true(space.gt(space.wrap(n), space.wrap(0))): - return space.call_function(w_readline, space.wrap(n)) - elif space.is_true(space.lt(space.wrap(n), space.wrap(0))): - return space.call_function(w_readline) - else: - # XXX Raise EOFError as specified - return space.call_function(w_readline) - at cpython_api([CONST_STRING, CONST_STRING], PyObject) -def PyFile_FromString(space, filename, mode): - """ - On success, return a new file object that is opened on the file given by - filename, with a file mode given by mode, where mode has the same - semantics as the standard C routine fopen(). On failure, return NULL.""" - w_filename = space.wrap(rffi.charp2str(filename)) - w_mode = space.wrap(rffi.charp2str(mode)) - return space.call_method(space.builtin, 'file', w_filename, w_mode) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -108,6 +108,11 @@ #return w_value or None return None + def impl_setdefault(self, w_key, w_default): + # here the dict is always empty + self._as_rdict().impl_fallback_setitem(w_key, w_default) + return w_default + def impl_setitem(self, w_key, w_value): self._as_rdict().impl_fallback_setitem(w_key, w_value) @@ -181,6 +186,9 @@ # _________________________________________________________________ # fallback implementation methods + def impl_fallback_setdefault(self, w_key, w_default): + return self.r_dict_content.setdefault(w_key, w_default) + def impl_fallback_setitem(self, w_key, w_value): self.r_dict_content[w_key] = 
w_value @@ -227,6 +235,7 @@ ("length", 0), ("setitem_str", 2), ("setitem", 2), + ("setdefault", 2), ("delitem", 1), ("iter", 0), ("items", 0), @@ -317,6 +326,14 @@ def impl_setitem_str(self, key, w_value): self.content[key] = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + return self.content.setdefault(space.str_w(w_key), w_default) + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) @@ -787,13 +804,7 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup - w_value = w_dict.getitem(w_key) - if w_value is not None: - return w_value - else: - w_dict.setitem(w_key, w_default) - return w_default + return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): len_defaults = len(defaults_w) diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 
'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/lib_pypy/pyrepl/tests/bugs.py b/lib_pypy/pyrepl/tests/bugs.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/bugs.py @@ -0,0 +1,36 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +# this test case should contain as-verbatim-as-possible versions of +# (applicable) bug reports + +class BugsTestCase(ReaderTestCase): + + def test_transpose_at_start(self): + self.run_test([( 'transpose', [EA, '']), + ( 'accept', [''])]) + +def test(): + run_testcase(BugsTestCase) + +if __name__ == '__main__': + test() diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -212,52 +212,48 @@ # cpython, and rpython, assumed that integer division truncates # towards -infinity. however, in C99 and most (all?) other # backends, integer division truncates towards 0. so assuming - # that, we can generate scary code that applies the necessary + # that, we call a helper function that applies the necessary # correction in the right cases. 
- # paper and pencil are encouraged for this :) - - from pypy.rpython.rbool import bool_repr - assert isinstance(repr.lowleveltype, Number) - c_zero = inputconst(repr.lowleveltype, repr.lowleveltype._default) op = func.split('_', 1)[0] if op == 'floordiv': - # return (x/y) - (((x^y)<0)&((x%y)!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod = hop.genop(prefix + 'mod', vlist, - resulttype=repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_mod, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_res = hop.genop(prefix + 'sub', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'floordiv'] + v_res = hop.gendirectcall(llfunc, vlist[0], vlist[1], v_res) elif op == 'mod': - # return r + y*(((x^y)<0)&(r!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_res, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr1 = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_corr = hop.genop(prefix + 'mul', [v_corr1, vlist[1]], - resulttype=repr) - v_res = hop.genop(prefix + 'add', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'mod'] + v_res = hop.gendirectcall(llfunc, vlist[1], v_res) + v_res = hop.llops.convertvar(v_res, repr, r_result) return v_res +INT_BITS_1 = r_int.BITS - 1 +LLONG_BITS_1 = r_longlong.BITS - 1 + +def ll_correct_int_floordiv(x, y, r): + p = r * y + if y < 0: u = p - x + else: u = x - p + return r + (u >> INT_BITS_1) + +def ll_correct_llong_floordiv(x, y, r): + p = r * y + 
if y < 0: u = p - x + else: u = x - p + return r + (u >> LLONG_BITS_1) + +def ll_correct_int_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> INT_BITS_1)) + +def ll_correct_llong_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> LLONG_BITS_1)) + + #Helper functions for comparisons def _rtype_compare_template(hop, func): diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -19,6 +19,7 @@ 'load_module': 'interp_imp.load_module', 'load_source': 'interp_imp.load_source', 'load_compiled': 'interp_imp.load_compiled', + 'load_dynamic': 'interp_imp.load_dynamic', '_run_compiled_module': 'interp_imp._run_compiled_module', # pypy '_getimporter': 'importing._getimporter', # pypy #'run_module': 'interp_imp.run_module', @@ -36,7 +37,6 @@ } appleveldefs = { - 'load_dynamic': 'app_imp.load_dynamic', } def __init__(self, space, *args): diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def 
test_time_clock(self): diff --git a/pypy/translator/goal/targetrpystonedalone.py b/pypy/translator/goal/targetrpystonedalone.py --- a/pypy/translator/goal/targetrpystonedalone.py +++ b/pypy/translator/goal/targetrpystonedalone.py @@ -2,11 +2,11 @@ from pypy.translator.test import rpystone from pypy.translator.goal import richards import pypy.interpreter.gateway # needed before sys, order of imports !!! -from pypy.module.sys.version import svn_revision +from pypy.tool.version import get_repo_version_info # __________ Entry point __________ -VERSION = svn_revision() +VERSION = get_repo_version_info()[2] # note that we have %f but no length specifiers in RPython diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -146,6 +146,15 @@ self.pending_signals[n] = None self.reissue_signal_action.fire_after_thread_switch() + def set_interrupt(self): + "Simulates the effect of a SIGINT signal arriving" + n = cpy_signal.SIGINT + if self.reissue_signal_action is None: + self.report_signal(n) + else: + self.pending_signals[n] = None + self.reissue_signal_action.fire_after_thread_switch() + def report_signal(self, n): try: w_handler = self.handlers_w[n] diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 
'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ 
# regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? 
@@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 
@@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/lib_pypy/pyrepl/keymap.py b/lib_pypy/pyrepl/keymap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/keymap.py @@ -0,0 +1,186 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright 
notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +functions for parsing keyspecs + +Support for turning keyspecs into appropriate sequences. + +pyrepl uses it's own bastardized keyspec format, which is meant to be +a strict superset of readline's \"KEYSEQ\" format (which is to say +that if you can come up with a spec readline accepts that this +doesn't, you've found a bug and should tell me about it). + +Note that this is the `\\C-o' style of readline keyspec, not the +`Control-o' sort. + +A keyspec is a string representing a sequence of keypresses that can +be bound to a command. + +All characters other than the backslash represent themselves. In the +traditional manner, a backslash introduces a escape sequence. + +The extension to readline is that the sequence \\ denotes the +sequence of charaters produced by hitting KEY. + +Examples: + +`a' - what you get when you hit the `a' key +`\\EOA' - Escape - O - A (up, on my terminal) +`\\' - the up arrow key +`\\' - ditto (keynames are case insensitive) +`\\C-o', `\\c-o' - control-o +`\\M-.' - meta-period +`\\E.' - ditto (that's how meta works for pyrepl) +`\\', `\\', `\\t', `\\011', '\\x09', '\\X09', '\\C-i', '\\C-I' + - all of these are the tab character. Can you think of any more? 
+""" + +_escapes = { + '\\':'\\', + "'":"'", + '"':'"', + 'a':'\a', + 'b':'\h', + 'e':'\033', + 'f':'\f', + 'n':'\n', + 'r':'\r', + 't':'\t', + 'v':'\v' + } + +_keynames = { + 'backspace': 'backspace', + 'delete': 'delete', + 'down': 'down', + 'end': 'end', + 'enter': '\r', + 'escape': '\033', + 'f1' : 'f1', 'f2' : 'f2', 'f3' : 'f3', 'f4' : 'f4', + 'f5' : 'f5', 'f6' : 'f6', 'f7' : 'f7', 'f8' : 'f8', + 'f9' : 'f9', 'f10': 'f10', 'f11': 'f11', 'f12': 'f12', + 'f13': 'f13', 'f14': 'f14', 'f15': 'f15', 'f16': 'f16', + 'f17': 'f17', 'f18': 'f18', 'f19': 'f19', 'f20': 'f20', + 'home': 'home', + 'insert': 'insert', + 'left': 'left', + 'page down': 'page down', + 'page up': 'page up', + 'return': '\r', + 'right': 'right', + 'space': ' ', + 'tab': '\t', + 'up': 'up', + } + +class KeySpecError(Exception): + pass + +def _parse_key1(key, s): + ctrl = 0 + meta = 0 + ret = '' + while not ret and s < len(key): + if key[s] == '\\': + c = key[s+1].lower() + if _escapes.has_key(c): + ret = _escapes[c] + s += 2 + elif c == "c": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\C must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if ctrl: + raise KeySpecError, "doubled \\C- (char %d of %s)"%( + s + 1, repr(key)) + ctrl = 1 + s += 3 + elif c == "m": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\M must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if meta: + raise KeySpecError, "doubled \\M- (char %d of %s)"%( + s + 1, repr(key)) + meta = 1 + s += 3 + elif c.isdigit(): + n = key[s+1:s+4] + ret = chr(int(n, 8)) + s += 4 + elif c == 'x': + n = key[s+2:s+4] + ret = chr(int(n, 16)) + s += 4 + elif c == '<': + t = key.find('>', s) + if t == -1: + raise KeySpecError, \ + "unterminated \\< starting at char %d of %s"%( + s + 1, repr(key)) + ret = key[s+2:t].lower() + if ret not in _keynames: + raise KeySpecError, \ + "unrecognised keyname `%s' at char %d of %s"%( + ret, s + 2, repr(key)) + ret = _keynames[ret] + s = t + 1 + else: + raise KeySpecError, \ + 
"unknown backslash escape %s at char %d of %s"%( + `c`, s + 2, repr(key)) + else: + ret = key[s] + s += 1 + if ctrl: + if len(ret) > 1: + raise KeySpecError, "\\C- must be followed by a character" + ret = chr(ord(ret) & 0x1f) # curses.ascii.ctrl() + if meta: + ret = ['\033', ret] + else: + ret = [ret] + return ret, s + +def parse_keys(key): + s = 0 + r = [] + while s < len(key): + k, s = _parse_key1(key, s) + r.extend(k) + return r + +def compile_keymap(keymap, empty=''): + r = {} + for key, value in keymap.items(): + r.setdefault(key[0], {})[key[1:]] = value + for key, value in r.items(): + if empty in value: + if len(value) <> 1: + raise KeySpecError, \ + "key definitions for %s clash"%(value.values(),) + else: + r[key] = value[empty] + else: + r[key] = compile_keymap(value, empty) + return r diff --git a/pypy/translator/platform/posix.py b/pypy/translator/platform/posix.py --- a/pypy/translator/platform/posix.py +++ b/pypy/translator/platform/posix.py @@ -113,11 +113,16 @@ m.eci = eci def pypyrel(fpath): - rel = py.path.local(fpath).relto(pypypath) + lpath = py.path.local(fpath) + rel = lpath.relto(pypypath) if rel: return os.path.join('$(PYPYDIR)', rel) - else: - return fpath + m_dir = m.makefile_dir + if m_dir == lpath: + return '.' + if m_dir.dirpath() == lpath: + return '..' 
+ return fpath rel_cfiles = [m.pathrel(cfile) for cfile in cfiles] rel_ofiles = [rel_cfile[:-2]+'.o' for rel_cfile in rel_cfiles] diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/module/cpyext/include/longintrepr.h b/pypy/module/cpyext/include/longintrepr.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/longintrepr.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -3,8 +3,102 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.jitexc import JitException +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization + +class CachedField(object): + def __init__(self): + # Cache information for a field descr. 
It can be in one + # of two states: + # + # 1. 'cached_fields' is a dict mapping OptValues of structs + # to OptValues of fields. All fields on-heap are + # synchronized with the values stored in the cache. + # + # 2. we just did one setfield, which is delayed (and thus + # not synchronized). 'lazy_setfield' is the delayed + # ResOperation. In this state, 'cached_fields' contains + # out-of-date information. More precisely, the field + # value pending in the ResOperation is *not* visible in + # 'cached_fields'. + # + self._cached_fields = {} + self._lazy_setfield = None + self._lazy_setfield_registered = False + + def do_setfield(self, optheap, op): + # Update the state with the SETFIELD_GC operation 'op'. + structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + assert not self.possible_aliasing(optheap, structvalue) + cached_fieldvalue = self._cached_fields.get(structvalue, None) + if cached_fieldvalue is not fieldvalue: + # common case: store the 'op' as lazy_setfield, and register + # myself in the optheap's _lazy_setfields list + self._lazy_setfield = op + if not self._lazy_setfield_registered: + optheap._lazy_setfields.append(self) + self._lazy_setfield_registered = True + else: + # this is the case where the pending setfield ends up + # storing precisely the value that is already there, + # as proved by 'cached_fields'. In this case, we don't + # need any _lazy_setfield: the heap value is already right. + # Note that this may reset to None a non-None lazy_setfield, + # cancelling its previous effects with no side effect. + self._lazy_setfield = None + + def possible_aliasing(self, optheap, structvalue): + # If lazy_setfield is set and contains a setfield on a different + # structvalue, then we are annoyed, because it may point to either + # the same or a different structure at runtime. 
+ return (self._lazy_setfield is not None + and (optheap.getvalue(self._lazy_setfield.getarg(0)) + is not structvalue)) + + def getfield_from_cache(self, optheap, structvalue): + # Returns the up-to-date field's value, or None if not cached. + if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + if self._lazy_setfield is not None: + op = self._lazy_setfield + assert optheap.getvalue(op.getarg(0)) is structvalue + return optheap.getvalue(op.getarg(1)) + else: + return self._cached_fields.get(structvalue, None) + + def remember_field_value(self, structvalue, fieldvalue): + assert self._lazy_setfield is None + self._cached_fields[structvalue] = fieldvalue + + def force_lazy_setfield(self, optheap): + op = self._lazy_setfield + if op is not None: + # This is the way _lazy_setfield is usually reset to None. + # Now we clear _cached_fields, because actually doing the + # setfield might impact any of the stored result (because of + # possible aliasing). + self._cached_fields.clear() + self._lazy_setfield = None + optheap.next_optimization.propagate_forward(op) + # Once it is done, we can put at least one piece of information + # back in the cache: the value of this particular structure's + # field. 
+ structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + self.remember_field_value(structvalue, fieldvalue) + + def get_reconstructed(self, optimizer, valuemap): + assert self._lazy_setfield is None + cf = CachedField() + for structvalue, fieldvalue in self._cached_fields.iteritems(): + structvalue2 = structvalue.get_reconstructed(optimizer, valuemap) + fieldvalue2 = fieldvalue .get_reconstructed(optimizer, valuemap) + cf._cached_fields[structvalue2] = fieldvalue2 + return cf + class CachedArrayItems(object): def __init__(self): @@ -20,40 +114,23 @@ """Cache repeated heap accesses""" def __init__(self): - # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}} + # cached fields: {descr: CachedField} self.cached_fields = {} - self.known_heap_fields = {} + self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - # lazily written setfields (at most one per descr): {descr: op} - self.lazy_setfields = {} - self.lazy_setfields_descrs = [] # keys (at least) of previous dict def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() if True: self.force_all_lazy_setfields() - assert not self.lazy_setfields_descrs - assert not self.lazy_setfields else: - new.lazy_setfields_descrs = self.lazy_setfields_descrs - new.lazy_setfields = self.lazy_setfields + assert 0 # was: new.lazy_setfields = self.lazy_setfields for descr, d in self.cached_fields.items(): - newd = {} - new.cached_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - - for descr, d in self.known_heap_fields.items(): - newd = {} - new.known_heap_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - + new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap) + 
new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): newd = {} @@ -74,30 +151,16 @@ return new def clean_caches(self): + del self._lazy_setfields[:] self.cached_fields.clear() - self.known_heap_fields.clear() self.cached_arrayitems.clear() - def cache_field_value(self, descr, value, fieldvalue, write=False): - if write: - # when seeing a setfield, we have to clear the cache for the same - # field on any other structure, just in case they are aliasing - # each other - d = self.cached_fields[descr] = {} - else: - d = self.cached_fields.setdefault(descr, {}) - d[value] = fieldvalue - - def read_cached_field(self, descr, value): - # XXX self.cached_fields and self.lazy_setfields should probably - # be merged somehow - d = self.cached_fields.get(descr, None) - if d is None: - op = self.lazy_setfields.get(descr, None) - if op is None: - return None - return self.getvalue(op.getarg(1)) - return d.get(value, None) + def field_cache(self, descr): + try: + cf = self.cached_fields[descr] + except KeyError: + cf = self.cached_fields[descr] = CachedField() + return cf def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): d = self.cached_arrayitems.get(descr, None) @@ -157,11 +220,15 @@ self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or - opnum == rop.SETFIELD_RAW or - opnum == rop.SETARRAYITEM_GC or - opnum == rop.SETARRAYITEM_RAW or - opnum == rop.DEBUG_MERGE_POINT): + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == 
rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or @@ -180,8 +247,8 @@ for fielddescr in effectinfo.write_descrs_fields: self.force_lazy_setfield(fielddescr) try: - del self.cached_fields[fielddescr] - del self.known_heap_fields[fielddescr] + cf = self.cached_fields[fielddescr] + cf._cached_fields.clear() except KeyError: pass for arraydescr in effectinfo.write_descrs_arrays: @@ -195,10 +262,7 @@ # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. return - self.force_all_lazy_setfields() - elif op.is_final() or (not we_are_translated() and - op.getopnum() < 0): # escape() operations - self.force_all_lazy_setfields() + self.force_all_lazy_setfields() self.clean_caches() @@ -206,58 +270,54 @@ assert value.is_constant() newvalue = self.getvalue(value.box) if value is not newvalue: - for d in self.cached_fields.values(): - if value in d: - d[newvalue] = d[value] - # FIXME: Update the other caches too? - - - def force_lazy_setfield(self, descr, before_guard=False): + for cf in self.cached_fields.itervalues(): + if value in cf._cached_fields: + cf._cached_fields[newvalue] = cf._cached_fields[value] + + def force_lazy_setfield(self, descr): try: - op = self.lazy_setfields[descr] + cf = self.cached_fields[descr] except KeyError: return - del self.lazy_setfields[descr] - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - try: - heapvalue = self.known_heap_fields[op.getdescr()][value] - if fieldvalue is heapvalue: - return - except KeyError: - pass - self.next_optimization.propagate_forward(op) + cf.force_lazy_setfield(self) + def fixup_guard_situation(self): # hackish: reverse the order of the last two operations if it makes # sense to avoid a situation like "int_eq/setfield_gc/guard_true", # which the backend (at least the x86 backend) does not handle well. 
newoperations = self.optimizer.newoperations - if before_guard and len(newoperations) >= 2: - lastop = newoperations[-1] - prevop = newoperations[-2] - # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" - # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" - # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.getopnum() - lastop_args = lastop.getarglist() - if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE - or opnum == rop.CALL_RELEASE_GIL or prevop.is_ovf()) - and prevop.result not in lastop_args): - newoperations[-2] = lastop - newoperations[-1] = prevop + if len(newoperations) < 2: + return + lastop = newoperations[-1] + if (lastop.getopnum() != rop.SETFIELD_GC and + lastop.getopnum() != rop.SETARRAYITEM_GC): + return + # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" + # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" + # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" + prevop = newoperations[-2] + opnum = prevop.getopnum() + if not (prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE + or prevop.is_ovf()): + return + if prevop.result in lastop.getarglist(): + return + newoperations[-2] = lastop + newoperations[-1] = prevop def force_all_lazy_setfields(self): - if len(self.lazy_setfields_descrs) > 0: - for descr in self.lazy_setfields_descrs: - self.force_lazy_setfield(descr) - del self.lazy_setfields_descrs[:] + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + cf.force_lazy_setfield(self) def force_lazy_setfields_for_guard(self): pendingfields = [] - for descr in self.lazy_setfields_descrs: - try: - op = self.lazy_setfields[descr] - except KeyError: + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + op = cf._lazy_setfield + if op is None: continue # the only really interesting case that we need to handle in the # guards' resume data is that 
of a virtual object that is stored @@ -267,41 +327,27 @@ fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py - pendingfields.append((descr, value.box, + pendingfields.append((op.getdescr(), value.box, fieldvalue.get_key_box())) else: - self.force_lazy_setfield(descr, before_guard=True) + cf.force_lazy_setfield(self) + self.fixup_guard_situation() return pendingfields - def force_lazy_setfield_if_necessary(self, op, value, write=False): - try: - op1 = self.lazy_setfields[op.getdescr()] - except KeyError: - if write: - self.lazy_setfields_descrs.append(op.getdescr()) - else: - if self.getvalue(op1.getarg(0)) is not value: - self.force_lazy_setfield(op.getdescr()) - def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.getarg(0)) - self.force_lazy_setfield_if_necessary(op, value) - # check if the field was read from another getfield_gc just before - # or has been written to recently - fieldvalue = self.read_cached_field(op.getdescr(), value) + structvalue = self.getvalue(op.getarg(0)) + cf = self.field_cache(op.getdescr()) + fieldvalue = cf.getfield_from_cache(self, structvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return # default case: produce the operation - value.ensure_nonnull() + structvalue.ensure_nonnull() ###self.optimizer.optimize_default(op) self.emit_operation(op) # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.getdescr(), value, fieldvalue) - # keep track of what's on the heap - d = self.known_heap_fields.setdefault(op.getdescr(), {}) - d[value] = fieldvalue + cf.remember_field_value(structvalue, fieldvalue) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], @@ -310,14 +356,8 @@ (op.getdescr().repr_of_descr())) raise BogusPureField # - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - cached_fieldvalue = 
self.read_cached_field(op.getdescr(), value) - if fieldvalue is not cached_fieldvalue: - self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.getdescr()] = op - # remember the result of future reads of the field - self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) + cf = self.field_cache(op.getdescr()) + cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC(self, op): value = self.getvalue(op.getarg(0)) diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. 
Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -116,7 +116,6 @@ /* Begin PYPY hacks */ /* #include "Python.h" */ -#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 #define HAVE_UINT32_T #define HAVE_INT32_T #define HAVE_UINT64_T diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -126,8 +126,16 @@ _run_compiled_module(space, w_modulename, filename, w_file, w_mod) return w_mod + at unwrap_spec(filename=str) +def load_dynamic(space, w_modulename, filename, w_file=None): + if not space.config.objspace.usemodules.cpyext: + raise OperationError(space.w_ImportError, space.wrap( + "Not implemented")) + importing.load_c_extension(space, filename, space.str_w(w_modulename)) + return importing.check_sys_modules(space, w_modulename) + def new_module(space, w_name): - return 
space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/readline.py @@ -0,0 +1,408 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Alex Gaynor +# Antonio Cuni +# Armin Rigo +# Holger Krekel +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright 
notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""A compatibility wrapper reimplementing the 'readline' standard module +on top of pyrepl. Not all functionalities are supported. Contains +extensions for multiline input. +""" + +import sys, os +from pyrepl import commands +from pyrepl.historical_reader import HistoricalReader +from pyrepl.completing_reader import CompletingReader +from pyrepl.unix_console import UnixConsole, _error + + +ENCODING = 'latin1' # XXX hard-coded + +__all__ = ['add_history', + 'clear_history', + 'get_begidx', + 'get_completer', + 'get_completer_delims', + 'get_current_history_length', + 'get_endidx', + 'get_history_item', + 'get_history_length', + 'get_line_buffer', + 'insert_text', + 'parse_and_bind', + 'read_history_file', + 'read_init_file', + 'redisplay', + 'remove_history_item', + 'replace_history_item', + 'set_completer', + 'set_completer_delims', + 'set_history_length', + 'set_pre_input_hook', + 'set_startup_hook', + 'write_history_file', + # ---- multiline extensions ---- + 'multiline_input', + ] + +# ____________________________________________________________ + +class ReadlineConfig(object): + readline_completer = None + completer_delims = dict.fromkeys(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?') + +class ReadlineAlikeReader(HistoricalReader, CompletingReader): + + assume_immutable_completions = False + use_brackets = False + sort_in_column = True + + def error(self, msg="none"): + pass # don't show error messages by default + + def 
get_stem(self): + b = self.buffer + p = self.pos - 1 + completer_delims = self.config.completer_delims + while p >= 0 and b[p] not in completer_delims: + p -= 1 + return ''.join(b[p+1:self.pos]) + + def get_completions(self, stem): + result = [] + function = self.config.readline_completer + if function is not None: + try: + stem = str(stem) # rlcompleter.py seems to not like unicode + except UnicodeEncodeError: + pass # but feed unicode anyway if we have no choice + state = 0 + while True: + try: + next = function(stem, state) + except: + break + if not isinstance(next, str): + break + result.append(next) + state += 1 + # emulate the behavior of the standard readline that sorts + # the completions before displaying them. + result.sort() + return result + + def get_trimmed_history(self, maxlength): + if maxlength >= 0: + cut = len(self.history) - maxlength + if cut < 0: + cut = 0 + else: + cut = 0 + return self.history[cut:] + + # --- simplified support for reading multiline Python statements --- + + # This duplicates small parts of pyrepl.python_reader. I'm not + # reusing the PythonicReader class directly for two reasons. One is + # to try to keep as close as possible to CPython's prompt. The + # other is that it is the readline module that we are ultimately + # implementing here, and I don't want the built-in raw_input() to + # start trying to read multiline inputs just because what the user + # typed look like valid but incomplete Python code. So we get the + # multiline feature only when using the multiline_input() function + # directly (see _pypy_interact.py). 
+ + more_lines = None + + def collect_keymap(self): + return super(ReadlineAlikeReader, self).collect_keymap() + ( + (r'\n', 'maybe-accept'),) + + def __init__(self, console): + super(ReadlineAlikeReader, self).__init__(console) + self.commands['maybe_accept'] = maybe_accept + self.commands['maybe-accept'] = maybe_accept + + def after_command(self, cmd): + super(ReadlineAlikeReader, self).after_command(cmd) + if self.more_lines is None: + # Force single-line input if we are in raw_input() mode. + # Although there is no direct way to add a \n in this mode, + # multiline buffers can still show up using various + # commands, e.g. navigating the history. + try: + index = self.buffer.index("\n") + except ValueError: + pass + else: + self.buffer = self.buffer[:index] + if self.pos > len(self.buffer): + self.pos = len(self.buffer) + +class maybe_accept(commands.Command): + def do(self): + r = self.reader + r.dirty = 1 # this is needed to hide the completion menu, if visible + # + # if there are already several lines and the cursor + # is not on the last one, always insert a new \n. 
+ text = r.get_unicode() + if "\n" in r.buffer[r.pos:]: + r.insert("\n") + elif r.more_lines is not None and r.more_lines(text): + r.insert("\n") + else: + self.finish = 1 + +# ____________________________________________________________ + +class _ReadlineWrapper(object): + f_in = 0 + f_out = 1 + reader = None + saved_history_length = -1 + startup_hook = None + config = ReadlineConfig() + + def get_reader(self): + if self.reader is None: + console = UnixConsole(self.f_in, self.f_out, encoding=ENCODING) + self.reader = ReadlineAlikeReader(console) + self.reader.config = self.config + return self.reader + + def raw_input(self, prompt=''): + try: + reader = self.get_reader() + except _error: + return _old_raw_input(prompt) + if self.startup_hook is not None: + self.startup_hook() + reader.ps1 = prompt + return reader.readline() + + def multiline_input(self, more_lines, ps1, ps2): + """Read an input on possibly multiple lines, asking for more + lines as long as 'more_lines(unicodetext)' returns an object whose + boolean value is true. 
+ """ + reader = self.get_reader() + saved = reader.more_lines + try: + reader.more_lines = more_lines + reader.ps1 = reader.ps2 = ps1 + reader.ps3 = reader.ps4 = ps2 + return reader.readline() + finally: + reader.more_lines = saved + + def parse_and_bind(self, string): + pass # XXX we don't support parsing GNU-readline-style init files + + def set_completer(self, function=None): + self.config.readline_completer = function + + def get_completer(self): + return self.config.readline_completer + + def set_completer_delims(self, string): + self.config.completer_delims = dict.fromkeys(string) + + def get_completer_delims(self): + chars = self.config.completer_delims.keys() + chars.sort() + return ''.join(chars) + + def _histline(self, line): + return unicode(line.rstrip('\n'), ENCODING) + + def get_history_length(self): + return self.saved_history_length + + def set_history_length(self, length): + self.saved_history_length = length + + def get_current_history_length(self): + return len(self.get_reader().history) + + def read_history_file(self, filename='~/.history'): + # multiline extension (really a hack) for the end of lines that + # are actually continuations inside a single multiline_input() + # history item: we use \r\n instead of just \n. If the history + # file is passed to GNU readline, the extra \r are just ignored. 
+ history = self.get_reader().history + f = open(os.path.expanduser(filename), 'r') + buffer = [] + for line in f: + if line.endswith('\r\n'): + buffer.append(line) + else: + line = self._histline(line) + if buffer: + line = ''.join(buffer).replace('\r', '') + line + del buffer[:] + if line: + history.append(line) + f.close() + + def write_history_file(self, filename='~/.history'): + maxlength = self.saved_history_length + history = self.get_reader().get_trimmed_history(maxlength) + f = open(os.path.expanduser(filename), 'w') + for entry in history: + if isinstance(entry, unicode): + entry = entry.encode(ENCODING) + entry = entry.replace('\n', '\r\n') # multiline history support + f.write(entry + '\n') + f.close() + + def clear_history(self): + del self.get_reader().history[:] + + def get_history_item(self, index): + history = self.get_reader().history + if 1 <= index <= len(history): + return history[index-1] + else: + return None # blame readline.c for not raising + + def remove_history_item(self, index): + history = self.get_reader().history + if 0 <= index < len(history): + del history[index] + else: + raise ValueError("No history item at position %d" % index) + # blame readline.c for raising ValueError + + def replace_history_item(self, index, line): + history = self.get_reader().history + if 0 <= index < len(history): + history[index] = self._histline(line) + else: + raise ValueError("No history item at position %d" % index) + # blame readline.c for raising ValueError + + def add_history(self, line): + self.get_reader().history.append(self._histline(line)) + + def set_startup_hook(self, function=None): + self.startup_hook = function + + def get_line_buffer(self): + return self.get_reader().get_buffer() + + def _get_idxs(self): + start = cursor = self.get_reader().pos + buf = self.get_line_buffer() + for i in xrange(cursor - 1, -1, -1): + if buf[i] in self.get_completer_delims(): + break + start = i + return start, cursor + + def get_begidx(self): + return 
self._get_idxs()[0] + + def get_endidx(self): + return self._get_idxs()[1] + + def insert_text(self, text): + return self.get_reader().insert(text) + + +_wrapper = _ReadlineWrapper() + +# ____________________________________________________________ +# Public API + +parse_and_bind = _wrapper.parse_and_bind +set_completer = _wrapper.set_completer +get_completer = _wrapper.get_completer +set_completer_delims = _wrapper.set_completer_delims +get_completer_delims = _wrapper.get_completer_delims +get_history_length = _wrapper.get_history_length +set_history_length = _wrapper.set_history_length +get_current_history_length = _wrapper.get_current_history_length +read_history_file = _wrapper.read_history_file +write_history_file = _wrapper.write_history_file +clear_history = _wrapper.clear_history +get_history_item = _wrapper.get_history_item +remove_history_item = _wrapper.remove_history_item +replace_history_item = _wrapper.replace_history_item +add_history = _wrapper.add_history +set_startup_hook = _wrapper.set_startup_hook +get_line_buffer = _wrapper.get_line_buffer +get_begidx = _wrapper.get_begidx +get_endidx = _wrapper.get_endidx +insert_text = _wrapper.insert_text + +# Extension +multiline_input = _wrapper.multiline_input + +# Internal hook +_get_reader = _wrapper.get_reader + +# ____________________________________________________________ +# Stubs + +def _make_stub(_name, _ret): + def stub(*args, **kwds): + import warnings + warnings.warn("readline.%s() not implemented" % _name, stacklevel=2) + stub.func_name = _name + globals()[_name] = stub + +for _name, _ret in [ + ('read_init_file', None), + ('redisplay', None), + ('set_pre_input_hook', None), + ]: + assert _name not in globals(), _name + _make_stub(_name, _ret) + +# ____________________________________________________________ + +def _setup(): + global _old_raw_input + if _old_raw_input is not None: + return # don't run _setup twice + + try: + f_in = sys.stdin.fileno() + f_out = sys.stdout.fileno() + except 
(AttributeError, ValueError): + return + if not os.isatty(f_in) or not os.isatty(f_out): + return + + _wrapper.f_in = f_in + _wrapper.f_out = f_out + + if hasattr(sys, '__raw_input__'): # PyPy + _old_raw_input = sys.__raw_input__ + sys.__raw_input__ = _wrapper.raw_input + else: + # this is not really what readline.c does. Better than nothing I guess + import __builtin__ + _old_raw_input = __builtin__.raw_input + __builtin__.raw_input = _wrapper.raw_input + +_old_raw_input = None +_setup() diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -248,3 +248,8 @@ """This is synonymous to ``raise SystemExit''. It will cause the current thread to exit silently unless the exception is caught.""" raise OperationError(space.w_SystemExit, space.w_None) + +def interrupt_main(space): + """Raise a KeyboardInterrupt in the main thread. +A subthread can use this function to interrupt the main thread.""" + space.check_signal_action.set_interrupt() diff --git a/pypy/module/imp/app_imp.py b/pypy/module/imp/app_imp.py deleted file mode 100644 --- a/pypy/module/imp/app_imp.py +++ /dev/null @@ -1,5 +0,0 @@ - - -def load_dynamic(name, pathname, file=None): - """Always raises ah ImportError on pypy""" - raise ImportError('Not implemented') diff --git a/lib-python/modified-2.7.0/distutils/msvc9compiler.py b/lib-python/modified-2.7.0/distutils/msvc9compiler.py --- a/lib-python/modified-2.7.0/distutils/msvc9compiler.py +++ b/lib-python/modified-2.7.0/distutils/msvc9compiler.py @@ -644,6 +644,7 @@ temp_manifest = os.path.join( build_temp, os.path.basename(output_filename) + ".manifest") + ld_args.append('/MANIFEST') ld_args.append('/MANIFESTFILE:' + temp_manifest) if extra_preargs: diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -4,6 +4,8 @@ from pypy.objspace.std.longobject 
import W_LongObject from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask +from pypy.rlib.rbigint import rbigint +from pypy.rlib.rarithmetic import intmask PyLong_Check, PyLong_CheckExact = build_type_checkers("Long") @@ -177,4 +179,31 @@ assert isinstance(w_long, W_LongObject) return w_long.num.sign +UCHARP = rffi.CArrayPtr(rffi.UCHAR) + at cpython_api([UCHARP, rffi.SIZE_T, rffi.INT_real, rffi.INT_real], PyObject) +def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): + little_endian = rffi.cast(lltype.Signed, little_endian) + signed = rffi.cast(lltype.Signed, signed) + result = rbigint() + negative = False + + for i in range(0, n): + if little_endian: + c = intmask(bytes[i]) + else: + c = intmask(bytes[n - i - 1]) + if i == 0 and signed and c & 0x80: + negative = True + if negative: + c = c ^ 0xFF + digit = rbigint.fromint(c) + + result = result.lshift(8) + result = result.add(digit) + + if negative: + result = result.neg() + + return space.newlong_from_rbigint(result) + diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/lib_pypy/pyrepl/pygame_keymap.py b/lib_pypy/pyrepl/pygame_keymap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/pygame_keymap.py @@ -0,0 +1,250 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# keyspec parsing for a pygame console. currently this is simply copy +# n' change from the unix (ie. trad terminal) variant; probably some +# refactoring will happen when I work out how it will work best. + +# A key is represented as *either* + +# a) a (keycode, meta, ctrl) sequence (used for special keys such as +# f1, the up arrow key, etc) +# b) a (unichar, meta, ctrl) sequence (used for printable chars) + +# Because we allow keystokes like '\\C-xu', I'll use the same trick as +# the unix keymap module uses. + +# '\\C-a' --> (K_a, 0, 1) + +# XXX it's actually possible to test this module, so it should have a +# XXX test suite. 
+ +from pygame.locals import * + +_escapes = { + '\\': K_BACKSLASH, + "'" : K_QUOTE, + '"' : K_QUOTEDBL, +# 'a' : '\a', + 'b' : K_BACKSLASH, + 'e' : K_ESCAPE, +# 'f' : '\f', + 'n' : K_RETURN, + 'r' : K_RETURN, + 't' : K_TAB, +# 'v' : '\v' + } + +_keynames = { + 'backspace' : K_BACKSPACE, + 'delete' : K_DELETE, + 'down' : K_DOWN, + 'end' : K_END, + 'enter' : K_KP_ENTER, + 'escape' : K_ESCAPE, + 'f1' : K_F1, 'f2' : K_F2, 'f3' : K_F3, 'f4' : K_F4, + 'f5' : K_F5, 'f6' : K_F6, 'f7' : K_F7, 'f8' : K_F8, + 'f9' : K_F9, 'f10': K_F10,'f11': K_F11,'f12': K_F12, + 'f13': K_F13,'f14': K_F14,'f15': K_F15, + 'home' : K_HOME, + 'insert' : K_INSERT, + 'left' : K_LEFT, + 'pgdown' : K_PAGEDOWN, 'page down' : K_PAGEDOWN, + 'pgup' : K_PAGEUP, 'page up' : K_PAGEUP, + 'return' : K_RETURN, + 'right' : K_RIGHT, + 'space' : K_SPACE, + 'tab' : K_TAB, + 'up' : K_UP, + } + +class KeySpecError(Exception): + pass + +def _parse_key1(key, s): + ctrl = 0 + meta = 0 + ret = '' + while not ret and s < len(key): + if key[s] == '\\': + c = key[s+1].lower() + if _escapes.has_key(c): + ret = _escapes[c] + s += 2 + elif c == "c": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\C must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if ctrl: + raise KeySpecError, "doubled \\C- (char %d of %s)"%( + s + 1, repr(key)) + ctrl = 1 + s += 3 + elif c == "m": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\M must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if meta: + raise KeySpecError, "doubled \\M- (char %d of %s)"%( + s + 1, repr(key)) + meta = 1 + s += 3 + elif c.isdigit(): + n = key[s+1:s+4] + ret = chr(int(n, 8)) + s += 4 + elif c == 'x': + n = key[s+2:s+4] + ret = chr(int(n, 16)) + s += 4 + elif c == '<': + t = key.find('>', s) + if t == -1: + raise KeySpecError, \ + "unterminated \\< starting at char %d of %s"%( + s + 1, repr(key)) + try: + ret = _keynames[key[s+2:t].lower()] + s = t + 1 + except KeyError: + raise KeySpecError, \ + "unrecognised keyname `%s' at char %d 
of %s"%( + key[s+2:t], s + 2, repr(key)) + if ret is None: + return None, s + else: + raise KeySpecError, \ + "unknown backslash escape %s at char %d of %s"%( + `c`, s + 2, repr(key)) + else: + if ctrl: + ret = chr(ord(key[s]) & 0x1f) # curses.ascii.ctrl() + ret = unicode(ret) + else: + ret = unicode(key[s]) + s += 1 + return (ret, meta, ctrl), s + +def parse_keys(key): + s = 0 + r = [] + while s < len(key): + k, s = _parse_key1(key, s) + if k is None: + return None + r.append(k) + return tuple(r) + +def _compile_keymap(keymap): + r = {} + for key, value in keymap.items(): + r.setdefault(key[0], {})[key[1:]] = value + for key, value in r.items(): + if value.has_key(()): + if len(value) <> 1: + raise KeySpecError, \ + "key definitions for %s clash"%(value.values(),) + else: + r[key] = value[()] + else: + r[key] = _compile_keymap(value) + return r + +def compile_keymap(keymap): + r = {} + for key, value in keymap: + k = parse_keys(key) + if value is None and r.has_key(k): + del r[k] + if k is not None: + r[k] = value + return _compile_keymap(r) + +def keyname(key): + longest_match = '' + longest_match_name = '' + for name, keyseq in keyset.items(): + if keyseq and key.startswith(keyseq) and \ + len(keyseq) > len(longest_match): + longest_match = keyseq + longest_match_name = name + if len(longest_match) > 0: + return longest_match_name, len(longest_match) + else: + return None, 0 + +_unescapes = {'\r':'\\r', '\n':'\\n', '\177':'^?'} + +#for k,v in _escapes.items(): +# _unescapes[v] = k + +def unparse_key(keyseq): + if not keyseq: + return '' + name, s = keyname(keyseq) + if name: + if name <> 'escape' or s == len(keyseq): + return '\\<' + name + '>' + unparse_key(keyseq[s:]) + else: + return '\\M-' + unparse_key(keyseq[1:]) + else: + c = keyseq[0] + r = keyseq[1:] + if c == '\\': + p = '\\\\' + elif _unescapes.has_key(c): + p = _unescapes[c] + elif ord(c) < ord(' '): + p = '\\C-%s'%(chr(ord(c)+96),) + elif ord(' ') <= ord(c) <= ord('~'): + p = c + else: + p = 
'\\%03o'%(ord(c),) + return p + unparse_key(r) + +def _unparse_keyf(keyseq): + if not keyseq: + return [] + name, s = keyname(keyseq) + if name: + if name <> 'escape' or s == len(keyseq): + return [name] + _unparse_keyf(keyseq[s:]) + else: + rest = _unparse_keyf(keyseq[1:]) + return ['M-'+rest[0]] + rest[1:] + else: + c = keyseq[0] + r = keyseq[1:] + if c == '\\': + p = '\\' + elif _unescapes.has_key(c): + p = _unescapes[c] + elif ord(c) < ord(' '): + p = 'C-%s'%(chr(ord(c)+96),) + elif ord(' ') <= ord(c) <= ord('~'): + p = c + else: + p = '\\%03o'%(ord(c),) + return [p] + _unparse_keyf(r) + +def unparse_keyf(keyseq): + return " ".join(_unparse_keyf(keyseq)) diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_extmodules.py @@ -0,0 +1,68 @@ +import sys +import pytest + +from pypy.config.pypyoption import get_pypy_config +from pypy.objspace.std import StdObjSpace +from pypy.tool.udir import udir + +mod_init = """ +from pypy.interpreter.mixedmodule import MixedModule + +import time + +class Module(MixedModule): + + appleveldefs = {} + + interpleveldefs = { + 'clock' : 'interp_time.clock', + 'time' : 'interp_time.time_', + 'sleep' : 'interp_time.sleep', + } +""" + +mod_interp = """ +import time + +from pypy.interpreter.gateway import unwrap_spec + +def clock(space): + return space.wrap(time.clock()) + +def time_(space): + return space.wrap(time.time()) + + at unwrap_spec(seconds=float) +def sleep(space, seconds): + time.sleep(seconds) +""" + +old_sys_path = [] + +def init_extmodule_code(): + pkg = udir.join("testext") + pkg.ensure(dir=True) + pkg.join("__init__.py").write("# package") + mod = pkg.join("extmod") + mod.ensure(dir=True) + mod.join("__init__.py").write(mod_init) + mod.join("interp_time.py").write(mod_interp) + +class AppTestExtModules(object): + def setup_class(cls): + init_extmodule_code() + conf = get_pypy_config() + conf.objspace.extmodules = 
'testext.extmod' + old_sys_path[:] = sys.path[:] + sys.path.insert(0, str(udir)) + space = StdObjSpace(conf) + cls.space = space + + def teardown_class(cls): + sys.path[:] = old_sys_path + + @pytest.mark.skipif("config.option.runappdirect") + def test_import(self): + import extmod + assert extmod.__file__.endswith('extmod') + assert type(extmod.time()) is float diff --git a/lib_pypy/pyrepl/tests/infrastructure.py b/lib_pypy/pyrepl/tests/infrastructure.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/infrastructure.py @@ -0,0 +1,82 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.reader import Reader +from pyrepl.console import Console, Event +import unittest +import sys + +class EqualsAnything(object): + def __eq__(self, other): + return True +EA = EqualsAnything() + +class TestConsole(Console): + height = 24 + width = 80 + encoding = 'utf-8' + + def __init__(self, events, testcase, verbose=False): + self.events = events + self.next_screen = None + self.verbose = verbose + self.testcase = testcase + + def refresh(self, screen, xy): + if self.next_screen is not None: + self.testcase.assertEqual( + screen, self.next_screen, + "[ %s != %s after %r ]"%(screen, self.next_screen, + self.last_event_name)) + + def get_event(self, block=1): + ev, sc = self.events.pop(0) + self.next_screen = sc + if not isinstance(ev, tuple): + ev = (ev,) + self.last_event_name = ev[0] + if self.verbose: + print "event", ev + return Event(*ev) + +class TestReader(Reader): + def get_prompt(self, lineno, cursor_on_line): + return '' + def refresh(self): + Reader.refresh(self) + self.dirty = True + +class ReaderTestCase(unittest.TestCase): + def run_test(self, test_spec, reader_class=TestReader): + # remember to finish your test_spec with 'accept' or similar! 
+ con = TestConsole(test_spec, self) + reader = reader_class(con) + reader.readline() + +class BasicTestRunner: + def run(self, test): + result = unittest.TestResult() + test(result) + return result + +def run_testcase(testclass): + suite = unittest.makeSuite(testclass) + runner = unittest.TextTestRunner(sys.stdout, verbosity=1) + result = runner.run(suite) + diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -144,3 +144,20 @@ """), ]) assert module.from_string() == 0x1234 + + def test_frombytearray(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC", 2, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x9ABC + assert module.from_bytearray(True, True) == -0x6543 + assert module.from_bytearray(False, False) == 0xBC9A + assert module.from_bytearray(False, True) == -0x4365 + diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) 
+PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -128,7 +130,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" 
faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" block @@ -148,7 +150,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -164,7 +165,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -89,6 +89,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -487,7 +487,9 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime - + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) + # experimental operations in support of thread cloning, only # implemented by the Mark&Sweep GC 'gc_x_swap_pool': LLOp(canraise=(MemoryError,), canunwindgc=True), diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- 
a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -365,7 +365,11 @@ def setbuiltinmodule(self, importname): """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" - fullname = "pypy.module.%s" % importname + if '.' in importname: + fullname = importname + importname = fullname.rsplit('.', 1)[1] + else: + fullname = "pypy.module.%s" % importname Module = __import__(fullname, None, None, ["Module"]).Module @@ -428,6 +432,11 @@ if value and name not in modules: modules.append(name) + if self.config.objspace.extmodules: + for name in self.config.objspace.extmodules.split(','): + if name not in modules: + modules.append(name) + # a bit of custom logic: time2 or rctime take precedence over time # XXX this could probably be done as a "requires" in the config if ('time2' in modules or 'rctime' in modules) and 'time' in modules: @@ -745,7 +754,12 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - items = [] + # If we know the expected length we can preallocate. 
+ if expected_length == -1: + items = [] + else: + items = [None] * expected_length + idx = 0 while True: try: w_item = self.next(w_iterator) @@ -753,19 +767,22 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and len(items) == expected_length: + if expected_length != -1 and idx == expected_length: raise OperationError(self.w_ValueError, self.wrap("too many values to unpack")) - items.append(w_item) - if expected_length != -1 and len(items) < expected_length: - i = len(items) - if i == 1: + if expected_length == -1: + items.append(w_item) + else: + items[idx] = w_item + idx += 1 + if expected_length != -1 and idx < expected_length: + if idx == 1: plural = "" else: plural = "s" raise OperationError(self.w_ValueError, self.wrap("need more than %d value%s to unpack" % - (i, plural))) + (idx, plural))) return items unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, @@ -1333,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus 
+extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -211,8 +211,11 @@ def ll_stringslice_minusone(s): return s.ll_substring(0, s.ll_strlen()-1) - def ll_split_chr(RESULT, s, c): - return RESULT.ll_convert_from_array(s.ll_split_chr(c)) + def ll_split_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_split_chr(c, max)) + + def ll_rsplit_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_rsplit_chr(c, max)) def ll_int(s, base): if not 2 <= base <= 36: diff --git a/lib_pypy/pyrepl/console.py b/lib_pypy/pyrepl/console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/console.py @@ -0,0 +1,93 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission 
notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +class Event: + """An Event. `evt' is 'key' or somesuch.""" + + def __init__(self, evt, data, raw=''): + self.evt = evt + self.data = data + self.raw = raw + + def __repr__(self): + return 'Event(%r, %r)'%(self.evt, self.data) + +class Console: + """Attributes: + + screen, + height, + width, + """ + + def refresh(self, screen, xy): + pass + + def prepare(self): + pass + + def restore(self): + pass + + def move_cursor(self, x, y): + pass + + def set_cursor_vis(self, vis): + pass + + def getheightwidth(self): + """Return (height, width) where height and width are the height + and width of the terminal window in characters.""" + pass + + def get_event(self, block=1): + """Return an Event instance. Returns None if |block| is false + and there is no event pending, otherwise waits for the + completion of an event.""" + pass + + def beep(self): + pass + + def clear(self): + """Wipe the screen""" + pass + + def finish(self): + """Move the cursor to the end of the display and otherwise get + ready for end. XXX could be merged with restore? 
Hmm.""" + pass + + def flushoutput(self): + """Flush all output to the screen (assuming there's some + buffering going on somewhere).""" + pass + + def forgetinput(self): + """Forget all pending, but not yet processed input.""" + pass + + def getpending(self): + """Return the characters that have been typed but not yet + processed.""" + pass + + def wait(self): + """Wait for an event.""" + pass diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/reader.py @@ -0,0 +1,614 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import types +from pyrepl import unicodedata_ +from pyrepl import commands +from pyrepl import input + +def _make_unctrl_map(): + uc_map = {} + for c in map(unichr, range(256)): + if unicodedata_.category(c)[0] <> 'C': + uc_map[c] = c + for i in range(32): + c = unichr(i) + uc_map[c] = u'^' + unichr(ord('A') + i - 1) + uc_map['\t'] = ' ' # display TABs as 4 characters + uc_map['\177'] = u'^?' 
+ for i in range(256): + c = unichr(i) + if not uc_map.has_key(c): + uc_map[c] = u'\\%03o'%i + return uc_map + +# disp_str proved to be a bottleneck for large inputs, so it's been +# rewritten in C; it's not required though. +try: + raise ImportError # currently it's borked by the unicode support + + from _pyrepl_utils import disp_str, init_unctrl_map + + init_unctrl_map(_make_unctrl_map()) + + del init_unctrl_map +except ImportError: + def _my_unctrl(c, u=_make_unctrl_map()): + if c in u: + return u[c] + else: + if unicodedata_.category(c).startswith('C'): + return '\u%04x'%(ord(c),) + else: + return c + + def disp_str(buffer, join=''.join, uc=_my_unctrl): + """ disp_str(buffer:string) -> (string, [int]) + + Return the string that should be the printed represenation of + |buffer| and a list detailing where the characters of |buffer| + get used up. E.g.: + + >>> disp_str(chr(3)) + ('^C', [1, 0]) + + the list always contains 0s or 1s at present; it could conceivably + go higher as and when unicode support happens.""" + s = map(uc, buffer) + return (join(s), + map(ord, join(map(lambda x:'\001'+(len(x)-1)*'\000', s)))) + + del _my_unctrl + +del _make_unctrl_map + +# syntax classes: + +[SYNTAX_WHITESPACE, + SYNTAX_WORD, + SYNTAX_SYMBOL] = range(3) + +def make_default_syntax_table(): + # XXX perhaps should use some unicodedata here? 
+ st = {} + for c in map(unichr, range(256)): + st[c] = SYNTAX_SYMBOL + for c in [a for a in map(unichr, range(256)) if a.isalpha()]: + st[c] = SYNTAX_WORD + st[u'\n'] = st[u' '] = SYNTAX_WHITESPACE + return st + +default_keymap = tuple( + [(r'\C-a', 'beginning-of-line'), + (r'\C-b', 'left'), + (r'\C-c', 'interrupt'), + (r'\C-d', 'delete'), + (r'\C-e', 'end-of-line'), + (r'\C-f', 'right'), + (r'\C-g', 'cancel'), + (r'\C-h', 'backspace'), + (r'\C-j', 'accept'), + (r'\', 'accept'), + (r'\C-k', 'kill-line'), + (r'\C-l', 'clear-screen'), + (r'\C-m', 'accept'), + (r'\C-q', 'quoted-insert'), + (r'\C-t', 'transpose-characters'), + (r'\C-u', 'unix-line-discard'), + (r'\C-v', 'quoted-insert'), + (r'\C-w', 'unix-word-rubout'), + (r'\C-x\C-u', 'upcase-region'), + (r'\C-y', 'yank'), + (r'\C-z', 'suspend'), + + (r'\M-b', 'backward-word'), + (r'\M-c', 'capitalize-word'), + (r'\M-d', 'kill-word'), + (r'\M-f', 'forward-word'), + (r'\M-l', 'downcase-word'), + (r'\M-t', 'transpose-words'), + (r'\M-u', 'upcase-word'), + (r'\M-y', 'yank-pop'), + (r'\M--', 'digit-arg'), + (r'\M-0', 'digit-arg'), + (r'\M-1', 'digit-arg'), + (r'\M-2', 'digit-arg'), + (r'\M-3', 'digit-arg'), + (r'\M-4', 'digit-arg'), + (r'\M-5', 'digit-arg'), + (r'\M-6', 'digit-arg'), + (r'\M-7', 'digit-arg'), + (r'\M-8', 'digit-arg'), + (r'\M-9', 'digit-arg'), + #(r'\M-\n', 'insert-nl'), + ('\\\\', 'self-insert')] + \ + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\', 'up'), + (r'\', 'down'), + (r'\', 'left'), + (r'\', 'right'), + (r'\', 'quoted-insert'), + (r'\', 'delete'), + (r'\', 'backspace'), + (r'\M-\', 'backward-kill-word'), + (r'\', 'end'), + (r'\', 'home'), + (r'\', 'help'), + (r'\EOF', 'end'), # the entries in the terminfo database for xterms + (r'\EOH', 'home'), # seem to be wrong. 
this is a less than ideal + # workaround + ]) + +del c # from the listcomps + +class Reader(object): + """The Reader class implements the bare bones of a command reader, + handling such details as editing and cursor motion. What it does + not support are such things as completion or history support - + these are implemented elsewhere. + + Instance variables of note include: + + * buffer: + A *list* (*not* a string at the moment :-) containing all the + characters that have been entered. + * console: + Hopefully encapsulates the OS dependent stuff. + * pos: + A 0-based index into `buffer' for where the insertion point + is. + * screeninfo: + Ahem. This list contains some info needed to move the + insertion point around reasonably efficiently. I'd like to + get rid of it, because its contents are obtuse (to put it + mildly) but I haven't worked out if that is possible yet. + * cxy, lxy: + the position of the insertion point in screen ... XXX + * syntax_table: + Dictionary mapping characters to `syntax class'; read the + emacs docs to see what this means :-) + * commands: + Dictionary mapping command names to command classes. + * arg: + The emacs-style prefix argument. It will be None if no such + argument has been provided. + * dirty: + True if we need to refresh the display. + * kill_ring: + The emacs-style kill-ring; manipulated with yank & yank-pop + * ps1, ps2, ps3, ps4: + prompts. ps1 is the prompt for a one-line input; for a + multiline input it looks like: + ps2> first line of input goes here + ps3> second and further + ps3> lines get ps3 + ... + ps4> and the last one gets ps4 + As with the usual top-level, you can set these to instances if + you like; str() will be called on them (once) at the beginning + of each command. Don't put really long or newline containing + strings here, please! + This is just the default policy; you can change it freely by + overriding get_prompt() (and indeed some standard subclasses + do). 
+ * finished: + handle1 will set this to a true value if a command signals + that we're done. + """ + + help_text = """\ +This is pyrepl. Hear my roar. + +Helpful text may appear here at some point in the future when I'm +feeling more loquacious than I am now.""" + + msg_at_bottom = True + + def __init__(self, console): + self.buffer = [] + self.ps1 = "->> " + self.ps2 = "/>> " + self.ps3 = "|.. " + self.ps4 = "\__ " + self.kill_ring = [] + self.arg = None + self.finished = 0 + self.console = console + self.commands = {} + self.msg = '' + for v in vars(commands).values(): + if ( isinstance(v, type) + and issubclass(v, commands.Command) + and v.__name__[0].islower() ): + self.commands[v.__name__] = v + self.commands[v.__name__.replace('_', '-')] = v + self.syntax_table = make_default_syntax_table() + self.input_trans_stack = [] + self.keymap = self.collect_keymap() + self.input_trans = input.KeymapTranslator( + self.keymap, + invalid_cls='invalid-key', + character_cls='self-insert') + + def collect_keymap(self): + return default_keymap + + def calc_screen(self): + """The purpose of this method is to translate changes in + self.buffer into changes in self.screen. Currently it rips + everything down and starts from scratch, which whilst not + especially efficient is certainly simple(r). 
+ """ + lines = self.get_unicode().split("\n") + screen = [] + screeninfo = [] + w = self.console.width - 1 + p = self.pos + for ln, line in zip(range(len(lines)), lines): + ll = len(line) + if 0 <= p <= ll: + if self.msg and not self.msg_at_bottom: + for mline in self.msg.split("\n"): + screen.append(mline) + screeninfo.append((0, [])) + self.lxy = p, ln + prompt = self.get_prompt(ln, ll >= p >= 0) + while '\n' in prompt: + pre_prompt, _, prompt = prompt.partition('\n') + screen.append(pre_prompt) + screeninfo.append((0, [])) + p -= ll + 1 + prompt, lp = self.process_prompt(prompt) + l, l2 = disp_str(line) + wrapcount = (len(l) + lp) / w + if wrapcount == 0: + screen.append(prompt + l) + screeninfo.append((lp, l2+[1])) + else: + screen.append(prompt + l[:w-lp] + "\\") + screeninfo.append((lp, l2[:w-lp])) + for i in range(-lp + w, -lp + wrapcount*w, w): + screen.append(l[i:i+w] + "\\") + screeninfo.append((0, l2[i:i + w])) + screen.append(l[wrapcount*w - lp:]) + screeninfo.append((0, l2[wrapcount*w - lp:]+[1])) + self.screeninfo = screeninfo + self.cxy = self.pos2xy(self.pos) + if self.msg and self.msg_at_bottom: + for mline in self.msg.split("\n"): + screen.append(mline) + screeninfo.append((0, [])) + return screen + + def process_prompt(self, prompt): + """ Process the prompt. + + This means calculate the length of the prompt. The character \x01 + and \x02 are used to bracket ANSI control sequences and need to be + excluded from the length calculation. So also a copy of the prompt + is returned with these control characters removed. 
""" + + out_prompt = '' + l = len(prompt) + pos = 0 + while True: + s = prompt.find('\x01', pos) + if s == -1: + break + e = prompt.find('\x02', s) + if e == -1: + break + # Found start and end brackets, subtract from string length + l = l - (e-s+1) + out_prompt += prompt[pos:s] + prompt[s+1:e] + pos = e+1 + out_prompt += prompt[pos:] + return out_prompt, l + + def bow(self, p=None): + """Return the 0-based index of the word break preceding p most + immediately. + + p defaults to self.pos; word boundaries are determined using + self.syntax_table.""" + if p is None: + p = self.pos + st = self.syntax_table + b = self.buffer + p -= 1 + while p >= 0 and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD: + p -= 1 + while p >= 0 and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD: + p -= 1 + return p + 1 + + def eow(self, p=None): + """Return the 0-based index of the word break following p most + immediately. + + p defaults to self.pos; word boundaries are determined using + self.syntax_table.""" + if p is None: + p = self.pos + st = self.syntax_table + b = self.buffer + while p < len(b) and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD: + p += 1 + while p < len(b) and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD: + p += 1 + return p + + def bol(self, p=None): + """Return the 0-based index of the line break preceding p most + immediately. + + p defaults to self.pos.""" + # XXX there are problems here. + if p is None: + p = self.pos + b = self.buffer + p -= 1 + while p >= 0 and b[p] <> '\n': + p -= 1 + return p + 1 + + def eol(self, p=None): + """Return the 0-based index of the line break following p most + immediately. + + p defaults to self.pos.""" + if p is None: + p = self.pos + b = self.buffer + while p < len(b) and b[p] <> '\n': + p += 1 + return p + + def get_arg(self, default=1): + """Return any prefix argument that the user has supplied, + returning `default' if there is None. 
`default' defaults + (groan) to 1.""" + if self.arg is None: + return default + else: + return self.arg + + def get_prompt(self, lineno, cursor_on_line): + """Return what should be in the left-hand margin for line + `lineno'.""" + if self.arg is not None and cursor_on_line: + return "(arg: %s) "%self.arg + if "\n" in self.buffer: + if lineno == 0: + return self._ps2 + elif lineno == self.buffer.count("\n"): + return self._ps4 + else: + return self._ps3 + else: + return self._ps1 + + def push_input_trans(self, itrans): + self.input_trans_stack.append(self.input_trans) + self.input_trans = itrans + + def pop_input_trans(self): + self.input_trans = self.input_trans_stack.pop() + + def pos2xy(self, pos): + """Return the x, y coordinates of position 'pos'.""" + # this *is* incomprehensible, yes. + y = 0 + assert 0 <= pos <= len(self.buffer) + if pos == len(self.buffer): + y = len(self.screeninfo) - 1 + p, l2 = self.screeninfo[y] + return p + len(l2) - 1, y + else: + for p, l2 in self.screeninfo: + l = l2.count(1) + if l > pos: + break + else: + pos -= l + y += 1 + c = 0 + i = 0 + while c < pos: + c += l2[i] + i += 1 + while l2[i] == 0: + i += 1 + return p + i, y + + def insert(self, text): + """Insert 'text' at the insertion point.""" + self.buffer[self.pos:self.pos] = list(text) + self.pos += len(text) + self.dirty = 1 + + def update_cursor(self): + """Move the cursor to reflect changes in self.pos""" + self.cxy = self.pos2xy(self.pos) + self.console.move_cursor(*self.cxy) + + def after_command(self, cmd): + """This function is called to allow post command cleanup.""" + if getattr(cmd, "kills_digit_arg", 1): + if self.arg is not None: + self.dirty = 1 + self.arg = None + + def prepare(self): + """Get ready to run. Call restore when finished. 
You must not + write to the console in between the calls to prepare and + restore.""" + try: + self.console.prepare() + self.arg = None + self.screeninfo = [] + self.finished = 0 + del self.buffer[:] + self.pos = 0 + self.dirty = 1 + self.last_command = None + self._ps1, self._ps2, self._ps3, self._ps4 = \ + map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + except: + self.restore() + raise + + def last_command_is(self, klass): + if not self.last_command: + return 0 + return issubclass(klass, self.last_command) + + def restore(self): + """Clean up after a run.""" + self.console.restore() + + def finish(self): + """Called when a command signals that we're finished.""" + pass + + def error(self, msg="none"): + self.msg = "! " + msg + " " + self.dirty = 1 + self.console.beep() + + def update_screen(self): + if self.dirty: + self.refresh() + + def refresh(self): + """Recalculate and refresh the screen.""" + # this call sets up self.cxy, so call it first. + screen = self.calc_screen() + self.console.refresh(screen, self.cxy) + self.dirty = 0 # forgot this for a while (blush) + + def do_cmd(self, cmd): + #print cmd + if isinstance(cmd[0], str): + cmd = self.commands.get(cmd[0], + commands.invalid_command)(self, cmd) + elif isinstance(cmd[0], type): + cmd = cmd[0](self, cmd) + + cmd.do() + + self.after_command(cmd) + + if self.dirty: + self.refresh() + else: + self.update_cursor() + + if not isinstance(cmd, commands.digit_arg): + self.last_command = cmd.__class__ + + self.finished = cmd.finish + if self.finished: + self.console.finish() + self.finish() + + def handle1(self, block=1): + """Handle a single event. 
Wait as long as it takes if block + is true (the default), otherwise return None if no event is + pending.""" + + if self.msg: + self.msg = '' + self.dirty = 1 + + while 1: + event = self.console.get_event(block) + if not event: # can only happen if we're not blocking + return None + + if event.evt == 'key': + self.input_trans.push(event) + elif event.evt == 'scroll': + self.refresh() + elif event.evt == 'resize': + self.refresh() + else: + pass + + cmd = self.input_trans.get() + + if cmd is None: + if block: + continue + else: + return None + + self.do_cmd(cmd) + return 1 + + def push_char(self, char): + self.console.push_char(char) + self.handle1(0) + + def readline(self): + """Read a line. The implementation of this method also shows + how to drive Reader if you want more control over the event + loop.""" + self.prepare() + try: + self.refresh() + while not self.finished: + self.handle1() + return self.get_buffer() + finally: + self.restore() + + def bind(self, spec, command): + self.keymap = self.keymap + ((spec, command),) + self.input_trans = input.KeymapTranslator( + self.keymap, + invalid_cls='invalid-key', + character_cls='self-insert') + + def get_buffer(self, encoding=None): + if encoding is None: + encoding = self.console.encoding + return u''.join(self.buffer).encode(self.console.encoding) + + def get_unicode(self): + """Return the current buffer as a unicode string.""" + return u''.join(self.buffer) + +def test(): + from pyrepl.unix_console import UnixConsole + reader = Reader(UnixConsole()) + reader.ps1 = "**> " + reader.ps2 = "/*> " + reader.ps3 = "|*> " + reader.ps4 = "\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th 
operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. 
""" - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -1,9 +1,81 @@ # encoding: iso-8859-15 from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.unicodeobject import Py_UNICODE +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.module.cpyext.unicodeobject import ( + Py_UNICODE, PyUnicodeObject, new_empty_unicode) +from pypy.module.cpyext.api import PyObjectP, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef from pypy.rpython.lltypesystem import rffi, lltype import sys, py +class AppTestUnicodeObject(AppTestCpythonExtensionBase): + def test_unicodeobject(self): + module = self.import_extension('foo', [ + ("get_hello1", "METH_NOARGS", + """ + return PyUnicode_FromStringAndSize( + "Hello world", 11); + """), + ("test_GetSize", "METH_NOARGS", + """ + PyObject* s = PyUnicode_FromString("Hello world"); + int result = 0; + + if(PyUnicode_GetSize(s) == 11) { + result = 1; + } + if(s->ob_type->tp_basicsize != sizeof(void*)*4) + result = 0; + Py_DECREF(s); + return 
PyBool_FromLong(result); + """), + ("test_GetSize_exception", "METH_NOARGS", + """ + PyObject* f = PyFloat_FromDouble(1.0); + Py_ssize_t size = PyUnicode_GetSize(f); + + Py_DECREF(f); + return NULL; + """), + ("test_is_unicode", "METH_VARARGS", + """ + return PyBool_FromLong(PyUnicode_Check(PyTuple_GetItem(args, 0))); + """)]) + assert module.get_hello1() == u'Hello world' + assert module.test_GetSize() + raises(TypeError, module.test_GetSize_exception) + + assert module.test_is_unicode(u"") + assert not module.test_is_unicode(()) + + def test_unicode_buffer_init(self): + module = self.import_extension('foo', [ + ("getunicode", "METH_NOARGS", + """ + PyObject *s, *t; + Py_UNICODE* c; + Py_ssize_t len; + + s = PyUnicode_FromUnicode(NULL, 4); + if (s == NULL) + return NULL; + t = PyUnicode_FromUnicode(NULL, 3); + if (t == NULL) + return NULL; + Py_DECREF(t); + c = PyUnicode_AsUnicode(s); + c[0] = 'a'; + c[1] = 0xe9; + c[3] = 'c'; + return s; + """), + ]) + s = module.getunicode() + assert len(s) == 4 + assert s == u'a�\x00c' + + + class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): assert api.PyUnicode_GET_SIZE(space.wrap(u'sp�m')) == 4 @@ -77,6 +149,28 @@ assert space.unwrap(w_res) == u'sp�' rffi.free_charp(s) + def test_unicode_resize(self, space, api): + py_uni = new_empty_unicode(space, 10) + ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + py_uni.c_buffer[0] = u'a' + py_uni.c_buffer[1] = u'b' + py_uni.c_buffer[2] = u'c' + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 3) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 3 + assert py_uni.c_buffer[1] == u'b' + assert py_uni.c_buffer[3] == u'\x00' + # the same for growing + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 10) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 10 + assert py_uni.c_buffer[1] == 'b' + assert py_uni.c_buffer[10] == '\x00' + Py_DecRef(space, ar[0]) + lltype.free(ar, flavor='raw') + def 
test_AsUTF8String(self, space, api): w_u = space.wrap(u'sp�m') w_res = api.PyUnicode_AsUTF8String(w_u) @@ -235,13 +329,13 @@ x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) - w_y = api.PyUnicode_FromUnicode(target_chunk, 4) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, 4)) assert space.eq_w(w_y, space.wrap(u"abcd")) size = api.PyUnicode_GET_SIZE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, size) - w_y = api.PyUnicode_FromUnicode(target_chunk, size) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, size)) assert space.eq_w(w_y, w_x) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. - # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. 
+ if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/module/cpyext/include/import.h b/pypy/module/cpyext/include/import.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/import.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/lib_pypy/pyrepl/python_reader.py b/lib_pypy/pyrepl/python_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/python_reader.py @@ -0,0 +1,392 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Bob Ippolito +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# one impressive collections of imports: +from pyrepl.completing_reader import CompletingReader +from pyrepl.historical_reader import HistoricalReader +from pyrepl import completing_reader, reader +from pyrepl import copy_code, commands, completer +from pyrepl import module_lister +import new, sys, os, re, code, traceback +import atexit, warnings +try: + import cPickle as pickle +except ImportError: + import pickle +try: + import imp + imp.find_module("twisted") + from twisted.internet import reactor + from twisted.internet.abstract import FileDescriptor +except ImportError: + default_interactmethod = "interact" +else: + default_interactmethod = "twistedinteract" + +CommandCompiler = code.CommandCompiler + +def eat_it(*args): + """this function eats warnings, if you were wondering""" + pass + +class maybe_accept(commands.Command): + def do(self): + r = self.reader + text = r.get_unicode() + try: + # ooh, look at the hack: + code = r.compiler("#coding:utf-8\n"+text.encode('utf-8')) + except (OverflowError, SyntaxError, ValueError): + self.finish = 1 + else: + if code is None: + r.insert("\n") + else: + self.finish = 1 + +from_line_prog = re.compile( + "^from\s+(?P[A-Za-z_.0-9]*)\s+import\s+(?P[A-Za-z_.0-9]*)") +import_line_prog = re.compile( + "^(?:import|from)\s+(?P[A-Za-z_.0-9]*)\s*$") + +def mk_saver(reader): + def saver(reader=reader): + try: + file = open(os.path.expanduser("~/.pythoni.hist"), "w") + except IOError: + pass + else: + pickle.dump(reader.history, file) + file.close() + 
return saver + +class PythonicReader(CompletingReader, HistoricalReader): + def collect_keymap(self): + return super(PythonicReader, self).collect_keymap() + ( + (r'\n', 'maybe-accept'), + (r'\M-\n', 'insert-nl')) + + def __init__(self, console, locals, + compiler=None): + super(PythonicReader, self).__init__(console) + self.completer = completer.Completer(locals) + st = self.syntax_table + for c in "._0123456789": + st[c] = reader.SYNTAX_WORD + self.locals = locals + if compiler is None: + self.compiler = CommandCompiler() + else: + self.compiler = compiler + try: + file = open(os.path.expanduser("~/.pythoni.hist")) + except IOError: + pass + else: + try: + self.history = pickle.load(file) + except: + self.history = [] + self.historyi = len(self.history) + file.close() + atexit.register(mk_saver(self)) + for c in [maybe_accept]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + + def get_completions(self, stem): + b = self.get_unicode() + m = import_line_prog.match(b) + if m: + if not self._module_list_ready: + module_lister._make_module_list() + self._module_list_ready = True + + mod = m.group("mod") + try: + return module_lister.find_modules(mod) + except ImportError: + pass + m = from_line_prog.match(b) + if m: + mod, name = m.group("mod", "name") + try: + l = module_lister._packages[mod] + except KeyError: + try: + mod = __import__(mod, self.locals, self.locals, ['']) + return [x for x in dir(mod) if x.startswith(name)] + except ImportError: + pass + else: + return [x[len(mod) + 1:] + for x in l if x.startswith(mod + '.' 
+ name)] + try: + l = completing_reader.uniqify(self.completer.complete(stem)) + return l + except (NameError, AttributeError): + return [] + +class ReaderConsole(code.InteractiveInterpreter): + II_init = code.InteractiveInterpreter.__init__ + def __init__(self, console, locals=None): + if locals is None: + locals = {} + self.II_init(locals) + self.compiler = CommandCompiler() + self.compile = self.compiler.compiler + self.reader = PythonicReader(console, locals, self.compiler) + locals['Reader'] = self.reader + + def run_user_init_file(self): + for key in "PYREPLSTARTUP", "PYTHONSTARTUP": + initfile = os.environ.get(key) + if initfile is not None and os.path.exists(initfile): + break + else: + return + try: + execfile(initfile, self.locals, self.locals) + except: + etype, value, tb = sys.exc_info() + traceback.print_exception(etype, value, tb.tb_next) + + def execute(self, text): + try: + # ooh, look at the hack: + code = self.compile("# coding:utf8\n"+text.encode('utf-8'), + '', 'single') + except (OverflowError, SyntaxError, ValueError): + self.showsyntaxerror("") + else: + self.runcode(code) + sys.stdout.flush() + + def interact(self): + while 1: + try: # catches EOFError's and KeyboardInterrupts during execution + try: # catches KeyboardInterrupts during editing + try: # warning saver + # can't have warnings spewed onto terminal + sv = warnings.showwarning + warnings.showwarning = eat_it + l = unicode(self.reader.readline(), 'utf-8') + finally: + warnings.showwarning = sv + except KeyboardInterrupt: + print "KeyboardInterrupt" + else: + if l: + self.execute(l) + except EOFError: + break + except KeyboardInterrupt: + continue + + def prepare(self): + self.sv_sw = warnings.showwarning + warnings.showwarning = eat_it + self.reader.prepare() + self.reader.refresh() # we want :after methods... 
+ + def restore(self): + self.reader.restore() + warnings.showwarning = self.sv_sw + + def handle1(self, block=1): + try: + r = 1 + r = self.reader.handle1(block) + except KeyboardInterrupt: + self.restore() + print "KeyboardInterrupt" + self.prepare() + else: + if self.reader.finished: + text = self.reader.get_unicode() + self.restore() + if text: + self.execute(text) + self.prepare() + return r + + def tkfilehandler(self, file, mask): + try: + self.handle1(block=0) + except: + self.exc_info = sys.exc_info() + + # how the do you get this to work on Windows (without + # createfilehandler)? threads, I guess + def really_tkinteract(self): + import _tkinter + _tkinter.createfilehandler( + self.reader.console.input_fd, _tkinter.READABLE, + self.tkfilehandler) + + self.exc_info = None + while 1: + # dooneevent will return 0 without blocking if there are + # no Tk windows, 1 after blocking until an event otherwise + # so the following does what we want (this wasn't expected + # to be obvious). + if not _tkinter.dooneevent(_tkinter.ALL_EVENTS): + self.handle1(block=1) + if self.exc_info: + type, value, tb = self.exc_info + self.exc_info = None + raise type, value, tb + + def tkinteract(self): + """Run a Tk-aware Python interactive session. + + This function simulates the Python top-level in a way that + allows Tk's mainloop to run.""" + + # attempting to understand the control flow of this function + # without help may cause internal injuries. so, some + # explanation. + + # The outer while loop is there to restart the interaction if + # the user types control-c when execution is deep in our + # innards. I'm not sure this can't leave internals in an + # inconsistent state, but it's a good start. + + # then the inside loop keeps calling self.handle1 until + # _tkinter gets imported; then control shifts to + # self.really_tkinteract, above. 
+ + # this function can only return via an exception; we mask + # EOFErrors (but they end the interaction) and + # KeyboardInterrupts cause a restart. All other exceptions + # are likely bugs in pyrepl (well, 'cept for SystemExit, of + # course). + + while 1: + try: + try: + self.prepare() + try: + while 1: + if sys.modules.has_key("_tkinter"): + self.really_tkinteract() + # really_tkinteract is not expected to + # return except via an exception, but: + break + self.handle1() + except EOFError: + pass + finally: + self.restore() + except KeyboardInterrupt: + continue + else: + break + + def twistedinteract(self): + from twisted.internet import reactor + from twisted.internet.abstract import FileDescriptor + import signal + outerself = self + class Me(FileDescriptor): + def fileno(self): + """ We want to select on FD 0 """ + return 0 + + def doRead(self): + """called when input is ready""" + try: + outerself.handle1() + except EOFError: + reactor.stop() + + reactor.addReader(Me()) + reactor.callWhenRunning(signal.signal, + signal.SIGINT, + signal.default_int_handler) + self.prepare() + try: + reactor.run() + finally: + self.restore() + + + def cocoainteract(self, inputfilehandle=None, outputfilehandle=None): + # only call this when there's a run loop already going! 
+ # note that unlike the other *interact methods, this returns immediately + from cocoasupport import CocoaInteracter + self.cocoainteracter = CocoaInteracter.alloc().init(self, inputfilehandle, outputfilehandle) + + +def main(use_pygame_console=0, interactmethod=default_interactmethod, print_banner=True, clear_main=True): + si, se, so = sys.stdin, sys.stderr, sys.stdout + try: + if 0 and use_pygame_console: # pygame currently borked + from pyrepl.pygame_console import PyGameConsole, FakeStdin, FakeStdout + con = PyGameConsole() + sys.stderr = sys.stdout = FakeStdout(con) + sys.stdin = FakeStdin(con) + else: + from pyrepl.unix_console import UnixConsole + try: + import locale + except ImportError: + encoding = None + else: + if hasattr(locale, 'nl_langinfo') \ + and hasattr(locale, 'CODESET'): + encoding = locale.nl_langinfo(locale.CODESET) + elif os.environ.get('TERM_PROGRAM') == 'Apple_Terminal': + # /me whistles innocently... + code = int(os.popen( + "defaults read com.apple.Terminal StringEncoding" + ).read()) + if code == 4: + encoding = 'utf-8' + # More could go here -- and what's here isn't + # bulletproof. What would be? AppleScript? + # Doesn't seem to be possible. + else: + encoding = None + else: + encoding = None # so you get ASCII... + con = UnixConsole(0, 1, None, encoding) + if print_banner: + print "Python", sys.version, "on", sys.platform + print 'Type "help", "copyright", "credits" or "license" '\ + 'for more information.' 
+ sys.path.insert(0, os.getcwd()) + + if clear_main and __name__ != '__main__': + mainmod = new.module('__main__') + sys.modules['__main__'] = mainmod + else: + mainmod = sys.modules['__main__'] + + rc = ReaderConsole(con, mainmod.__dict__) + rc.reader._module_list_ready = False + rc.run_user_init_file() + getattr(rc, interactmethod)() + finally: + sys.stdin, sys.stderr, sys.stdout = si, se, so + +if __name__ == '__main__': + main() diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pyfile.py @@ -0,0 +1,68 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CONST_STRING, FILEP, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject) +from pypy.interpreter.error import OperationError +from pypy.module._file.interp_file import W_File + +PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File) + + at cpython_api([PyObject, rffi.INT_real], PyObject) +def PyFile_GetLine(space, w_obj, n): + """ + Equivalent to p.readline([n]), this function reads one line from the + object p. p may be a file object or any object with a readline() + method. If n is 0, exactly one line is read, regardless of the length of + the line. If n is greater than 0, no more than n bytes will be read + from the file; a partial line can be returned. 
In both cases, an empty string + is returned if the end of the file is reached immediately. If n is less than + 0, however, one line is read regardless of length, but EOFError is + raised if the end of the file is reached immediately.""" + try: + w_readline = space.getattr(w_obj, space.wrap('readline')) + except OperationError: + raise OperationError( + space.w_TypeError, space.wrap( + "argument must be a file, or have a readline() method.")) + + n = rffi.cast(lltype.Signed, n) + if space.is_true(space.gt(space.wrap(n), space.wrap(0))): + return space.call_function(w_readline, space.wrap(n)) + elif space.is_true(space.lt(space.wrap(n), space.wrap(0))): + return space.call_function(w_readline) + else: + # XXX Raise EOFError as specified + return space.call_function(w_readline) + + at cpython_api([CONST_STRING, CONST_STRING], PyObject) +def PyFile_FromString(space, filename, mode): + """ + On success, return a new file object that is opened on the file given by + filename, with a file mode given by mode, where mode has the same + semantics as the standard C routine fopen(). On failure, return NULL.""" + w_filename = space.wrap(rffi.charp2str(filename)) + w_mode = space.wrap(rffi.charp2str(mode)) + return space.call_method(space.builtin, 'file', w_filename, w_mode) + + at cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject) +def PyFile_FromFile(space, fp, name, mode, close): + """Create a new PyFileObject from the already-open standard C file + pointer, fp. The function close will be called when the file should be + closed. Return NULL on failure.""" + raise NotImplementedError + + at cpython_api([PyObject, rffi.INT_real], lltype.Void) +def PyFile_SetBufSize(space, w_file, n): + """Available on systems with setvbuf() only. 
This should only be called + immediately after file object creation.""" + raise NotImplementedError + + at cpython_api([CONST_STRING, PyObject], rffi.INT_real, error=-1) +def PyFile_WriteString(space, s, w_p): + """Write string s to file object p. Return 0 on success or -1 on + failure; the appropriate exception will be set.""" + w_s = space.wrap(rffi.charp2str(s)) + space.call_method(w_p, "write", w_s) + return 0 + diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/pygame_console.py @@ -0,0 +1,353 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# the pygame console is currently thoroughly broken. + +# there's a fundamental difference from the UnixConsole: here we're +# the terminal emulator too, in effect. This means, e.g., for pythoni +# we really need a separate process (or thread) to monitor for ^C +# during command execution and zap the executor process. Making this +# work on non-Unix is expected to be even more entertaining. 
+ +from pygame.locals import * +from pyrepl.console import Console, Event +from pyrepl import pygame_keymap +import pygame +import types + +lmargin = 5 +rmargin = 5 +tmargin = 5 +bmargin = 5 + +try: + bool +except NameError: + def bool(x): + return not not x + +modcolors = {K_LCTRL:1, + K_RCTRL:1, + K_LMETA:1, + K_RMETA:1, + K_LALT:1, + K_RALT:1, + K_LSHIFT:1, + K_RSHIFT:1} + +class colors: + fg = 250,240,230 + bg = 5, 5, 5 + cursor = 230, 0, 230 + margin = 5, 5, 15 + +class FakeStdout: + def __init__(self, con): + self.con = con + def write(self, text): + self.con.write(text) + def flush(self): + pass + +class FakeStdin: + def __init__(self, con): + self.con = con + def read(self, n=None): + # argh! + raise NotImplementedError + def readline(self, n=None): + from reader import Reader + try: + # this isn't quite right: it will clobber any prompt that's + # been printed. Not sure how to get around this... + return Reader(self.con).readline() + except EOFError: + return '' + +class PyGameConsole(Console): + """Attributes: + + (keymap), + (fd), + screen, + height, + width, + """ + + def __init__(self): + self.pygame_screen = pygame.display.set_mode((800, 600)) + pygame.font.init() + pygame.key.set_repeat(500, 30) + self.font = pygame.font.Font( + "/usr/X11R6/lib/X11/fonts/TTF/luximr.ttf", 15) + self.fw, self.fh = self.fontsize = self.font.size("X") + self.cursor = pygame.Surface(self.fontsize) + self.cursor.fill(colors.cursor) + self.clear() + self.curs_vis = 1 + self.height, self.width = self.getheightwidth() + pygame.display.update() + pygame.event.set_allowed(None) + pygame.event.set_allowed(KEYDOWN) + + def install_keymap(self, keymap): + """Install a given keymap. + + keymap is a tuple of 2-element tuples; each small tuple is a + pair (keyspec, event-name). 
The format for keyspec is + modelled on that used by readline (so read that manual for + now!).""" + self.k = self.keymap = pygame_keymap.compile_keymap(keymap) + + def char_rect(self, x, y): + return self.char_pos(x, y), self.fontsize + + def char_pos(self, x, y): + return (lmargin + x*self.fw, + tmargin + y*self.fh + self.cur_top + self.scroll) + + def paint_margin(self): + s = self.pygame_screen + c = colors.margin + s.fill(c, [0, 0, 800, tmargin]) + s.fill(c, [0, 0, lmargin, 600]) + s.fill(c, [0, 600 - bmargin, 800, bmargin]) + s.fill(c, [800 - rmargin, 0, lmargin, 600]) + + def refresh(self, screen, (cx, cy)): + self.screen = screen + self.pygame_screen.fill(colors.bg, + [0, tmargin + self.cur_top + self.scroll, + 800, 600]) + self.paint_margin() + + line_top = self.cur_top + width, height = self.fontsize + self.cxy = (cx, cy) + cp = self.char_pos(cx, cy) + if cp[1] < tmargin: + self.scroll = - (cy*self.fh + self.cur_top) + self.repaint() + elif cp[1] + self.fh > 600 - bmargin: + self.scroll += (600 - bmargin) - (cp[1] + self.fh) + self.repaint() + if self.curs_vis: + self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + for line in screen: + if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): + if line: + ren = self.font.render(line, 1, colors.fg) + self.pygame_screen.blit(ren, (lmargin, + tmargin + line_top + self.scroll)) + line_top += self.fh + pygame.display.update() + + def prepare(self): + self.cmd_buf = '' + self.k = self.keymap + self.height, self.width = self.getheightwidth() + self.curs_vis = 1 + self.cur_top = self.pos[0] + self.event_queue = [] + + def restore(self): + pass + + def blit_a_char(self, linen, charn): + line = self.screen[linen] + if charn < len(line): + text = self.font.render(line[charn], 1, colors.fg) + self.pygame_screen.blit(text, self.char_pos(charn, linen)) + + def move_cursor(self, x, y): + cp = self.char_pos(x, y) + if cp[1] < tmargin or cp[1] + self.fh > 600 - bmargin: + 
self.event_queue.append(Event('refresh', '', '')) + else: + if self.curs_vis: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + self.pygame_screen.blit(self.cursor, cp) + self.blit_a_char(y, x) + pygame.display.update() + self.cxy = (x, y) + + def set_cursor_vis(self, vis): + self.curs_vis = vis + if vis: + self.move_cursor(*self.cxy) + else: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + pygame.display.update() + + def getheightwidth(self): + """Return (height, width) where height and width are the height + and width of the terminal window in characters.""" + return ((600 - tmargin - bmargin)/self.fh, + (800 - lmargin - rmargin)/self.fw) + + def tr_event(self, pyg_event): + shift = bool(pyg_event.mod & KMOD_SHIFT) + ctrl = bool(pyg_event.mod & KMOD_CTRL) + meta = bool(pyg_event.mod & (KMOD_ALT|KMOD_META)) + + try: + return self.k[(pyg_event.unicode, meta, ctrl)], pyg_event.unicode + except KeyError: + try: + return self.k[(pyg_event.key, meta, ctrl)], pyg_event.unicode + except KeyError: + return "invalid-key", pyg_event.unicode + + def get_event(self, block=1): + """Return an Event instance. Returns None if |block| is false + and there is no event pending, otherwise waits for the + completion of an event.""" + while 1: + if self.event_queue: + return self.event_queue.pop(0) + elif block: + pyg_event = pygame.event.wait() + else: + pyg_event = pygame.event.poll() + if pyg_event.type == NOEVENT: + return + + if pyg_event.key in modcolors: + continue + + k, c = self.tr_event(pyg_event) + self.cmd_buf += c.encode('ascii', 'replace') + self.k = k + + if not isinstance(k, types.DictType): + e = Event(k, self.cmd_buf, []) + self.k = self.keymap + self.cmd_buf = '' + return e + + def beep(self): + # uhh, can't be bothered now. + # pygame.sound.something, I guess. 
+ pass + + def clear(self): + """Wipe the screen""" + self.pygame_screen.fill(colors.bg) + #self.screen = [] + self.pos = [0, 0] + self.grobs = [] + self.cur_top = 0 + self.scroll = 0 + + def finish(self): + """Move the cursor to the end of the display and otherwise get + ready for end. XXX could be merged with restore? Hmm.""" + if self.curs_vis: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + for line in self.screen: + self.write_line(line, 1) + if self.curs_vis: + self.pygame_screen.blit(self.cursor, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + pygame.display.update() + + def flushoutput(self): + """Flush all output to the screen (assuming there's some + buffering going on somewhere)""" + # no buffering here, ma'am (though perhaps there should be!) + pass + + def forgetinput(self): + """Forget all pending, but not yet processed input.""" + while pygame.event.poll().type <> NOEVENT: + pass + + def getpending(self): + """Return the characters that have been typed but not yet + processed.""" + events = [] + while 1: + event = pygame.event.poll() + if event.type == NOEVENT: + break + events.append(event) + + return events + + def wait(self): + """Wait for an event.""" + raise Exception, "erp!" + + def repaint(self): + # perhaps we should consolidate grobs? 
+ self.pygame_screen.fill(colors.bg) + self.paint_margin() + for (y, x), surf, text in self.grobs: + if surf and 0 < y + self.scroll: + self.pygame_screen.blit(surf, (lmargin + x, + tmargin + y + self.scroll)) + pygame.display.update() + + def write_line(self, line, ret): + charsleft = (self.width*self.fw - self.pos[1])/self.fw + while len(line) > charsleft: + self.write_line(line[:charsleft], 1) + line = line[charsleft:] + if line: + ren = self.font.render(line, 1, colors.fg, colors.bg) + self.grobs.append((self.pos[:], ren, line)) + self.pygame_screen.blit(ren, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + else: + self.grobs.append((self.pos[:], None, line)) + if ret: + self.pos[0] += self.fh + if tmargin + self.pos[0] + self.scroll + self.fh > 600 - bmargin: + self.scroll = 600 - bmargin - self.pos[0] - self.fh - tmargin + self.repaint() + self.pos[1] = 0 + else: + self.pos[1] += self.fw*len(line) + + def write(self, text): + lines = text.split("\n") + if self.curs_vis: + self.pygame_screen.fill(colors.bg, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll, + self.fw, self.fh)) + for line in lines[:-1]: + self.write_line(line, 1) + self.write_line(lines[-1], 0) + if self.curs_vis: + self.pygame_screen.blit(self.cursor, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + pygame.display.update() + + def flush(self): + pass diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 @@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def __init__(self, assembler, translate_support_code=False): @@ -136,16 +166,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = 
X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -741,8 +761,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None + self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -848,31 +872,53 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. 
- # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. - for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. 
+ for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -882,7 +928,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -941,16 +987,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + 
self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -1144,7 +1199,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1212,18 +1267,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from 
pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the 
method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,22 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with 
rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + + def test_reload(self, space, api): + pdb = api.PyImport_Import(space.wrap("pdb")) + space.delattr(pdb, space.wrap("set_trace")) + pdb = api.PyImport_ReloadModule(pdb) + assert space.getattr(pdb, space.wrap("set_trace")) + class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): skip("leak?") diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + 
PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/lib_pypy/pyrepl/tests/basic.py b/lib_pypy/pyrepl/tests/basic.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/basic.py @@ -0,0 +1,115 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +class SimpleTestCase(ReaderTestCase): + + def test_basic(self): + self.run_test([(('self-insert', 'a'), ['a']), + ( 'accept', ['a'])]) + + def test_repeat(self): + self.run_test([(('digit-arg', '3'), ['']), + (('self-insert', 'a'), ['aaa']), + ( 'accept', ['aaa'])]) + + def test_kill_line(self): + self.run_test([(('self-insert', 'abc'), ['abc']), + ( 'left', None), + ( 'kill-line', ['ab']), + ( 'accept', ['ab'])]) + + def test_unix_line_discard(self): + self.run_test([(('self-insert', 'abc'), ['abc']), + ( 'left', None), + ( 'unix-word-rubout', ['c']), + ( 'accept', ['c'])]) + + def test_kill_word(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'beginning-of-line', ['ab cd']), + ( 'kill-word', [' cd']), + ( 'accept', [' cd'])]) + + def test_backward_kill_word(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'accept', ['ab '])]) + + def test_yank(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'beginning-of-line', ['ab ']), + ( 'yank', ['cdab ']), + ( 'accept', ['cdab '])]) + + def test_yank_pop(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'left', ['ab ']), + ( 'backward-kill-word', [' ']), + ( 'yank', ['ab ']), + ( 'yank-pop', ['cd ']), + ( 'accept', ['cd '])]) + + def test_interrupt(self): + try: + self.run_test([( 'interrupt', [''])]) + except KeyboardInterrupt: + pass + else: + self.fail('KeyboardInterrupt got lost') + + # test_suspend -- hah + + def test_up(self): + self.run_test([(('self-insert', 'ab\ncd'), ['ab', 'cd']), + ( 'up', ['ab', 'cd']), + (('self-insert', 'e'), ['abe', 'cd']), + ( 'accept', ['abe', 'cd'])]) + + def test_down(self): + self.run_test([(('self-insert', 'ab\ncd'), ['ab', 'cd']), + ( 'up', ['ab', 'cd']), + (('self-insert', 'e'), ['abe', 'cd']), + 
( 'down', ['abe', 'cd']), + (('self-insert', 'f'), ['abe', 'cdf']), + ( 'accept', ['abe', 'cdf'])]) + + def test_left(self): + self.run_test([(('self-insert', 'ab'), ['ab']), + ( 'left', ['ab']), + (('self-insert', 'c'), ['acb']), + ( 'accept', ['acb'])]) + + def test_right(self): + self.run_test([(('self-insert', 'ab'), ['ab']), + ( 'left', ['ab']), + (('self-insert', 'c'), ['acb']), + ( 'right', ['acb']), + (('self-insert', 'd'), ['acbd']), + ( 'accept', ['acbd'])]) + +def test(): + run_testcase(SimpleTestCase) + +if __name__ == '__main__': + test() diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -443,7 +443,8 @@ "ll_upper": Meth([], self.SELFTYPE_T), "ll_lower": Meth([], self.SELFTYPE_T), "ll_substring": Meth([Signed, Signed], self.SELFTYPE_T), # ll_substring(start, count) - "ll_split_chr": Meth([self.CHAR], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_split_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_rsplit_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! 
"ll_contains": Meth([self.CHAR], Bool), "ll_replace_chr_chr": Meth([self.CHAR, self.CHAR], self.SELFTYPE_T), }) @@ -1480,9 +1481,16 @@ # NOT_RPYTHON return self.make_string(self._str[start:start+count]) - def ll_split_chr(self, ch): + def ll_split_chr(self, ch, max): # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.split(ch)] + l = [self.make_string(s) for s in self._str.split(ch, max)] + res = _array(Array(self._TYPE), len(l)) + res._array[:] = l + return res + + def ll_rsplit_chr(self, ch, max): + # NOT_RPYTHON + l = [self.make_string(s) for s in self._str.rsplit(ch, max)] res = _array(Array(self._TYPE), len(l)) res._array[:] = l return res diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -102,7 +102,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = 
get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -78,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -124,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() if gc_ll_descr.gcrootmap: self._build_close_stack() @@ -135,6 +134,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" 
+ self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -147,6 +147,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -172,26 +173,47 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. 
- mc.MOV_rr(edi.value, edx.value) - - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -199,21 +221,28 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): - from pypy.rlib import rstack _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0: return # no stack check (for tests, or non-translated) # + # make a "function" that is called immediately at the start of + # an assembler function. In particular, the stack looks like: + # + # | ... 
| <-- aligned to a multiple of 16 + # | retaddr of caller | + # | my own retaddr | <-- esp + # +---------------------+ + # mc = codebuf.MachineCodeBlockWrapper() - mc.PUSH_r(ebp.value) - mc.MOV_rr(ebp.value, esp.value) # + stack_size = WORD if IS_X86_64: # on the x86_64, we have to save all the registers that may # have been used to pass arguments + stack_size += 6*WORD + 8*8 for reg in [edi, esi, edx, ecx, r8, r9]: mc.PUSH_r(reg.value) mc.SUB_ri(esp.value, 8*8) @@ -222,11 +251,13 @@ # if IS_X86_32: mc.LEA_rb(eax.value, +8) + stack_size += 2*WORD + mc.PUSH_r(eax.value) # alignment mc.PUSH_r(eax.value) elif IS_X86_64: mc.LEA_rb(edi.value, +16) - mc.AND_ri(esp.value, -16) # + # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) # mc.MOV(eax, heap(self.cpu.pos_exception())) @@ -234,16 +265,16 @@ mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() # - if IS_X86_64: + if IS_X86_32: + mc.ADD_ri(esp.value, 2*WORD) + elif IS_X86_64: # restore the registers for i in range(7, -1, -1): mc.MOVSD_xs(i, 8*i) - for i, reg in [(6, r9), (5, r8), (4, ecx), - (3, edx), (2, esi), (1, edi)]: - mc.MOV_rb(reg.value, -8*i) + mc.ADD_ri(esp.value, 8*8) + for reg in [r9, r8, ecx, edx, esi, edi]: + mc.POP_r(reg.value) # - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) mc.RET() # # patch the JNZ above @@ -268,9 +299,7 @@ # function, and will instead return to the caller's caller. Note # also that we completely ignore the saved arguments, because we # are interrupting the function. - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) - mc.ADD_ri(esp.value, WORD) + mc.ADD_ri(esp.value, stack_size) mc.RET() # rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -588,7 +617,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. 
in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -601,6 +630,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. not translated) @@ -622,12 +655,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -737,8 +790,8 @@ 
nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -947,9 +1000,9 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, x, arglocs, start) p = 0 n = len(arglocs) @@ -975,9 +1028,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -1035,12 +1088,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. 
+ gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1256,6 +1324,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -1834,6 +1907,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1847,8 +1924,8 @@ tmp = ecx else: tmp = eax - - self._emit_call(x, arglocs, 3, tmp=tmp) + + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1879,7 +1956,7 @@ faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') @@ -1963,8 +2040,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), 
arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1989,7 +2066,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -2016,7 +2093,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -2111,11 +2188,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2127,8 +2209,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2136,7 +2217,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = 
self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2146,19 +2227,27 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ From commits-noreply at bitbucket.org Tue Apr 12 12:30:43 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 12:30:43 +0200 
(CEST) Subject: [pypy-svn] extradoc extradoc: At least, don't kill "generators" entirely in case there is still doubt. Message-ID: <20110412103043.AF48E2A202C@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3497:79eb8284ffe9 Date: 2011-04-12 12:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/79eb8284ffe9/ Log: At least, don't kill "generators" entirely in case there is still doubt. diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -23,6 +23,8 @@ is a compile time constant (and call unrolled version of string formatting loop in this case). +- generators?? + - consider how much old style classes in stdlib hurt us. - support raw mallocs From commits-noreply at bitbucket.org Tue Apr 12 13:17:13 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 12 Apr 2011 13:17:13 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Make this a new style class Message-ID: <20110412111713.E19DA36C20D@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43307:e93a32fe3388 Date: 2011-04-12 13:17 +0200 http://bitbucket.org/pypy/pypy/changeset/e93a32fe3388/ Log: Make this a new style class diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -42,7 +42,7 @@ assert qmut1 is qmut2 -class QuasiImmutTests: +class QuasiImmutTests(object): def test_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) From commits-noreply at bitbucket.org Tue Apr 12 13:51:19 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 13:51:19 +0200 (CEST) Subject: [pypy-svn] pypy default: Update LICENSE with 2.7 information. 
Message-ID: <20110412115119.2755D2A202C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43308:9ef67176f084 Date: 2011-04-12 13:51 +0200 http://bitbucket.org/pypy/pypy/changeset/9ef67176f084/ Log: Update LICENSE with 2.7 information. diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -123,12 +123,12 @@ by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -161,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. 
- UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt From commits-noreply at bitbucket.org Tue Apr 12 15:55:39 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 12 Apr 2011 15:55:39 +0200 (CEST) Subject: [pypy-svn] pypy default: Remove old and unrun test Message-ID: <20110412135539.2AEE236C208@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43309:4e684849fab7 Date: 2011-04-12 15:30 +0200 http://bitbucket.org/pypy/pypy/changeset/4e684849fab7/ Log: Remove old and unrun test diff --git a/pypy/jit/metainterp/test/test_dlist.py b/pypy/jit/metainterp/test/test_dlist.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_dlist.py +++ /dev/null @@ -1,165 +0,0 @@ - -import py -from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin -py.test.skip("Disabled") - -class ListTests: - def test_basic(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - n -= 1 - return l[0] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(getarrayitem_gc=0, setarrayitem_gc=1) -# XXX fix codewriter -# guard_exception=0, -# guard_no_exception=1) - - def test_list_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=2, getarrayitem_gc=0) - - def test_list_escapes_but_getitem_goes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - 
while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - x = l[2] - y = l[1] + l[2] - l[1] = x + y - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=3, getarrayitem_gc=0) - - def test_list_of_ptrs(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - class A(object): - def __init__(self, x): - self.x = x - - def f(n): - l = [A(3)] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0].x + 1 - l[0] = A(x) - n -= 1 - return l[0].x - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=1, getarrayitem_gc=0, - new_with_vtable=1) # A should escape - - def test_list_checklength(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [10, 13], listops=True) - assert res == f(10, 13) - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_list_checklength_run(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) > n: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [50, 13], listops=True) - assert res == 42 - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_checklength_cannot_go_away(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n): - l = [0] * n - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return len(l) - l = [0] * n - n -= 1 - return 0 - - res = self.meta_interp(f, [10], listops=True) - assert res 
== 2 - self.check_loops(arraylen_gc=1) - - def test_list_indexerror(self): - # this is an example where IndexError is raised before - # even getting to the JIT - py.test.skip("I suspect bug somewhere outside of the JIT") - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - l[n] = n - n -= 1 - return l[3] - - def g(n): - try: - f(n) - return 0 - except IndexError: - return 42 - - res = self.meta_interp(g, [10]) - assert res == 42 - self.check_loops(setitem=2) - -class TestLLtype(ListTests, LLJitMixin): - pass From commits-noreply at bitbucket.org Tue Apr 12 15:55:48 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 12 Apr 2011 15:55:48 +0200 (CEST) Subject: [pypy-svn] pypy default: Rename test_basic into test_ajit. Introduce support.py that contains JitMixin Message-ID: <20110412135548.876512A202D@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43310:f38e5dab3fd4 Date: 2011-04-12 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/f38e5dab3fd4/ Log: Rename test_basic into test_ajit. 
Introduce support.py that contains JitMixin (mwahahaha, evil laugh) diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ListTests: diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_ajit.py copy from pypy/jit/metainterp/test/test_basic.py copy to pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -4,269 +4,17 @@ from pypy.rlib.jit import loop_invariant from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong from pypy import conftest from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class 
FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - 
return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, 
**check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling 
meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class BasicTests: diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import 
JitDriver, hint, dont_look_inside from pypy.rlib.rarithmetic import intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.rclass import FieldListAccessor from pypy.jit.metainterp.warmspot import get_stats, get_translator from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class WrongResult(Exception): pass diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from pypy.rlib.jit import JitDriver, unroll_safe -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_basic.py +++ /dev/null @@ -1,2411 +0,0 @@ -import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import 
set_future_value -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong -from pypy import conftest -from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper -from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" 
- - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - - -class BasicTests: - - def test_basic(self): - def f(x, y): - return x + y - res = self.interp_operations(f, [40, 2]) - assert res == 42 - - def test_basic_inst(self): - class A: - pass - def f(n): - a = A() - a.x = n - return a.x - res = self.interp_operations(f, [42]) - assert res == 42 - - def test_uint_floordiv(self): - from pypy.rlib.rarithmetic import r_uint - - def f(a, b): - a = r_uint(a) - b = r_uint(b) - return a/b - - res = self.interp_operations(f, [-4, 3]) - assert res == long(r_uint(-4)) // 3 - - def test_direct_call(self): - def g(n): - return n + 2 - def f(a, b): - return g(a) + g(b) - 
res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_direct_call_with_guard(self): - def g(n): - if n < 0: - return 0 - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - if self.basic: - found = 0 - for op in get_stats().loops[0]._all_operations(): - if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) - found += 1 - assert found == 1 - - def test_loop_invariant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loop_invariant_mul_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - b = y * 2 - res += ovfcheck(x * x) + b - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) - - def test_loop_invariant_mul_bridge1(self): - myjitdriver = 
JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - x += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 3427 - self.check_loop_count(3) - - def test_loop_invariant_mul_bridge_maintaining1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - res += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1167 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - - def test_loop_invariant_mul_bridge_maintaining2(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - z = x * x - res += z - if y<16: - res += z - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1692 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - def test_loop_invariant_mul_bridge_maintaining3(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) - def f(x, y, m): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m) - myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m) - z = x * x - res += z - if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x.intval * x.intval - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 
'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loops_are_transient(self): - import gc, weakref - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - if y%2: - res *= 2 - y -= 1 - return res - wr_loops = [] - old_init = history.TreeLoop.__init__.im_func - try: - def track_init(self, name): - old_init(self, name) - wr_loops.append(weakref.ref(self)) - history.TreeLoop.__init__ = track_init - res = self.meta_interp(f, [6, 15], no_stats=True) - finally: - history.TreeLoop.__init__ = old_init - - assert res == f(6, 15) - gc.collect() - - #assert not [wr for wr in wr_loops if wr()] - for loop in [wr for wr in wr_loops if wr()]: - assert loop().name == 'short preamble' - - def test_string(self): - def f(n): - bytecode = 'adlfkj' + chr(n) - if n < len(bytecode): - return bytecode[n] - else: - return "?" - res = self.interp_operations(f, [1]) - assert res == ord("d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord("?") - - def test_chr2str(self): - def f(n): - s = chr(n) - return s[0] - res = self.interp_operations(f, [3]) - assert res == 3 - - def test_unicode(self): - def f(n): - bytecode = u'adlfkj' + unichr(n) - if n < len(bytecode): - return bytecode[n] - else: - return u"?" 
- res = self.interp_operations(f, [1]) - assert res == ord(u"d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord(u"?") - - def test_residual_call(self): - @dont_look_inside - def externfn(x, y): - return x * y - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) - - def test_residual_call_pure(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - n = hint(n, promote=True) - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is not recorded in the history if all-constant args - self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure=0) - - def test_residual_call_pure_1(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is recorded in the history if not-all-constant args - self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure=1) - - def test_residual_call_pure_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def externfn(x): - return x - 1 - externfn._pure_function_ = True - def f(n): - while n > 0: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - n = externfn(n) - return n - res = self.meta_interp(f, [7]) - assert res == 0 - # CALL_PURE is recorded in the history, but turned into a CALL - # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) - - def test_constfold_call_pure(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - n -= externfn(m) - return n - res = 
self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_constfold_call_pure_2(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - class V: - def __init__(self, value): - self.value = value - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - v = V(m) - n -= externfn(v.value) - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_pure_function_returning_object(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - class V: - def __init__(self, x): - self.x = x - v1 = V(1) - v2 = V(2) - def externfn(x): - if x: - return v1 - else: - return v2 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - m = V(m).x - n -= externfn(m).x + externfn(m + m - m).x - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) - - def test_constant_across_mp(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - class X(object): - pass - def f(n): - while n > -100: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - x = X() - x.arg = 5 - if n <= 0: break - n -= x.arg - x.arg = 6 # prevents 'x.arg' from being annotated as constant - return n - res = self.meta_interp(f, [31]) - assert res == -4 - - def test_stopatxpolicy(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def internfn(y): - return y * 3 - def externfn(y): - return y % 4 - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if y & 
7: - f = internfn - else: - f = externfn - f(y) - y -= 1 - return 42 - policy = StopAtXPolicy(externfn) - res = self.meta_interp(f, [31], policy=policy) - assert res == 42 - self.check_loops(int_mul=1, int_mod=0) - - def test_we_are_jitted(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if we_are_jitted(): - x = 1 - else: - x = 10 - y -= x - return y - assert f(55) == -5 - res = self.meta_interp(f, [55]) - assert res == -1 - - def test_confirm_enter_jit(self): - def confirm_enter_jit(x, y): - return x <= 5 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - confirm_enter_jit = confirm_enter_jit) - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - y -= x - return y - # - res = self.meta_interp(f, [10, 84]) - assert res == -6 - self.check_loop_count(0) - # - res = self.meta_interp(f, [3, 19]) - assert res == -2 - self.check_loop_count(1) - - def test_can_never_inline(self): - def can_never_inline(x): - return x > 50 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - can_never_inline = can_never_inline) - @dont_look_inside - def marker(): - pass - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - x += 1 - if x == 4 or x == 61: - marker() - y -= x - return y - # - res = self.meta_interp(f, [3, 6], repeat=7) - assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle - # - res = self.meta_interp(f, [60, 84], repeat=7) - assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately - - def test_format(self): - def f(n): - return len("<%d>" % n) - res = self.interp_operations(f, [421]) - assert res == 5 - - def test_switch(self): - def f(n): - if n == -5: return 12 - elif n == 2: return 51 - elif n == 7: return 1212 - else: return 42 - res = self.interp_operations(f, 
[7]) - assert res == 1212 - res = self.interp_operations(f, [12311]) - assert res == 42 - - def test_r_uint(self): - from pypy.rlib.rarithmetic import r_uint - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - y = r_uint(y) - while y > 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - y -= 1 - return y - res = self.meta_interp(f, [10]) - assert res == 0 - - def test_uint_operations(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - return ((r_uint(n) - 123) >> 1) <= r_uint(456) - res = self.interp_operations(f, [50]) - assert res == False - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_uint_condition(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - if ((r_uint(n) - 123) >> 1) <= r_uint(456): - return 24 - else: - return 12 - res = self.interp_operations(f, [50]) - assert res == 12 - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_int_between(self): - # - def check(arg1, arg2, arg3, expect_result, **expect_operations): - from pypy.rpython.lltypesystem import lltype - from pypy.rpython.lltypesystem.lloperation import llop - loc = locals().copy() - exec py.code.Source(""" - def f(n, m, p): - arg1 = %(arg1)s - arg2 = %(arg2)s - arg3 = %(arg3)s - return llop.int_between(lltype.Bool, arg1, arg2, arg3) - """ % locals()).compile() in loc - res = self.interp_operations(loc['f'], [5, 6, 7]) - assert res == expect_result - self.check_operations_history(expect_operations) - # - check('n', 'm', 'p', True, int_sub=2, uint_lt=1) - check('n', 'p', 'm', False, int_sub=2, uint_lt=1) - # - check('n', 'm', 6, False, int_sub=2, uint_lt=1) - # - check('n', 4, 'p', False, int_sub=2, uint_lt=1) - check('n', 5, 'p', True, int_sub=2, uint_lt=1) - check('n', 8, 'p', False, int_sub=2, uint_lt=1) - # - check('n', 6, 7, True, int_sub=2, uint_lt=1) - # - check(-2, 'n', 'p', True, int_sub=2, uint_lt=1) - 
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1) - check(-2, 'p', 'm', False, int_sub=2, uint_lt=1) - #check(0, 'n', 'p', True, uint_lt=1) xxx implement me - #check(0, 'm', 'p', True, uint_lt=1) - #check(0, 'p', 'm', False, uint_lt=1) - # - check(2, 'n', 6, True, int_sub=1, uint_lt=1) - check(2, 'm', 6, False, int_sub=1, uint_lt=1) - check(2, 'p', 6, False, int_sub=1, uint_lt=1) - check(5, 'n', 6, True, int_eq=1) # 6 == 5+1 - check(5, 'm', 6, False, int_eq=1) # 6 == 5+1 - # - check(2, 6, 'm', False, int_sub=1, uint_lt=1) - check(2, 6, 'p', True, int_sub=1, uint_lt=1) - # - check(2, 40, 6, False) - check(2, 40, 60, True) - - def test_getfield(self): - class A: - pass - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=1) - - def test_getfield_immutable(self): - class A: - _immutable_ = True - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=0) - - def test_setfield_bool(self): - class A: - def __init__(self): - self.flag = True - myjitdriver = JitDriver(greens = [], reds = ['n', 'obj']) - def f(n): - obj = A() - res = False - while n > 0: - myjitdriver.can_enter_jit(n=n, obj=obj) - myjitdriver.jit_merge_point(n=n, obj=obj) - obj.flag = False - n -= 1 - return res - res = self.meta_interp(f, [7]) - assert type(res) == bool - assert not res - - def test_switch_dict(self): - def f(x): - if x == 1: return 61 - elif x == 2: return 511 - elif x == 3: return -22 - elif x == 4: return 81 - elif x == 5: return 17 - elif x == 6: return 54 - elif x == 7: return 987 - elif x == 8: return -12 - elif x == 9: return 321 - return -1 - res = self.interp_operations(f, [5]) - assert res == 17 - res = self.interp_operations(f, [15]) - assert res == -1 - 
- def test_int_add_ovf(self): - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -98 - res = self.interp_operations(f, [1, sys.maxint]) - assert res == -42 - - def test_int_sub_ovf(self): - def f(x, y): - try: - return ovfcheck(x - y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -102 - res = self.interp_operations(f, [1, -sys.maxint]) - assert res == -42 - - def test_int_mul_ovf(self): - def f(x, y): - try: - return ovfcheck(x * y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -200 - res = self.interp_operations(f, [-3, sys.maxint//2]) - assert res == -42 - - def test_mod_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y']) - def f(n, x, y): - while n > 0: - myjitdriver.can_enter_jit(x=x, y=y, n=n) - myjitdriver.jit_merge_point(x=x, y=y, n=n) - n -= ovfcheck(x % y) - return n - res = self.meta_interp(f, [20, 1, 2]) - assert res == 0 - self.check_loops(call=0) - - def test_abs(self): - myjitdriver = JitDriver(greens = [], reds = ['i', 't']) - def f(i): - t = 0 - while i < 10: - myjitdriver.can_enter_jit(i=i, t=t) - myjitdriver.jit_merge_point(i=i, t=t) - t += abs(i) - i += 1 - return t - res = self.meta_interp(f, [-5]) - assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9 - - def test_float(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - x = float(x) - y = float(y) - res = 0.0 - while y > 0.0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1.0 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42.0 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) - - def test_print(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def f(n): - while n > 0: - 
myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - print n - n -= 1 - return n - res = self.meta_interp(f, [7]) - assert res == 0 - - def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n'], greens = []) - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - n -= 1 - - self.meta_interp(f, [20], repeat=7) - self.check_tree_loop_count(2) # the loop and the entry path - # we get: - # ENTER - compile the new loop and the entry bridge - # ENTER - compile the leaving path - self.check_enter_count(2) - - def test_bridge_from_interpreter_2(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n'], greens = []) - glob = [1] - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - if n == 17 and glob[0]: - glob[0] = 0 - x = n + 1 - y = n + 2 - z = n + 3 - k = n + 4 - n -= 1 - n += x + y + z + k - n -= x + y + z + k - n -= 1 - - self.meta_interp(f, [20], repeat=7) - - def test_bridge_from_interpreter_3(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - glob.x = 1 - x = 0 - y = 0 - z = 0 - k = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k) - mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k) - x += 10 - y += 3 - z -= 15 - k += 4 - if n == 17 and glob.x: - glob.x = 0 - x += n + 1 - y += n + 2 - z += n + 3 - k += n + 4 - n -= 1 - n -= 1 - return x + 2*y + 3*z + 5*k + 13*n - - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_bridge_from_interpreter_4(self): - jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - - def f(n, k): - while n > 0: - jitdriver.can_enter_jit(n=n, k=k) - jitdriver.jit_merge_point(n=n, k=k) - if k: - n -= 2 - else: - n -= 1 - return n + k - - from pypy.rpython.test.test_llinterp import 
get_interpreter, clear_tcache - from pypy.jit.metainterp.warmspot import WarmRunnerDesc - - interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) - clear_tcache() - translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - warmrunnerdesc = WarmRunnerDesc(translator, - CPUClass=self.CPUClass) - state = warmrunnerdesc.jitdrivers_sd[0].warmstate - state.set_param_threshold(3) # for tests - state.set_param_trace_eagerness(0) # for tests - warmrunnerdesc.finish() - for n, k in [(20, 0), (20, 1)]: - interp.eval_graph(graph, [n, k]) - - def test_bridge_leaving_interpreter_5(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - x = 0 - glob.x = 1 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - glob.x += 1 - x += 3 - n -= 1 - glob.x += 100 - return glob.x + x - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_instantiate_classes(self): - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - def f(n): - if n > 5: - cls = A - else: - cls = B - return cls().foo - res = self.interp_operations(f, [3]) - assert res == 8 - res = self.interp_operations(f, [13]) - assert res == 72 - - def test_instantiate_does_not_call(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - - def f(n): - x = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - if n % 2 == 0: - cls = A - else: - cls = B - inst = cls() - x += inst.foo - n -= 1 - return x - res = self.meta_interp(f, [20], enable_opts='') - assert res == f(20) - self.check_loops(call=0) - - def test_zerodivisionerror(self): - # test the case of exception-raising operation that is not delegated - # to the backend at all: ZeroDivisionError - # - def f(n): - assert n >= 0 - try: - 
return ovfcheck(5 % n) - except ZeroDivisionError: - return -666 - except OverflowError: - return -777 - res = self.interp_operations(f, [0]) - assert res == -666 - # - def f(n): - assert n >= 0 - try: - return ovfcheck(6 // n) - except ZeroDivisionError: - return -667 - except OverflowError: - return -778 - res = self.interp_operations(f, [0]) - assert res == -667 - - def test_div_overflow(self): - import sys - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - try: - res += llop.int_floordiv_ovf(lltype.Signed, - -sys.maxint-1, x) - x += 5 - except OverflowError: - res += 100 - y -= 1 - return res - res = self.meta_interp(f, [-41, 16]) - assert res == ((-sys.maxint-1) // (-41) + - (-sys.maxint-1) // (-36) + - (-sys.maxint-1) // (-31) + - (-sys.maxint-1) // (-26) + - (-sys.maxint-1) // (-21) + - (-sys.maxint-1) // (-16) + - (-sys.maxint-1) // (-11) + - (-sys.maxint-1) // (-6) + - 100 * 8) - - def test_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - if n: - obj = A() - else: - obj = B() - return isinstance(obj, B) - res = self.interp_operations(fn, [0]) - assert res - self.check_operations_history(guard_class=1) - res = self.interp_operations(fn, [1]) - assert not res - - def test_isinstance_2(self): - driver = JitDriver(greens = [], reds = ['n', 'sum', 'x']) - class A: - pass - class B(A): - pass - class C(B): - pass - - def main(): - return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100 - - def f(n, x): - sum = 0 - while n > 0: - driver.can_enter_jit(x=x, n=n, sum=sum) - driver.jit_merge_point(x=x, n=n, sum=sum) - if isinstance(x, B): - sum += 1 - n -= 1 - return sum - - res = self.meta_interp(main, []) - assert res == 55 - - def test_assert_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - # this should only be 
called with n != 0 - if n: - obj = B() - obj.a = n - else: - obj = A() - obj.a = 17 - assert isinstance(obj, B) - return obj.a - res = self.interp_operations(fn, [1]) - assert res == 1 - self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) - - def test_r_dict(self): - from pypy.rlib.objectmodel import r_dict - class FooError(Exception): - pass - def myeq(n, m): - return n == m - def myhash(n): - if n < 0: - raise FooError - return -n - def f(n): - d = r_dict(myeq, myhash) - for i in range(10): - d[i] = i*i - try: - return d[n] - except FooError: - return 99 - res = self.interp_operations(f, [5]) - assert res == f(5) - - def test_free_object(self): - import weakref - from pypy.rlib import rgc - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - class X(object): - pass - def main(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= x.foo - def g(n): - x = X() - x.foo = 2 - main(n, x) - x.foo = 5 - return weakref.ref(x) - def f(n): - r = g(n) - rgc.collect(); rgc.collect(); rgc.collect() - return r() is None - # - assert f(30) == 1 - res = self.meta_interp(f, [30], no_stats=True) - assert res == 1 - - def test_pass_around(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - - def call(): - pass - - def f(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - if n % 2: - call() - if n == 8: - return x - x = 3 - else: - x = 5 - n -= 1 - return 0 - - self.meta_interp(f, [40, 0]) - - def test_const_inputargs(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x']) - def f(n, x): - m = 0x7FFFFFFF - while n > 0: - myjitdriver.can_enter_jit(m=m, n=n, x=x) - myjitdriver.jit_merge_point(m=m, n=n, x=x) - x = 42 - n -= 1 - m = m >> 1 - return x - - res = self.meta_interp(f, [50, 1], enable_opts='') - assert res == 42 - - 
def test_set_param(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - def g(n): - x = 0 - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= 1 - x += n - return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) - return g(n) - - res = self.meta_interp(f, [10, 3]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(2) - - res = self.meta_interp(f, [10, 13]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(0) - - def test_dont_look_inside(self): - @dont_look_inside - def g(a, b): - return a + b - def f(a, b): - return g(a, b) - res = self.interp_operations(f, [3, 5]) - assert res == 8 - self.check_operations_history(int_add=0, call=1) - - def test_listcomp(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) - def f(x, y): - lst = [0, 0, 0] - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, lst=lst) - myjitdriver.jit_merge_point(x=x, y=y, lst=lst) - lst = [i+x for i in lst if i >=0] - y -= 1 - return lst[0] - res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True) - # XXX: the loop looks inefficient - assert res == 42 - - def test_tuple_immutable(self): - def new(a, b): - return a, b - def f(a, b): - tup = new(a, b) - return tup[1] - res = self.interp_operations(f, [3, 5]) - assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=1) - - def test_oosend_look_inside_only_one(self): - class A: - pass - class B(A): - def g(self): - return 123 - class C(A): - @dont_look_inside - def g(self): - return 456 - def f(n): - if n > 3: - x = B() - else: - x = C() - return x.g() + x.g() - res = self.interp_operations(f, [10]) - assert res == 123 * 2 - res = self.interp_operations(f, [-10]) - assert res == 456 * 2 - - def test_residual_external_call(self): - import math - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - def f(x, y): - x = float(x) - 
res = 0.0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) - res += ipart - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops(call=1) - - def test_merge_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 5 - class B(A): - def g(self, y): - return y - 3 - - a1 = A() - a2 = A() - b = B() - def f(x): - l = [a1] * 100 + [a2] * 100 + [b] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - x = a.g(x) - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) - - def test_merge_guardnonnull_guardclass(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class 
B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [b1] * 100 + [None] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) - - def test_merge_guardnonnull_guardvalue_2(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - a2 = A() - b1 = B() - def f(x): - l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [399], listops=True) - assert res == f(399) - self.check_loops(guard_class=0, 
guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_residual_call_doesnt_lose_info(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) - - class A(object): - pass - - globall = [""] - @dont_look_inside - def g(x): - globall[0] = str(x) - return x - - def f(x): - y = A() - y.v = x - l = [0] - while y.v > 0: - myjitdriver.can_enter_jit(x=x, y=y, l=l) - myjitdriver.jit_merge_point(x=x, y=y, l=l) - l[0] = y.v - lc = l[0] - y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 - return y.v - res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) - - def test_guard_isnull_nonnull(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - - @dont_look_inside - def create(x): - if x >= -40: - return A() - return None - - def f(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - obj = create(x-1) - if obj is not None: - res += 1 - obj2 = create(x-1000) - if obj2 is None: - res += 1 - x -= 1 - return res - res = self.meta_interp(f, [21]) - assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) - - def test_loop_invariant1(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - a = A() - a.current_a = A() - a.current_a.x = 1 - @loop_invariant - def f(): - return a.current_a - - def g(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - res += f().x - res += f().x - res += f().x - x -= 1 - a.current_a = A() - a.current_a.x = 2 - return res - res = self.meta_interp(g, [21]) - assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) - - def 
test_bug_optimizeopt_mutates_ops(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) - class A(object): - pass - class B(A): - pass - - glob = A() - glob.a = None - def f(x): - res = 0 - a = A() - a.x = 0 - glob.a = A() - const = 2 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const) - myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const) - if type(glob.a) is B: - res += 1 - if a is None: - a = A() - a.x = x - glob.a = B() - const = 2 - else: - const = hint(const, promote=True) - x -= const - res += a.x - a = None - glob.a = A() - const = 1 - return res - res = self.meta_interp(f, [21]) - assert res == f(21) - - def test_getitem_indexerror(self): - lst = [10, 4, 9, 16] - def f(n): - try: - return lst[n] - except IndexError: - return -2 - res = self.interp_operations(f, [2]) - assert res == 9 - res = self.interp_operations(f, [4]) - assert res == -2 - res = self.interp_operations(f, [-4]) - assert res == 10 - res = self.interp_operations(f, [-5]) - assert res == -2 - - def test_guard_always_changing_value(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - a = A() - hint(a, promote=True) - x -= 1 - self.meta_interp(f, [50]) - self.check_loop_count(1) - # this checks that the logic triggered by make_a_counter_per_value() - # works and prevents generating tons of bridges - - def test_swap_values(self): - def f(x, y): - if x > 5: - x, y = y, x - return x - y - res = self.interp_operations(f, [10, 2]) - assert res == -8 - res = self.interp_operations(f, [3, 2]) - assert res == 1 - - def test_raw_malloc_and_access(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Signed) - - def f(n): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = n - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10]) - assert res == 10 - - def 
test_raw_malloc_and_access_float(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Float) - - def f(n, f): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = f - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10, 3.5]) - assert res == 3.5 - - def test_jit_debug(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - jit_debug("hi there:", x) - jit_debug("foobar") - x -= 1 - return x - res = self.meta_interp(f, [8]) - assert res == 0 - self.check_loops(jit_debug=2) - - def test_assert_green(self): - def f(x, promote): - if promote: - x = hint(x, promote=True) - assert_green(x) - return x - res = self.interp_operations(f, [8, 1]) - assert res == 8 - py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0]) - - def test_multiple_specialied_versions1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 7]) - assert res == 6*8 + 6**8 - self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) - - def test_multiple_specialied_versions_array(self): - myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', - 'array']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - 
def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val - other.val) - def f(x, y): - res = x - array = [1, 2, 3] - array[1] = 7 - idx = 0 - while y > 0: - myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res, - array=array) - myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res, - array=array) - res = res.binop(x) - res.val += array[idx] + array[1] - if y < 7: - idx = 2 - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) - - def test_multiple_specialied_versions_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - b1 = f(B(x), y, B(x)) - b2 = f(B(x), y, B(x)) - assert b1.val == b2.val - c1 = f(B(x), y, A(x)) - c2 = f(B(x), y, A(x)) - assert c1.val == c2.val - d1 = f(A(x), y, B(x)) - d2 = f(A(x), y, B(x)) - assert d1.val == d2.val - return a1.val + b1.val + c1.val + d1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_failing_inlined_guard(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val 
- class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 8: - x = z - return res - def g(x, y): - c1 = f(A(x), y, B(x)) - c2 = f(A(x), y, B(x)) - assert c1.val == c2.val - return c1.val - res = self.meta_interp(g, [3, 16]) - assert res == g(3, 16) - - def test_inlined_guard_in_short_preamble(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class A: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - def binop(self, other): - return A(self.getval() + other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return A(self.val + other.val) - def f(x, y): - res = A(0) - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(A(y)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_specialied_bridge_const(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return 
A(self.val + other.val) - def f(x, y): - res = A(0) - const = 7 - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const) - myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const) - const = hint(const, promote=True) - res = res.binop(A(const)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_multiple_specialied_zigzag(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - def switch(self): - return B(self.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def switch(self): - return A(self.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - if y % 4 == 0: - res = res.switch() - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [3, 23]) - assert res == 7068153 - self.check_loop_count(6) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) - - def test_dont_trace_every_iteration(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - - def main(a, b): - i = sa = 0 - #while i < 200: - while i < 200: - myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa) - myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa) - if a > 0: pass - if b < 2: pass - sa += a % b - i += 1 - return sa - def g(): - return main(10, 20) + main(-10, -20) - res = self.meta_interp(g, []) - assert res == g() - self.check_enter_count(2) - - def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = 
['x']) - @dont_look_inside - def residual(): - print "hi there" - @unroll_safe - def loop(g): - y = 0 - while y < g: - residual() - y += 1 - def f(x, g): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) - loop(g) - x -= 1 - n = current_trace_length() - return n - res = self.meta_interp(f, [5, 8]) - assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 - - def test_compute_identity_hash(self): - from pypy.rlib.objectmodel import compute_identity_hash - class A(object): - pass - def f(): - a = A() - return compute_identity_hash(a) == compute_identity_hash(a) - res = self.interp_operations(f, []) - assert res - # a "did not crash" kind of test - - def test_compute_unique_id(self): - from pypy.rlib.objectmodel import compute_unique_id - class A(object): - pass - def f(): - a1 = A() - a2 = A() - return (compute_unique_id(a1) == compute_unique_id(a1) and - compute_unique_id(a1) != compute_unique_id(a2)) - res = self.interp_operations(f, []) - assert res - - def test_wrap_around_add(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x += 1 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint-10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_mul(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x *= 2 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint>>10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_sub(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x < 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x -= 1 - n += 1 - return n - res 
= self.meta_interp(f, [10-sys.maxint]) - assert res == 12 - self.check_tree_loop_count(2) - - - -class TestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - 
enable_opts='') - assert res - -class BaseLLtypeTests(BasicTests): - - def test_identityhash(self): - A = lltype.GcStruct("A") - def f(): - obj1 = lltype.malloc(A) - obj2 = lltype.malloc(A) - return lltype.identityhash(obj1) == lltype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oops_on_nongc(self): - from pypy.rpython.lltypesystem import lltype - - TP = lltype.Struct('x') - def f(i1, i2): - p1 = prebuilt[i1] - p2 = prebuilt[i2] - a = p1 is p2 - b = p1 is not p2 - c = bool(p1) - d = not bool(p2) - return 1000*a + 100*b + 10*c + d - prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2 - expected = f(0, 1) - assert self.interp_operations(f, [0, 1]) == expected - - def test_casts(self): - py.test.skip("xxx fix or kill") - if not self.basic: - py.test.skip("test written in a style that " - "means it's frontend only") - from pypy.rpython.lltypesystem import lltype, llmemory, rffi - - TP = lltype.GcStruct('S1') - def f(p): - n = lltype.cast_ptr_to_int(p) - return n - x = lltype.malloc(TP) - xref = lltype.cast_opaque_ptr(llmemory.GCREF, x) - res = self.interp_operations(f, [xref]) - y = llmemory.cast_ptr_to_adr(x) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - # - TP = lltype.Struct('S2') - prebuilt = [lltype.malloc(TP, immortal=True), - lltype.malloc(TP, immortal=True)] - def f(x): - p = prebuilt[x] - n = lltype.cast_ptr_to_int(p) - return n - res = self.interp_operations(f, [1]) - y = llmemory.cast_ptr_to_adr(prebuilt[1]) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - - def test_collapsing_ptr_eq(self): - S = lltype.GcStruct('S') - p = lltype.malloc(S) - driver = JitDriver(greens = [], reds = ['n', 'x']) - - def f(n, x): - while n > 0: - driver.can_enter_jit(n=n, x=x) - driver.jit_merge_point(n=n, x=x) - if x: - n -= 1 - n -= 1 - - def main(): - f(10, p) - f(10, lltype.nullptr(S)) - - self.meta_interp(main, []) - 
- def test_enable_opts(self): - jitdriver = JitDriver(greens = [], reds = ['a']) - - class A(object): - def __init__(self, i): - self.i = i - - def f(): - a = A(0) - - while a.i < 10: - jitdriver.jit_merge_point(a=a) - jitdriver.can_enter_jit(a=a) - a = A(a.i + 1) - - self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) - self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) - -class TestLLtype(BaseLLtypeTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver, hint, purefunction from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class SendTests(object): diff --git a/pypy/jit/backend/cli/test/test_basic.py b/pypy/jit/backend/cli/test/test_basic.py --- a/pypy/jit/backend/cli/test/test_basic.py +++ b/pypy/jit/backend/cli/test/test_basic.py @@ -1,14 +1,14 @@ import py from pypy.jit.backend.cli.runner import CliCPU -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit -class CliJitMixin(test_basic.OOJitMixin): +class CliJitMixin(suport.OOJitMixin): CPUClass = CliCPU def setup_class(cls): from pypy.translator.cli.support import PythonNet PythonNet.System # possibly raises Skip -class TestBasic(CliJitMixin, test_basic.TestOOtype): +class TestBasic(CliJitMixin, test_ajit.TestOOtype): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from 
pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver from pypy.rlib import objectmodel diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.jitprof import * diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class DelTests: diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -5,7 +5,7 @@ from pypy.rlib.libffi import ArgChain from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestFfiCall(LLJitMixin, _TestLibffiCall): diff --git a/pypy/jit/tl/tla/test_tla.py b/pypy/jit/tl/tla/test_tla.py --- a/pypy/jit/tl/tla/test_tla.py +++ b/pypy/jit/tl/tla/test_tla.py @@ -155,7 +155,7 @@ # ____________________________________________________________ -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): def test_loop(self): diff --git 
a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class ToyLanguageTests: diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test import test_loop -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.metainterp.test.test_basic 
import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py --- a/pypy/jit/metainterp/test/test_immutable.py +++ b/pypy/jit/metainterp/test/test_immutable.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ImmutableFieldsTests: diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -1,6 +1,6 @@ import py -from pypy.jit.metainterp.test.test_basic import JitMixin +from pypy.jit.metainterp.test.support import JitMixin from pypy.jit.tl.spli import interpreter, objects, serializer from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.backend.llgraph import runner diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -12,7 +12,7 @@ import py from pypy.jit.metainterp.memmgr import MemoryManager -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside diff --git a/pypy/jit/backend/x86/test/test_basic.py b/pypy/jit/backend/x86/test/test_basic.py --- a/pypy/jit/backend/x86/test/test_basic.py +++ b/pypy/jit/backend/x86/test/test_basic.py @@ -1,18 +1,18 @@ import py from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit from 
pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver -class Jit386Mixin(test_basic.LLJitMixin): +class Jit386Mixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass -class TestBasic(Jit386Mixin, test_basic.BaseLLtypeTests): +class TestBasic(Jit386Mixin, test_ajit.BaseLLtypeTests): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py def test_bug(self): diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, hint from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype diff --git a/pypy/jit/metainterp/test/test_float.py b/pypy/jit/metainterp/test/test_float.py --- a/pypy/jit/metainterp/test/test_float.py +++ b/pypy/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class FloatTests: diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -6,7 +6,7 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import BoxInt -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES diff --git 
a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder from pypy.jit.metainterp.blackhole import BlackholeInterpreter from pypy.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, vref_None from pypy.rlib.jit import virtual_ref, virtual_ref_finish from pypy.rlib.objectmodel import compute_unique_id -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.virtualref import VirtualRefInfo diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class StringTests: diff --git a/pypy/jit/metainterp/test/test_tlc.py b/pypy/jit/metainterp/test/test_tlc.py --- a/pypy/jit/metainterp/test/test_tlc.py +++ b/pypy/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from pypy.jit.tl import tlc -from 
pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class TLCTests: diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver class ListTests(object): diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 @@ import py, sys -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.jit.codewriter.policy import StopAtXPolicy From commits-noreply at bitbucket.org Tue Apr 12 15:55:49 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 12 Apr 2011 15:55:49 +0200 (CEST) Subject: [pypy-svn] pypy default: actually add support Message-ID: <20110412135549.289032A202D@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43311:87dfc41d5393 Date: 2011-04-12 15:55 +0200 http://bitbucket.org/pypy/pypy/changeset/87dfc41d5393/ Log: actually add support diff --git a/pypy/jit/metainterp/test/support.py 
b/pypy/jit/metainterp/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/support.py @@ -0,0 +1,261 @@ + +import py, sys +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.ootypesystem import ootype +from pypy.jit.backend.llgraph import runner +from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter import longlong + +def _get_jitcodes(testself, CPUClass, func, values, type_system, + supports_longlong=False, **kwds): + from pypy.jit.codewriter import support, codewriter + + class FakeJitCell: + __compiled_merge_points = [] + def get_compiled_merge_points(self): + return self.__compiled_merge_points[:] + def set_compiled_merge_points(self, lst): + self.__compiled_merge_points = lst + + class FakeWarmRunnerState: + def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): + pass + + def jit_cell_at_key(self, greenkey): + assert greenkey == [] + return self._cell + _cell = FakeJitCell() + + trace_limit = sys.maxint + enable_opts = ALL_OPTS_DICT + + func._jit_unroll_safe_ = True + rtyper = support.annotate(func, values, type_system=type_system) + graphs = rtyper.annotator.translator.graphs + result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] + + class FakeJitDriverSD: + num_green_args = 0 + portal_graph = graphs[0] + virtualizable_info = None + greenfield_info = None + result_type = result_kind + portal_runner_ptr = "???" 
+ + stats = history.Stats() + cpu = CPUClass(rtyper, stats, None, False) + cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) + testself.cw = cw + policy = JitPolicy() + policy.set_supports_longlong(supports_longlong) + cw.find_all_graphs(policy) + # + testself.warmrunnerstate = FakeWarmRunnerState() + testself.warmrunnerstate.cpu = cpu + FakeJitDriverSD.warmstate = testself.warmrunnerstate + if hasattr(testself, 'finish_setup_for_interp_operations'): + testself.finish_setup_for_interp_operations() + # + cw.make_jitcodes(verbose=True) + +def _run_with_blackhole(testself, args): + from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder + cw = testself.cw + blackholeinterpbuilder = BlackholeInterpBuilder(cw) + blackholeinterp = blackholeinterpbuilder.acquire_interp() + count_i = count_r = count_f = 0 + for value in args: + T = lltype.typeOf(value) + if T == lltype.Signed: + blackholeinterp.setarg_i(count_i, value) + count_i += 1 + elif T == llmemory.GCREF: + blackholeinterp.setarg_r(count_r, value) + count_r += 1 + elif T == lltype.Float: + value = longlong.getfloatstorage(value) + blackholeinterp.setarg_f(count_f, value) + count_f += 1 + else: + raise TypeError(T) + [jitdriver_sd] = cw.callcontrol.jitdrivers_sd + blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) + blackholeinterp.run() + return blackholeinterp._final_result_anytype() + +def _run_with_pyjitpl(testself, args): + + class DoneWithThisFrame(Exception): + pass + + class DoneWithThisFrameRef(DoneWithThisFrame): + def __init__(self, cpu, *args): + DoneWithThisFrame.__init__(self, *args) + + cw = testself.cw + opt = history.Options(listops=True) + metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + metainterp_sd.finish_setup(cw) + [jitdriver_sd] = metainterp_sd.jitdrivers_sd + metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) + metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame + metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef + 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame + testself.metainterp = metainterp + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + except DoneWithThisFrame, e: + #if conftest.option.view: + # metainterp.stats.view() + return e.args[0] + else: + raise Exception("FAILED") + +def _run_with_machine_code(testself, args): + metainterp = testself.metainterp + num_green_args = metainterp.jitdriver_sd.num_green_args + loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) + if len(loop_tokens) != 1: + return NotImplemented + # a loop was successfully created by _run_with_pyjitpl(); call it + cpu = metainterp.cpu + for i in range(len(args) - num_green_args): + x = args[num_green_args + i] + typecode = history.getkind(lltype.typeOf(x)) + set_future_value(cpu, i, x, typecode) + faildescr = cpu.execute_token(loop_tokens[0]) + assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') + if metainterp.jitdriver_sd.result_type == history.INT: + return cpu.get_latest_value_int(0) + elif metainterp.jitdriver_sd.result_type == history.REF: + return cpu.get_latest_value_ref(0) + elif metainterp.jitdriver_sd.result_type == history.FLOAT: + return cpu.get_latest_value_float(0) + else: + return None + + +class JitMixin: + basic = True + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) + def check_loop_count(self, count): + """NB. This is a hack; use check_tree_loop_count() or + check_enter_count() for the real thing. 
+ This counts as 1 every bridge in addition to every loop; and it does + not count at all the entry bridges from interpreter, although they + are TreeLoops as well.""" + assert get_stats().compiled_count == count + def check_tree_loop_count(self, count): + assert len(get_stats().loops) == count + def check_loop_count_at_most(self, count): + assert get_stats().compiled_count <= count + def check_enter_count(self, count): + assert get_stats().enter_count == count + def check_enter_count_at_most(self, count): + assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): + assert get_stats().aborted_count == count + def check_aborted_count_at_least(self, count): + assert get_stats().aborted_count >= count + + def meta_interp(self, *args, **kwds): + kwds['CPUClass'] = self.CPUClass + kwds['type_system'] = self.type_system + if "backendopt" not in kwds: + kwds["backendopt"] = False + return ll_meta_interp(*args, **kwds) + + def interp_operations(self, f, args, **kwds): + # get the JitCodes for the function f + _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + # try to run it with blackhole.py + result1 = _run_with_blackhole(self, args) + # try to run it with pyjitpl.py + result2 = _run_with_pyjitpl(self, args) + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented + # + if (longlong.supports_longlong and + isinstance(result1, longlong.r_float_storage)): + result1 = longlong.getrealfloat(result1) + return result1 + + def check_history(self, expected=None, **isns): + # this can be used after calling meta_interp + get_stats().check_history(expected, **isns) + + def check_operations_history(self, expected=None, **isns): + # this can be used after interp_operations + if expected is not None: + expected = dict(expected) + 
expected['jump'] = 1 + self.metainterp.staticdata.stats.check_history(expected, **isns) + + +class LLJitMixin(JitMixin): + type_system = 'lltype' + CPUClass = runner.LLtypeCPU + + @staticmethod + def Ptr(T): + return lltype.Ptr(T) + + @staticmethod + def GcStruct(name, *fields, **kwds): + S = lltype.GcStruct(name, *fields, **kwds) + return S + + malloc = staticmethod(lltype.malloc) + nullptr = staticmethod(lltype.nullptr) + + @staticmethod + def malloc_immortal(T): + return lltype.malloc(T, immortal=True) + + def _get_NODE(self): + NODE = lltype.GcForwardReference() + NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), + ('next', lltype.Ptr(NODE)))) + return NODE + +class OOJitMixin(JitMixin): + type_system = 'ootype' + #CPUClass = runner.OOtypeCPU + + def setup_class(cls): + py.test.skip("ootype tests skipped for now") + + @staticmethod + def Ptr(T): + return T + + @staticmethod + def GcStruct(name, *fields, **kwds): + if 'hints' in kwds: + kwds['_hints'] = kwds['hints'] + del kwds['hints'] + I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) + return I + + malloc = staticmethod(ootype.new) + nullptr = staticmethod(ootype.null) + + @staticmethod + def malloc_immortal(T): + return ootype.new(T) + + def _get_NODE(self): + NODE = ootype.Instance('NODE', ootype.ROOT, {}) + NODE._add_fields({'value': ootype.Signed, + 'next': NODE}) + return NODE From commits-noreply at bitbucket.org Tue Apr 12 18:30:06 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 12 Apr 2011 18:30:06 +0200 (CEST) Subject: [pypy-svn] pypy default: skip this test because of a hotspot bug Message-ID: <20110412163006.CDA1E2A202E@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43312:1a8f776a2f17 Date: 2011-04-12 16:29 +0000 http://bitbucket.org/pypy/pypy/changeset/1a8f776a2f17/ Log: skip this test because of a hotspot bug diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- 
a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 @@ +import py from pypy.translator.jvm.test.runtest import JvmTest from pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') From commits-noreply at bitbucket.org Tue Apr 12 21:06:17 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 21:06:17 +0200 (CEST) Subject: [pypy-svn] pypy default: Support ctypes if the interpreter has no threads. Message-ID: <20110412190617.D294D2A202E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43313:6f621aabc17a Date: 2011-04-12 21:04 +0200 http://bitbucket.org/pypy/pypy/changeset/6f621aabc17a/ Log: Support ctypes if the interpreter has no threads. diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 From commits-noreply at bitbucket.org Tue Apr 12 22:19:44 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 22:19:44 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: hg merge default Message-ID: <20110412201944.6B71936C053@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43314:1fb8995fe302 Date: 2011-04-12 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/1fb8995fe302/ Log: hg merge default diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ 
b/pypy/module/cpyext/pyerrors.py @@ -39,6 +39,10 @@ state = space.fromcache(State) state.clear_exception() + at cpython_api([PyObject], PyObject) +def PyExceptionInstance_Class(space, w_obj): + return space.type(w_obj) + @cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void) def PyErr_Fetch(space, ptype, pvalue, ptraceback): """Retrieve the error indicator into three variables whose addresses are passed. @@ -303,3 +307,11 @@ operror = state.clear_exception() if operror: operror.write_unraisable(space, space.str_w(space.repr(w_where))) + + at cpython_api([], lltype.Void) +def PyErr_SetInterrupt(space): + """This function simulates the effect of a SIGINT signal arriving --- the + next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. + It may be called without holding the interpreter lock.""" + space.check_signal_action.set_interrupt() + diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ 
-1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. 
- if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? - m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -108,6 +108,7 @@ Anders Qvist Alan McIntyre Bert Freudenberg + Tav Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), 
Sweden @@ -118,13 +119,16 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. -License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -157,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. 
- UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -870,6 +870,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,6 +103,7 @@ except KeyError: subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, weakrefable) + assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): 
continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. 
- - Longer tasks ------------ diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -6,6 +6,7 @@ from pypy.tool.udir import udir from pypy.rlib import streamio from pypy.conftest import gettestobjspace +import pytest import sys, os import tempfile, marshal @@ -109,6 +110,14 @@ p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), mode='wb') + # create a .pyw file + p = setuppkg("windows", x = "x = 78") + try: + p.join('x.pyw').remove() + except py.error.ENOENT: + pass + p.join('x.py').rename(p.join('x.pyw')) + return str(root) @@ -177,6 +186,14 @@ import a assert a == a0 + def test_trailing_slash(self): + import sys + try: + sys.path[0] += '/' + import a + finally: + sys.path[0] = sys.path[0].rstrip('/') + def test_import_pkg(self): import sys import pkg @@ -325,6 +342,11 @@ import compiled.x assert compiled.x == sys.modules.get('compiled.x') + @pytest.mark.skipif("sys.platform != 'win32'") + def test_pyw(self): + import windows.x + assert windows.x.__file__.endswith('x.pyw') + def test_cannot_write_pyc(self): import sys, os p = os.path.join(sys.path[-1], 'readonly') @@ -985,7 +1007,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver, hint, purefunction from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class SendTests(object): diff --git 
a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,6 +1,7 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver +from 
pypy.rlib import objectmodel class DictTests: @@ -69,6 +70,66 @@ res = self.meta_interp(f, [10], listops=True) assert res == expected + def test_dict_trace_hash(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + if total not in dct: + dct[total] = [] + dct[total].append(total) + total -= 1 + return len(dct[0]) + + res1 = f(100) + res2 = self.meta_interp(f, [100], listops=True) + assert res1 == res2 + self.check_loops(int_mod=1) # the hash was traced + + def test_dict_setdefault(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def f(n): + dct = {} + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct.setdefault(total % 2, []).append(total) + total -= 1 + return len(dct[0]) + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(new=0, new_with_vtable=0) + + def test_dict_as_counter(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = dct.get(total, 0) + 1 + total -= 1 + return dct[0] + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(int_mod=1) + class TestOOtype(DictTests, OOJitMixin): pass diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel 
import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. 
the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - 
resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -147,7 +157,6 @@ FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token - virtualrefindexdescr = vrefinfo.descr_virtualref_index virtualforceddescr = vrefinfo.descr_forced jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) @@ -156,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, 
jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,8 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop @@ -21,6 +23,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -34,6 +38,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -212,10 +218,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -224,7 +232,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. 
# The addresses are actually grouped in pairs: @@ -237,6 +245,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... @@ -365,7 +380,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +403,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. + """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. + # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. 
+ + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= 
self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw') + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +572,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +581,9 @@ # where it can be fished and reused by the FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) @@ -461,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() 
self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -539,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -562,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -710,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = 
arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. 
- self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. 
self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 
@@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -79,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + 
return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.jitprof import * diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - 
py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -127,12 +127,15 @@ checks[2], checks[3])) subclasses = {} for key, subcls in typedef._subclass_cache.items(): + if key[0] is not space.config: + continue cls = key[1] subclasses.setdefault(cls, {}) - subclasses[cls][subcls] = True + prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls) + assert subcls is prevsubcls for cls, set in subclasses.items(): assert len(set) <= 6, "%s has %d subclasses:\n%r" % ( - cls, len(set), [subcls.__name__ for subcls in set]) + cls, len(set), list(set)) def test_getsetproperty(self): class W_SomeType(Wrappable): diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ pypy/doc/*.html pypy/doc/config/*.html pypy/doc/discussion/*.html +pypy/module/cpyext/src/*.o +pypy/module/cpyext/test/*.o pypy/module/test_lib_pypy/ctypes_tests/*.o pypy/translator/c/src/dtoa.o pypy/translator/goal/pypy-c diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def 
pytest_collectreport(self, report): diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + 
gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,8 +5,8 @@ soon as possible (at least in a simple case). 
""" -import weakref, random, sys -import py +import weakref, sys +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -71,6 +71,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -86,8 +100,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts='') + patch = get_functions_to_patch() + old_value = {} + try: + for (obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts='') + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source() cbuilder.compile() @@ -126,7 +153,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark GC. 
EXTRA_PARAMS = {} @@ -179,16 +206,22 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True, + gcrootfinder=cls.gcrootfinder, jit=True, **cls.EXTRA_PARAMS) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -580,10 +613,15 @@ self.run('compile_framework_minimal_size_in_nursery') -class TestCompressPtr(TestCompileFramework): +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestCompressPtr(TestShadowStack): EXTRA_PARAMS = {'compressptr': True} - def setup_class(cls): if sys.maxint == 2147483647: py.test.skip("for 64-bit only") - TestCompileFramework.setup_class.im_func(cls) + TestShadowStack.setup_class.im_func(cls) + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" diff --git a/pypy/rlib/rdtoa.py b/pypy/rlib/rdtoa.py --- a/pypy/rlib/rdtoa.py +++ b/pypy/rlib/rdtoa.py @@ -5,16 +5,33 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder -import py +import py, sys cdir = py.path.local(pypydir) / 'translator' / 'c' include_dirs = [cdir] +# set the word endianness based on the host's endianness +# and the C double's endianness (which should be equal) +if hasattr(float, '__getformat__'): + assert float.__getformat__('double') == 'IEEE, %s-endian' % sys.byteorder +if sys.byteorder == 'little': + source_file = ['#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754'] +elif 
sys.byteorder == 'big': + source_file = ['#define WORDS_BIGENDIAN', + '#define DOUBLE_IS_BIG_ENDIAN_IEEE754'] +else: + raise AssertionError(sys.byteorder) + +source_file.append('#include "src/dtoa.c"') +source_file = '\n\n'.join(source_file) + +# ____________________________________________________________ + eci = ExternalCompilationInfo( include_dirs = [cdir], includes = ['src/dtoa.h'], libraries = [], - separate_module_files = [cdir / 'src' / 'dtoa.c'], + separate_module_sources = [source_file], export_symbols = ['_PyPy_dg_strtod', '_PyPy_dg_dtoa', '_PyPy_dg_freedtoa', diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from 
pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class DelTests: diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -12,12 +12,13 @@ 'get_ident': 'os_thread.get_ident', 'exit': 'os_thread.exit', 'exit_thread': 'os_thread.exit', # obsolete synonym + 'interrupt_main': 'os_thread.interrupt_main', 'stack_size': 'os_thread.stack_size', '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_wait.py @@ -0,0 +1,51 @@ +from ctypes import CDLL, c_int, POINTER, byref +from ctypes.util import find_library +from resource import _struct_rusage, struct_rusage + +__all__ = ["wait3", "wait4"] + +libc = CDLL(find_library("c")) +c_wait3 = libc.wait3 + +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] + +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + +def wait3(options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage + +def wait4(pid, options): + status = 
c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -34,11 +34,7 @@ @jit.purefunction def _getcell_makenew(self, key): - res = self.content.get(key, None) - if res is not None: - return res - result = self.content[key] = ModuleCell() - return result + return self.content.setdefault(key, ModuleCell()) def impl_setitem(self, w_key, w_value): space = self.space @@ -50,6 +46,16 @@ def impl_setitem_str(self, name, w_value): self.getcell(name, True).w_value = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + cell = self.getcell(space.str_w(w_key), True) + if cell.w_value is None: + cell.w_value = w_default + return cell.w_value + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -7,6 +7,7 @@ CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject +from pypy.objspace.std.objectobject import W_ObjectObject from pypy.rlib.objectmodel import specialize, we_are_translated from pypy.rlib.rweakref import RWeakKeyDictionary from pypy.rpython.annlowlevel import llhelper @@ -370,6 +371,15 @@ @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): obj.c_ob_refcnt = 1 + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + assert isinstance(w_type, W_TypeObject) + if w_type.is_cpytype(): + w_obj = space.allocate_instance(W_ObjectObject, w_type) + 
track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + else: + assert False, "Please add more cases in _Py_NewReference()" def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -27,7 +27,10 @@ from pyrepl.console import Console, Event from pyrepl import unix_eventqueue -_error = (termios.error, curses.error) +class InvalidTerminal(RuntimeError): + pass + +_error = (termios.error, curses.error, InvalidTerminal) # there are arguments for changing this to "refresh" SIGWINCH_EVENT = 'repaint' @@ -38,7 +41,7 @@ def _my_getstr(cap, optional=0): r = curses.tigetstr(cap) if not optional and r is None: - raise RuntimeError, \ + raise InvalidTerminal, \ "terminal doesn't have the required '%s' capability"%cap return r @@ -289,6 +292,12 @@ self.__write_code(self._el) self.__write(newline[x:]) self.__posxy = len(newline), y + + if '\x1b' in newline: + # ANSI escape characters are present, so we can't assume + # anything about the position of the cursor. Moving the cursor + # to the left margin should work to get to a known position. 
+ self.move_cursor(0, y) def __write(self, text): self.__buffer.append((text, 0)) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitely encoded as well, +# with immediate(argnum)). 
+ +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,7 +364,9 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +384,8 @@ INSN_bi32(mc, offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,23 +463,25 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, 
OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) - CMP_jr = insn(rex_w, '\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -508,7 +529,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -534,12 +555,15 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, 
'\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion @@ -639,7 +663,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -680,7 +704,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/metainterp/virtualref.py b/pypy/jit/metainterp/virtualref.py --- a/pypy/jit/metainterp/virtualref.py +++ b/pypy/jit/metainterp/virtualref.py @@ -13,7 +13,6 @@ self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef', ('super', rclass.OBJECT), ('virtual_token', lltype.Signed), - ('virtualref_index', lltype.Signed), ('forced', rclass.OBJECTPTR)) self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', @@ -27,8 +26,6 @@ fielddescrof = self.cpu.fielddescrof self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF, 'virtual_token') - self.descr_virtualref_index = fielddescrof(self.JIT_VIRTUAL_REF, - 'virtualref_index') self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced') # # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if frame.instr_lb <= 
frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py --- a/pypy/jit/metainterp/test/test_immutable.py +++ b/pypy/jit/metainterp/test/test_immutable.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ImmutableFieldsTests: diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from 
pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert 
os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -12,7 +12,7 @@ import py from pypy.jit.metainterp.memmgr import MemoryManager -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside diff --git a/pypy/doc/config/confrest.py b/pypy/doc/config/confrest.py --- a/pypy/doc/config/confrest.py +++ b/pypy/doc/config/confrest.py @@ -7,7 +7,6 @@ all_optiondescrs = [pypyoption.pypy_optiondescription, translationoption.translation_optiondescription, ] - start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) class PyPyPage(PyPyPage): @@ -29,7 +28,7 @@ Page = PyPyPage def get_content(self, txtpath, encoding): - if txtpath.basename == "commandline.txt": + if txtpath.basename == "commandline.rst": result = [] for line in txtpath.read().splitlines(): if line.startswith('.. 
GENERATE:'): diff --git a/pypy/jit/metainterp/test/test_float.py b/pypy/jit/metainterp/test/test_float.py --- a/pypy/jit/metainterp/test/test_float.py +++ b/pypy/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class FloatTests: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def _clone_if_mutable(self): res = ResumeGuardDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + 
'\x04\0\0\0') + diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py --- a/pypy/rpython/lltypesystem/rdict.py +++ b/pypy/rpython/lltypesystem/rdict.py @@ -4,13 +4,16 @@ from pypy.rpython.rdict import AbstractDictRepr, AbstractDictIteratorRepr,\ rtype_newdict from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rlib.rarithmetic import r_uint, intmask +from pypy.rlib.rarithmetic import r_uint, intmask, LONG_BIT from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import hlinvoke from pypy.rpython import robject -from pypy.rlib import objectmodel +from pypy.rlib import objectmodel, jit from pypy.rpython import rmodel +HIGHEST_BIT = intmask(1 << (LONG_BIT - 1)) +MASK = intmask(HIGHEST_BIT - 1) + # ____________________________________________________________ # # generic implementation of RPython dictionary, with parametric DICTKEY and @@ -405,6 +408,10 @@ ENTRIES = lltype.typeOf(entries).TO return ENTRIES.fasthashfn(entries[i].key) + at jit.dont_look_inside +def ll_get_value(d, i): + return d.entries[i].value + def ll_keyhash_custom(d, key): DICT = lltype.typeOf(d).TO return hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key) @@ -422,18 +429,21 @@ def ll_dict_getitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - entries = d.entries - if entries.valid(i): - return entries[i].value - else: - raise KeyError -ll_dict_getitem.oopspec = 'dict.getitem(d, key)' + if not i & HIGHEST_BIT: + return ll_get_value(d, i) + else: + raise KeyError def ll_dict_setitem(d, key, value): hash = d.keyhash(key) i = ll_dict_lookup(d, key, hash) + return _ll_dict_setitem_lookup_done(d, key, value, hash, i) + + at jit.dont_look_inside +def _ll_dict_setitem_lookup_done(d, key, value, hash, i): + valid = (i & HIGHEST_BIT) == 0 + i = i & MASK everused = d.entries.everused(i) - valid = d.entries.valid(i) # set up the new entry ENTRY = lltype.typeOf(d.entries).TO.OF entry = d.entries[i] @@ -449,7 +459,6 @@ 
d.num_pristine_entries -= 1 if d.num_pristine_entries <= len(d.entries) / 3: ll_dict_resize(d) -ll_dict_setitem.oopspec = 'dict.setitem(d, key, value)' def ll_dict_insertclean(d, key, value, hash): # Internal routine used by ll_dict_resize() to insert an item which is @@ -470,7 +479,7 @@ def ll_dict_delitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - if not d.entries.valid(i): + if i & HIGHEST_BIT: raise KeyError _ll_dict_del(d, i) ll_dict_delitem.oopspec = 'dict.delitem(d, key)' @@ -542,7 +551,7 @@ elif entries.everused(i): freeslot = i else: - return i # pristine entry -- lookup failed + return i | HIGHEST_BIT # pristine entry -- lookup failed # In the loop, a deleted entry (everused and not valid) is by far # (factor of 100s) the least likely outcome, so test for that last. @@ -557,7 +566,7 @@ if not entries.everused(i): if freeslot == -1: freeslot = i - return freeslot + return freeslot | HIGHEST_BIT elif entries.valid(i): checkingkey = entries[i].key if direct_compare and checkingkey == key: @@ -711,22 +720,19 @@ def ll_get(dict, key, default): i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value - else: + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) + else: return default -ll_get.oopspec = 'dict.get(dict, key, default)' def ll_setdefault(dict, key, default): - i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value + hash = dict.keyhash(key) + i = ll_dict_lookup(dict, key, hash) + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) else: - ll_dict_setitem(dict, key, default) + _ll_dict_setitem_lookup_done(dict, key, default, hash, i) return default -ll_setdefault.oopspec = 'dict.setdefault(dict, key, default)' def ll_copy(dict): DICT = lltype.typeOf(dict).TO @@ -768,7 +774,10 @@ while i < d2len: if entries.valid(i): entry = entries[i] - ll_dict_setitem(dic1, entry.key, entry.value) + hash = 
entries.hash(i) + key = entry.key + j = ll_dict_lookup(dic1, key, hash) + _ll_dict_setitem_lookup_done(dic1, key, entry.value, hash, j) i += 1 ll_update.oopspec = 'dict.update(dic1, dic2)' @@ -833,8 +842,7 @@ def ll_contains(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - return d.entries.valid(i) -ll_contains.oopspec = 'dict.contains(d, key)' + return not i & HIGHEST_BIT POPITEMINDEX = lltype.Struct('PopItemIndex', ('nextindex', lltype.Signed)) global_popitem_index = lltype.malloc(POPITEMINDEX, zero=True, immortal=True) diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -15,7 +15,7 @@ ## The problem ## ----------- ## -## PyString_AsString() must returns a (non-movable) pointer to the underlying +## PyString_AsString() must return a (non-movable) pointer to the underlying ## buffer, whereas pypy strings are movable. C code may temporarily store ## this address and use it, as long as it owns a reference to the PyObject. ## There is no "release" function to specify that the pointer is not needed diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. 
diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() @@ -245,6 +262,11 @@ obj = foo.new() assert module.read_tp_dict(obj) == foo.fooType.copy + def test_custom_allocation(self): + foo = self.import_module("foo") + obj = foo.newCustom() + assert type(obj) is 
foo.Custom + assert type(foo.Custom) is foo.MetaType class TestTypes(BaseApiTest): def test_type_attributes(self, space, api): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." 
+ raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -399,12 +399,7 @@ return ll_rdict.ll_newdict(DICT) _ll_0_newdict.need_result_type = True - _ll_2_dict_getitem = ll_rdict.ll_dict_getitem - _ll_3_dict_setitem = ll_rdict.ll_dict_setitem _ll_2_dict_delitem = ll_rdict.ll_dict_delitem - _ll_3_dict_setdefault = ll_rdict.ll_setdefault - _ll_2_dict_contains = ll_rdict.ll_contains - _ll_3_dict_get = ll_rdict.ll_get _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype 
= OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 @@ import py, sys -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.jit.codewriter.policy import StopAtXPolicy diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that 
needs to go via its slow path. + import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -350,7 +350,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): @@ -153,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - 
# string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. 
For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. + halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert 
buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each 
safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if 
(!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import \ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ @@ -151,6 +152,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -259,7 +262,29 @@ d[33] = 99 assert d == dd assert x == 99 - + + def test_setdefault_fast(self): + class Key(object): + calls = 0 + def __hash__(self): + self.calls += 1 + return object.__hash__(self) + + k = Key() + d = {} + d.setdefault(k, []) + if self.on_pypy: + assert k.calls == 1 + + d.setdefault(k, 1) + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 + def test_update(self): d = {1:2, 3:4} dd = d.copy() @@ -704,13 +729,20 @@ class FakeString(str): + hash_count = 0 def unwrap(self, space): self.unwrapped = True return str(self) + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: + hash_count = 0 def hash_w(self, obj): + self.hash_count += 1 return hash(obj) def 
unwrap(self, x): return x @@ -726,6 +758,8 @@ return [] DictObjectCls = W_DictMultiObject def type(self, w_obj): + if isinstance(w_obj, FakeString): + return str return type(w_obj) w_str = str def str_w(self, string): @@ -890,6 +924,19 @@ impl.setitem(x, x) assert impl.r_dict_content is not None + def test_setdefault_fast(self): + on_pypy = "__pypy__" in sys.builtin_module_names + impl = self.impl + key = FakeString(self.string) + x = impl.setdefault(key, 1) + assert x == 1 + if on_pypy: + assert key.hash_count == 1 + x = impl.setdefault(key, 2) + assert x == 1 + if on_pypy: + assert key.hash_count == 2 + class TestStrDictImplementation(BaseTestRDictImplementation): ImplementionClass = StrDictImplementation diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -96,6 +96,10 @@ out, err = capfd.readouterr() assert "Exception ValueError: 'message' in 'location' ignored" == err.strip() + def test_ExceptionInstance_Class(self, space, api): + instance = space.call_function(space.w_ValueError) + assert api.PyExceptionInstance_Class(instance) is space.w_ValueError + class AppTestFetch(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -221,14 +221,33 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO except AttributeError: list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return 
hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr) + return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + + def rtype_method_rsplit(self, hop): + rstr = hop.args_r[0].repr + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) + try: + list_type = hop.r_result.lowleveltype.TO + except AttributeError: + list_type = hop.r_result.lowleveltype + cLIST = hop.inputconst(Void, list_type) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/jit/metainterp/test/test_virtualizable.py 
b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside from pypy.rlib.rarithmetic import intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.rclass import FieldListAccessor from pypy.jit.metainterp.warmspot import get_stats, get_translator from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -816,6 +816,52 @@ """ self.optimize_loop(ops, expected, preamble) + def test_compare_with_itself(self): + ops = """ + [] + i0 = escape() + i1 = int_lt(i0, i0) + guard_false(i1) [] + i2 = int_le(i0, i0) + guard_true(i2) [] + i3 = int_eq(i0, i0) + guard_true(i3) [] + i4 = int_ne(i0, i0) + guard_false(i4) [] + i5 = int_gt(i0, i0) + guard_false(i5) [] + i6 = int_ge(i0, i0) + guard_true(i6) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) + + def test_compare_with_itself_uint(self): + py.test.skip("implement me") + ops = """ + [] + i0 = escape() + i7 = uint_lt(i0, i0) + guard_false(i7) [] + i8 = uint_le(i0, i0) + guard_true(i8) [] + i9 = uint_gt(i0, i0) + guard_false(i9) [] + i10 = uint_ge(i0, i0) + guard_true(i10) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) @@ -1791,7 +1837,7 @@ """ 
self.optimize_loop(ops, ops) - def test_duplicate_setfield_1(self): + def test_duplicate_setfield_0(self): ops = """ [p1, i1, i2] setfield_gc(p1, i1, descr=valuedescr) @@ -1800,8 +1846,27 @@ """ expected = """ [p1, i1, i2] + jump(p1, i1, i2) + """ + # in this case, all setfields are removed, because we can prove + # that in the loop it will always have the same value + self.optimize_loop(ops, expected) + + def test_duplicate_setfield_1(self): + ops = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i1, descr=valuedescr) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2) + jump(p1) + """ + expected = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i2, descr=valuedescr) + jump(p1) """ self.optimize_loop(ops, expected) @@ -1848,6 +1913,7 @@ setfield_gc(p1, i4, descr=nextdescr) # setfield_gc(p1, i2, descr=valuedescr) + escape() jump(p1, i1, i2, p3) """ preamble = """ @@ -1860,6 +1926,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ expected = """ @@ -1871,6 +1938,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ self.optimize_loop(ops, expected, preamble) @@ -1943,6 +2011,7 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1950,12 +2019,14 @@ guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected, preamble) @@ -1969,6 +2040,7 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1976,12 +2048,14 @@ guard_true(i3) [i2, p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] 
guard_true(i4) [i2, p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected) @@ -2027,15 +2101,34 @@ guard_value(p1, ConstPtr(myptr)) [] setfield_gc(p1, i1, descr=valuedescr) setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(p1, i1, i2) """ expected = """ [i1, i2] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(i1, i2) """ self.optimize_loop(ops, expected) + def test_dont_force_setfield_around_copystrcontent(self): + ops = """ + [p0, i0, p1, i1, i2] + setfield_gc(p0, i1, descr=valuedescr) + copystrcontent(p0, i0, p1, i1, i2) + escape() + jump(p0, i0, p1, i1, i2) + """ + expected = """ + [p0, i0, p1, i1, i2] + copystrcontent(p0, i0, p1, i1, i2) + setfield_gc(p0, i1, descr=valuedescr) + escape() + jump(p0, i0, p1, i1, i2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getarrayitem_1(self): ops = """ [p1] @@ -2356,6 +2449,33 @@ """ self.optimize_loop(ops, expected, preamble) + def test_bug_5(self): + ops = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + i6 = getfield_gc(p0, descr=valuedescr) + i8 = int_sub(i6, 1) + setfield_gc(p0, i8, descr=valuedescr) + escape() + jump(p0) + """ + expected = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + setfield_gc(p0, i2, descr=valuedescr) + escape() + jump(p0) + """ + self.optimize_loop(ops, expected) + def test_invalid_loop_1(self): ops = """ [p1] @@ -2637,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2671,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = 
int_sub_ovf(i0, 0) @@ -2708,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): @@ -2992,7 +3127,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3025,7 +3159,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3065,7 +3198,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3103,6 +3235,7 @@ guard_no_exception(descr=fdescr) [p2, p1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ preamble = """ @@ -3111,6 +3244,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ expected = """ @@ -3119,6 +3253,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr2) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ self.optimize_loop(ops, expected, preamble) @@ -3129,7 +3264,7 @@ #self.loop.inputargs[0].value = self.nodeobjvalue #self.check_expanded_fail_descr('''p2, p1 # p0.refdescr = p2 - # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + # where p2 
is a jit_virtual_ref_vtable, virtualtokendescr=i3 # where p1 is a node_vtable, nextdescr=p1b # where p1b is a node_vtable, valuedescr=i1 # ''', rop.GUARD_NO_EXCEPTION) @@ -3150,7 +3285,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3176,7 +3310,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -4842,6 +4975,58 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) 
+ jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -514,12 +514,10 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? 
- self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? @@ -538,6 +536,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from pypy.rlib.jit import JitDriver, unroll_safe -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_basic.py +++ /dev/null @@ -1,2411 +0,0 @@ -import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong 
-from pypy import conftest -from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper -from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" 
- - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - - -class BasicTests: - - def test_basic(self): - def f(x, y): - return x + y - res = self.interp_operations(f, [40, 2]) - assert res == 42 - - def test_basic_inst(self): - class A: - pass - def f(n): - a = A() - a.x = n - return a.x - res = self.interp_operations(f, [42]) - assert res == 42 - - def test_uint_floordiv(self): - from pypy.rlib.rarithmetic import r_uint - - def f(a, b): - a = r_uint(a) - b = r_uint(b) - return a/b - - res = self.interp_operations(f, [-4, 3]) - assert res == long(r_uint(-4)) // 3 - - def test_direct_call(self): - def g(n): - return n + 2 - def f(a, b): - return g(a) + g(b) - 
res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_direct_call_with_guard(self): - def g(n): - if n < 0: - return 0 - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - if self.basic: - found = 0 - for op in get_stats().loops[0]._all_operations(): - if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) - found += 1 - assert found == 1 - - def test_loop_invariant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loop_invariant_mul_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - b = y * 2 - res += ovfcheck(x * x) + b - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) - - def test_loop_invariant_mul_bridge1(self): - myjitdriver = 
JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - x += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 3427 - self.check_loop_count(3) - - def test_loop_invariant_mul_bridge_maintaining1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - res += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1167 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - - def test_loop_invariant_mul_bridge_maintaining2(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - z = x * x - res += z - if y<16: - res += z - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1692 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - def test_loop_invariant_mul_bridge_maintaining3(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) - def f(x, y, m): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m) - myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m) - z = x * x - res += z - if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x.intval * x.intval - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 
'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loops_are_transient(self): - import gc, weakref - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - if y%2: - res *= 2 - y -= 1 - return res - wr_loops = [] - old_init = history.TreeLoop.__init__.im_func - try: - def track_init(self, name): - old_init(self, name) - wr_loops.append(weakref.ref(self)) - history.TreeLoop.__init__ = track_init - res = self.meta_interp(f, [6, 15], no_stats=True) - finally: - history.TreeLoop.__init__ = old_init - - assert res == f(6, 15) - gc.collect() - - #assert not [wr for wr in wr_loops if wr()] - for loop in [wr for wr in wr_loops if wr()]: - assert loop().name == 'short preamble' - - def test_string(self): - def f(n): - bytecode = 'adlfkj' + chr(n) - if n < len(bytecode): - return bytecode[n] - else: - return "?" - res = self.interp_operations(f, [1]) - assert res == ord("d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord("?") - - def test_chr2str(self): - def f(n): - s = chr(n) - return s[0] - res = self.interp_operations(f, [3]) - assert res == 3 - - def test_unicode(self): - def f(n): - bytecode = u'adlfkj' + unichr(n) - if n < len(bytecode): - return bytecode[n] - else: - return u"?" 
- res = self.interp_operations(f, [1]) - assert res == ord(u"d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord(u"?") - - def test_residual_call(self): - @dont_look_inside - def externfn(x, y): - return x * y - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) - - def test_residual_call_pure(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - n = hint(n, promote=True) - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is not recorded in the history if all-constant args - self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure=0) - - def test_residual_call_pure_1(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is recorded in the history if not-all-constant args - self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure=1) - - def test_residual_call_pure_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def externfn(x): - return x - 1 - externfn._pure_function_ = True - def f(n): - while n > 0: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - n = externfn(n) - return n - res = self.meta_interp(f, [7]) - assert res == 0 - # CALL_PURE is recorded in the history, but turned into a CALL - # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) - - def test_constfold_call_pure(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - n -= externfn(m) - return n - res = 
self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_constfold_call_pure_2(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - class V: - def __init__(self, value): - self.value = value - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - v = V(m) - n -= externfn(v.value) - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_pure_function_returning_object(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - class V: - def __init__(self, x): - self.x = x - v1 = V(1) - v2 = V(2) - def externfn(x): - if x: - return v1 - else: - return v2 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - m = V(m).x - n -= externfn(m).x + externfn(m + m - m).x - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) - - def test_constant_across_mp(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - class X(object): - pass - def f(n): - while n > -100: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - x = X() - x.arg = 5 - if n <= 0: break - n -= x.arg - x.arg = 6 # prevents 'x.arg' from being annotated as constant - return n - res = self.meta_interp(f, [31]) - assert res == -4 - - def test_stopatxpolicy(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def internfn(y): - return y * 3 - def externfn(y): - return y % 4 - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if y & 
7: - f = internfn - else: - f = externfn - f(y) - y -= 1 - return 42 - policy = StopAtXPolicy(externfn) - res = self.meta_interp(f, [31], policy=policy) - assert res == 42 - self.check_loops(int_mul=1, int_mod=0) - - def test_we_are_jitted(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if we_are_jitted(): - x = 1 - else: - x = 10 - y -= x - return y - assert f(55) == -5 - res = self.meta_interp(f, [55]) - assert res == -1 - - def test_confirm_enter_jit(self): - def confirm_enter_jit(x, y): - return x <= 5 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - confirm_enter_jit = confirm_enter_jit) - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - y -= x - return y - # - res = self.meta_interp(f, [10, 84]) - assert res == -6 - self.check_loop_count(0) - # - res = self.meta_interp(f, [3, 19]) - assert res == -2 - self.check_loop_count(1) - - def test_can_never_inline(self): - def can_never_inline(x): - return x > 50 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - can_never_inline = can_never_inline) - @dont_look_inside - def marker(): - pass - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - x += 1 - if x == 4 or x == 61: - marker() - y -= x - return y - # - res = self.meta_interp(f, [3, 6], repeat=7) - assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle - # - res = self.meta_interp(f, [60, 84], repeat=7) - assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately - - def test_format(self): - def f(n): - return len("<%d>" % n) - res = self.interp_operations(f, [421]) - assert res == 5 - - def test_switch(self): - def f(n): - if n == -5: return 12 - elif n == 2: return 51 - elif n == 7: return 1212 - else: return 42 - res = self.interp_operations(f, 
[7]) - assert res == 1212 - res = self.interp_operations(f, [12311]) - assert res == 42 - - def test_r_uint(self): - from pypy.rlib.rarithmetic import r_uint - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - y = r_uint(y) - while y > 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - y -= 1 - return y - res = self.meta_interp(f, [10]) - assert res == 0 - - def test_uint_operations(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - return ((r_uint(n) - 123) >> 1) <= r_uint(456) - res = self.interp_operations(f, [50]) - assert res == False - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_uint_condition(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - if ((r_uint(n) - 123) >> 1) <= r_uint(456): - return 24 - else: - return 12 - res = self.interp_operations(f, [50]) - assert res == 12 - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_int_between(self): - # - def check(arg1, arg2, arg3, expect_result, **expect_operations): - from pypy.rpython.lltypesystem import lltype - from pypy.rpython.lltypesystem.lloperation import llop - loc = locals().copy() - exec py.code.Source(""" - def f(n, m, p): - arg1 = %(arg1)s - arg2 = %(arg2)s - arg3 = %(arg3)s - return llop.int_between(lltype.Bool, arg1, arg2, arg3) - """ % locals()).compile() in loc - res = self.interp_operations(loc['f'], [5, 6, 7]) - assert res == expect_result - self.check_operations_history(expect_operations) - # - check('n', 'm', 'p', True, int_sub=2, uint_lt=1) - check('n', 'p', 'm', False, int_sub=2, uint_lt=1) - # - check('n', 'm', 6, False, int_sub=2, uint_lt=1) - # - check('n', 4, 'p', False, int_sub=2, uint_lt=1) - check('n', 5, 'p', True, int_sub=2, uint_lt=1) - check('n', 8, 'p', False, int_sub=2, uint_lt=1) - # - check('n', 6, 7, True, int_sub=2, uint_lt=1) - # - check(-2, 'n', 'p', True, int_sub=2, uint_lt=1) - 
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1) - check(-2, 'p', 'm', False, int_sub=2, uint_lt=1) - #check(0, 'n', 'p', True, uint_lt=1) xxx implement me - #check(0, 'm', 'p', True, uint_lt=1) - #check(0, 'p', 'm', False, uint_lt=1) - # - check(2, 'n', 6, True, int_sub=1, uint_lt=1) - check(2, 'm', 6, False, int_sub=1, uint_lt=1) - check(2, 'p', 6, False, int_sub=1, uint_lt=1) - check(5, 'n', 6, True, int_eq=1) # 6 == 5+1 - check(5, 'm', 6, False, int_eq=1) # 6 == 5+1 - # - check(2, 6, 'm', False, int_sub=1, uint_lt=1) - check(2, 6, 'p', True, int_sub=1, uint_lt=1) - # - check(2, 40, 6, False) - check(2, 40, 60, True) - - def test_getfield(self): - class A: - pass - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=1) - - def test_getfield_immutable(self): - class A: - _immutable_ = True - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=0) - - def test_setfield_bool(self): - class A: - def __init__(self): - self.flag = True - myjitdriver = JitDriver(greens = [], reds = ['n', 'obj']) - def f(n): - obj = A() - res = False - while n > 0: - myjitdriver.can_enter_jit(n=n, obj=obj) - myjitdriver.jit_merge_point(n=n, obj=obj) - obj.flag = False - n -= 1 - return res - res = self.meta_interp(f, [7]) - assert type(res) == bool - assert not res - - def test_switch_dict(self): - def f(x): - if x == 1: return 61 - elif x == 2: return 511 - elif x == 3: return -22 - elif x == 4: return 81 - elif x == 5: return 17 - elif x == 6: return 54 - elif x == 7: return 987 - elif x == 8: return -12 - elif x == 9: return 321 - return -1 - res = self.interp_operations(f, [5]) - assert res == 17 - res = self.interp_operations(f, [15]) - assert res == -1 - 
- def test_int_add_ovf(self): - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -98 - res = self.interp_operations(f, [1, sys.maxint]) - assert res == -42 - - def test_int_sub_ovf(self): - def f(x, y): - try: - return ovfcheck(x - y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -102 - res = self.interp_operations(f, [1, -sys.maxint]) - assert res == -42 - - def test_int_mul_ovf(self): - def f(x, y): - try: - return ovfcheck(x * y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -200 - res = self.interp_operations(f, [-3, sys.maxint//2]) - assert res == -42 - - def test_mod_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y']) - def f(n, x, y): - while n > 0: - myjitdriver.can_enter_jit(x=x, y=y, n=n) - myjitdriver.jit_merge_point(x=x, y=y, n=n) - n -= ovfcheck(x % y) - return n - res = self.meta_interp(f, [20, 1, 2]) - assert res == 0 - self.check_loops(call=0) - - def test_abs(self): - myjitdriver = JitDriver(greens = [], reds = ['i', 't']) - def f(i): - t = 0 - while i < 10: - myjitdriver.can_enter_jit(i=i, t=t) - myjitdriver.jit_merge_point(i=i, t=t) - t += abs(i) - i += 1 - return t - res = self.meta_interp(f, [-5]) - assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9 - - def test_float(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - x = float(x) - y = float(y) - res = 0.0 - while y > 0.0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1.0 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42.0 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) - - def test_print(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def f(n): - while n > 0: - 
myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - print n - n -= 1 - return n - res = self.meta_interp(f, [7]) - assert res == 0 - - def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n'], greens = []) - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - n -= 1 - - self.meta_interp(f, [20], repeat=7) - self.check_tree_loop_count(2) # the loop and the entry path - # we get: - # ENTER - compile the new loop and the entry bridge - # ENTER - compile the leaving path - self.check_enter_count(2) - - def test_bridge_from_interpreter_2(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n'], greens = []) - glob = [1] - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - if n == 17 and glob[0]: - glob[0] = 0 - x = n + 1 - y = n + 2 - z = n + 3 - k = n + 4 - n -= 1 - n += x + y + z + k - n -= x + y + z + k - n -= 1 - - self.meta_interp(f, [20], repeat=7) - - def test_bridge_from_interpreter_3(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - glob.x = 1 - x = 0 - y = 0 - z = 0 - k = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k) - mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k) - x += 10 - y += 3 - z -= 15 - k += 4 - if n == 17 and glob.x: - glob.x = 0 - x += n + 1 - y += n + 2 - z += n + 3 - k += n + 4 - n -= 1 - n -= 1 - return x + 2*y + 3*z + 5*k + 13*n - - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_bridge_from_interpreter_4(self): - jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - - def f(n, k): - while n > 0: - jitdriver.can_enter_jit(n=n, k=k) - jitdriver.jit_merge_point(n=n, k=k) - if k: - n -= 2 - else: - n -= 1 - return n + k - - from pypy.rpython.test.test_llinterp import 
get_interpreter, clear_tcache - from pypy.jit.metainterp.warmspot import WarmRunnerDesc - - interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) - clear_tcache() - translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - warmrunnerdesc = WarmRunnerDesc(translator, - CPUClass=self.CPUClass) - state = warmrunnerdesc.jitdrivers_sd[0].warmstate - state.set_param_threshold(3) # for tests - state.set_param_trace_eagerness(0) # for tests - warmrunnerdesc.finish() - for n, k in [(20, 0), (20, 1)]: - interp.eval_graph(graph, [n, k]) - - def test_bridge_leaving_interpreter_5(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - x = 0 - glob.x = 1 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - glob.x += 1 - x += 3 - n -= 1 - glob.x += 100 - return glob.x + x - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_instantiate_classes(self): - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - def f(n): - if n > 5: - cls = A - else: - cls = B - return cls().foo - res = self.interp_operations(f, [3]) - assert res == 8 - res = self.interp_operations(f, [13]) - assert res == 72 - - def test_instantiate_does_not_call(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - - def f(n): - x = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - if n % 2 == 0: - cls = A - else: - cls = B - inst = cls() - x += inst.foo - n -= 1 - return x - res = self.meta_interp(f, [20], enable_opts='') - assert res == f(20) - self.check_loops(call=0) - - def test_zerodivisionerror(self): - # test the case of exception-raising operation that is not delegated - # to the backend at all: ZeroDivisionError - # - def f(n): - assert n >= 0 - try: - 
return ovfcheck(5 % n) - except ZeroDivisionError: - return -666 - except OverflowError: - return -777 - res = self.interp_operations(f, [0]) - assert res == -666 - # - def f(n): - assert n >= 0 - try: - return ovfcheck(6 // n) - except ZeroDivisionError: - return -667 - except OverflowError: - return -778 - res = self.interp_operations(f, [0]) - assert res == -667 - - def test_div_overflow(self): - import sys - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - try: - res += llop.int_floordiv_ovf(lltype.Signed, - -sys.maxint-1, x) - x += 5 - except OverflowError: - res += 100 - y -= 1 - return res - res = self.meta_interp(f, [-41, 16]) - assert res == ((-sys.maxint-1) // (-41) + - (-sys.maxint-1) // (-36) + - (-sys.maxint-1) // (-31) + - (-sys.maxint-1) // (-26) + - (-sys.maxint-1) // (-21) + - (-sys.maxint-1) // (-16) + - (-sys.maxint-1) // (-11) + - (-sys.maxint-1) // (-6) + - 100 * 8) - - def test_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - if n: - obj = A() - else: - obj = B() - return isinstance(obj, B) - res = self.interp_operations(fn, [0]) - assert res - self.check_operations_history(guard_class=1) - res = self.interp_operations(fn, [1]) - assert not res - - def test_isinstance_2(self): - driver = JitDriver(greens = [], reds = ['n', 'sum', 'x']) - class A: - pass - class B(A): - pass - class C(B): - pass - - def main(): - return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100 - - def f(n, x): - sum = 0 - while n > 0: - driver.can_enter_jit(x=x, n=n, sum=sum) - driver.jit_merge_point(x=x, n=n, sum=sum) - if isinstance(x, B): - sum += 1 - n -= 1 - return sum - - res = self.meta_interp(main, []) - assert res == 55 - - def test_assert_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - # this should only be 
called with n != 0 - if n: - obj = B() - obj.a = n - else: - obj = A() - obj.a = 17 - assert isinstance(obj, B) - return obj.a - res = self.interp_operations(fn, [1]) - assert res == 1 - self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) - - def test_r_dict(self): - from pypy.rlib.objectmodel import r_dict - class FooError(Exception): - pass - def myeq(n, m): - return n == m - def myhash(n): - if n < 0: - raise FooError - return -n - def f(n): - d = r_dict(myeq, myhash) - for i in range(10): - d[i] = i*i - try: - return d[n] - except FooError: - return 99 - res = self.interp_operations(f, [5]) - assert res == f(5) - - def test_free_object(self): - import weakref - from pypy.rlib import rgc - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - class X(object): - pass - def main(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= x.foo - def g(n): - x = X() - x.foo = 2 - main(n, x) - x.foo = 5 - return weakref.ref(x) - def f(n): - r = g(n) - rgc.collect(); rgc.collect(); rgc.collect() - return r() is None - # - assert f(30) == 1 - res = self.meta_interp(f, [30], no_stats=True) - assert res == 1 - - def test_pass_around(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - - def call(): - pass - - def f(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - if n % 2: - call() - if n == 8: - return x - x = 3 - else: - x = 5 - n -= 1 - return 0 - - self.meta_interp(f, [40, 0]) - - def test_const_inputargs(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x']) - def f(n, x): - m = 0x7FFFFFFF - while n > 0: - myjitdriver.can_enter_jit(m=m, n=n, x=x) - myjitdriver.jit_merge_point(m=m, n=n, x=x) - x = 42 - n -= 1 - m = m >> 1 - return x - - res = self.meta_interp(f, [50, 1], enable_opts='') - assert res == 42 - - 
def test_set_param(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - def g(n): - x = 0 - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= 1 - x += n - return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) - return g(n) - - res = self.meta_interp(f, [10, 3]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(2) - - res = self.meta_interp(f, [10, 13]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(0) - - def test_dont_look_inside(self): - @dont_look_inside - def g(a, b): - return a + b - def f(a, b): - return g(a, b) - res = self.interp_operations(f, [3, 5]) - assert res == 8 - self.check_operations_history(int_add=0, call=1) - - def test_listcomp(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) - def f(x, y): - lst = [0, 0, 0] - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, lst=lst) - myjitdriver.jit_merge_point(x=x, y=y, lst=lst) - lst = [i+x for i in lst if i >=0] - y -= 1 - return lst[0] - res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True) - # XXX: the loop looks inefficient - assert res == 42 - - def test_tuple_immutable(self): - def new(a, b): - return a, b - def f(a, b): - tup = new(a, b) - return tup[1] - res = self.interp_operations(f, [3, 5]) - assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=1) - - def test_oosend_look_inside_only_one(self): - class A: - pass - class B(A): - def g(self): - return 123 - class C(A): - @dont_look_inside - def g(self): - return 456 - def f(n): - if n > 3: - x = B() - else: - x = C() - return x.g() + x.g() - res = self.interp_operations(f, [10]) - assert res == 123 * 2 - res = self.interp_operations(f, [-10]) - assert res == 456 * 2 - - def test_residual_external_call(self): - import math - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - def f(x, y): - x = float(x) - 
res = 0.0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) - res += ipart - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops(call=1) - - def test_merge_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 5 - class B(A): - def g(self, y): - return y - 3 - - a1 = A() - a2 = A() - b = B() - def f(x): - l = [a1] * 100 + [a2] * 100 + [b] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - x = a.g(x) - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) - - def test_merge_guardnonnull_guardclass(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class 
B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [b1] * 100 + [None] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) - - def test_merge_guardnonnull_guardvalue_2(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - a2 = A() - b1 = B() - def f(x): - l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [399], listops=True) - assert res == f(399) - self.check_loops(guard_class=0, 
guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_residual_call_doesnt_lose_info(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) - - class A(object): - pass - - globall = [""] - @dont_look_inside - def g(x): - globall[0] = str(x) - return x - - def f(x): - y = A() - y.v = x - l = [0] - while y.v > 0: - myjitdriver.can_enter_jit(x=x, y=y, l=l) - myjitdriver.jit_merge_point(x=x, y=y, l=l) - l[0] = y.v - lc = l[0] - y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 - return y.v - res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) - - def test_guard_isnull_nonnull(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - - @dont_look_inside - def create(x): - if x >= -40: - return A() - return None - - def f(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - obj = create(x-1) - if obj is not None: - res += 1 - obj2 = create(x-1000) - if obj2 is None: - res += 1 - x -= 1 - return res - res = self.meta_interp(f, [21]) - assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) - - def test_loop_invariant1(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - a = A() - a.current_a = A() - a.current_a.x = 1 - @loop_invariant - def f(): - return a.current_a - - def g(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - res += f().x - res += f().x - res += f().x - x -= 1 - a.current_a = A() - a.current_a.x = 2 - return res - res = self.meta_interp(g, [21]) - assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) - - def 
test_bug_optimizeopt_mutates_ops(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) - class A(object): - pass - class B(A): - pass - - glob = A() - glob.a = None - def f(x): - res = 0 - a = A() - a.x = 0 - glob.a = A() - const = 2 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const) - myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const) - if type(glob.a) is B: - res += 1 - if a is None: - a = A() - a.x = x - glob.a = B() - const = 2 - else: - const = hint(const, promote=True) - x -= const - res += a.x - a = None - glob.a = A() - const = 1 - return res - res = self.meta_interp(f, [21]) - assert res == f(21) - - def test_getitem_indexerror(self): - lst = [10, 4, 9, 16] - def f(n): - try: - return lst[n] - except IndexError: - return -2 - res = self.interp_operations(f, [2]) - assert res == 9 - res = self.interp_operations(f, [4]) - assert res == -2 - res = self.interp_operations(f, [-4]) - assert res == 10 - res = self.interp_operations(f, [-5]) - assert res == -2 - - def test_guard_always_changing_value(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - a = A() - hint(a, promote=True) - x -= 1 - self.meta_interp(f, [50]) - self.check_loop_count(1) - # this checks that the logic triggered by make_a_counter_per_value() - # works and prevents generating tons of bridges - - def test_swap_values(self): - def f(x, y): - if x > 5: - x, y = y, x - return x - y - res = self.interp_operations(f, [10, 2]) - assert res == -8 - res = self.interp_operations(f, [3, 2]) - assert res == 1 - - def test_raw_malloc_and_access(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Signed) - - def f(n): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = n - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10]) - assert res == 10 - - def 
test_raw_malloc_and_access_float(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Float) - - def f(n, f): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = f - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10, 3.5]) - assert res == 3.5 - - def test_jit_debug(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - jit_debug("hi there:", x) - jit_debug("foobar") - x -= 1 - return x - res = self.meta_interp(f, [8]) - assert res == 0 - self.check_loops(jit_debug=2) - - def test_assert_green(self): - def f(x, promote): - if promote: - x = hint(x, promote=True) - assert_green(x) - return x - res = self.interp_operations(f, [8, 1]) - assert res == 8 - py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0]) - - def test_multiple_specialied_versions1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 7]) - assert res == 6*8 + 6**8 - self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) - - def test_multiple_specialied_versions_array(self): - myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', - 'array']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - 
def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val - other.val) - def f(x, y): - res = x - array = [1, 2, 3] - array[1] = 7 - idx = 0 - while y > 0: - myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res, - array=array) - myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res, - array=array) - res = res.binop(x) - res.val += array[idx] + array[1] - if y < 7: - idx = 2 - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) - - def test_multiple_specialied_versions_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - b1 = f(B(x), y, B(x)) - b2 = f(B(x), y, B(x)) - assert b1.val == b2.val - c1 = f(B(x), y, A(x)) - c2 = f(B(x), y, A(x)) - assert c1.val == c2.val - d1 = f(A(x), y, B(x)) - d2 = f(A(x), y, B(x)) - assert d1.val == d2.val - return a1.val + b1.val + c1.val + d1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_failing_inlined_guard(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val 
- class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 8: - x = z - return res - def g(x, y): - c1 = f(A(x), y, B(x)) - c2 = f(A(x), y, B(x)) - assert c1.val == c2.val - return c1.val - res = self.meta_interp(g, [3, 16]) - assert res == g(3, 16) - - def test_inlined_guard_in_short_preamble(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class A: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - def binop(self, other): - return A(self.getval() + other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return A(self.val + other.val) - def f(x, y): - res = A(0) - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(A(y)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_specialied_bridge_const(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return 
A(self.val + other.val) - def f(x, y): - res = A(0) - const = 7 - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const) - myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const) - const = hint(const, promote=True) - res = res.binop(A(const)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_multiple_specialied_zigzag(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - def switch(self): - return B(self.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def switch(self): - return A(self.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - if y % 4 == 0: - res = res.switch() - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [3, 23]) - assert res == 7068153 - self.check_loop_count(6) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) - - def test_dont_trace_every_iteration(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - - def main(a, b): - i = sa = 0 - #while i < 200: - while i < 200: - myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa) - myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa) - if a > 0: pass - if b < 2: pass - sa += a % b - i += 1 - return sa - def g(): - return main(10, 20) + main(-10, -20) - res = self.meta_interp(g, []) - assert res == g() - self.check_enter_count(2) - - def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = 
['x']) - @dont_look_inside - def residual(): - print "hi there" - @unroll_safe - def loop(g): - y = 0 - while y < g: - residual() - y += 1 - def f(x, g): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) - loop(g) - x -= 1 - n = current_trace_length() - return n - res = self.meta_interp(f, [5, 8]) - assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 - - def test_compute_identity_hash(self): - from pypy.rlib.objectmodel import compute_identity_hash - class A(object): - pass - def f(): - a = A() - return compute_identity_hash(a) == compute_identity_hash(a) - res = self.interp_operations(f, []) - assert res - # a "did not crash" kind of test - - def test_compute_unique_id(self): - from pypy.rlib.objectmodel import compute_unique_id - class A(object): - pass - def f(): - a1 = A() - a2 = A() - return (compute_unique_id(a1) == compute_unique_id(a1) and - compute_unique_id(a1) != compute_unique_id(a2)) - res = self.interp_operations(f, []) - assert res - - def test_wrap_around_add(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x += 1 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint-10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_mul(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x *= 2 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint>>10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_sub(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x < 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x -= 1 - n += 1 - return n - res 
= self.meta_interp(f, [10-sys.maxint]) - assert res == 12 - self.check_tree_loop_count(2) - - - -class TestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - 
enable_opts='') - assert res - -class BaseLLtypeTests(BasicTests): - - def test_identityhash(self): - A = lltype.GcStruct("A") - def f(): - obj1 = lltype.malloc(A) - obj2 = lltype.malloc(A) - return lltype.identityhash(obj1) == lltype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oops_on_nongc(self): - from pypy.rpython.lltypesystem import lltype - - TP = lltype.Struct('x') - def f(i1, i2): - p1 = prebuilt[i1] - p2 = prebuilt[i2] - a = p1 is p2 - b = p1 is not p2 - c = bool(p1) - d = not bool(p2) - return 1000*a + 100*b + 10*c + d - prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2 - expected = f(0, 1) - assert self.interp_operations(f, [0, 1]) == expected - - def test_casts(self): - py.test.skip("xxx fix or kill") - if not self.basic: - py.test.skip("test written in a style that " - "means it's frontend only") - from pypy.rpython.lltypesystem import lltype, llmemory, rffi - - TP = lltype.GcStruct('S1') - def f(p): - n = lltype.cast_ptr_to_int(p) - return n - x = lltype.malloc(TP) - xref = lltype.cast_opaque_ptr(llmemory.GCREF, x) - res = self.interp_operations(f, [xref]) - y = llmemory.cast_ptr_to_adr(x) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - # - TP = lltype.Struct('S2') - prebuilt = [lltype.malloc(TP, immortal=True), - lltype.malloc(TP, immortal=True)] - def f(x): - p = prebuilt[x] - n = lltype.cast_ptr_to_int(p) - return n - res = self.interp_operations(f, [1]) - y = llmemory.cast_ptr_to_adr(prebuilt[1]) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - - def test_collapsing_ptr_eq(self): - S = lltype.GcStruct('S') - p = lltype.malloc(S) - driver = JitDriver(greens = [], reds = ['n', 'x']) - - def f(n, x): - while n > 0: - driver.can_enter_jit(n=n, x=x) - driver.jit_merge_point(n=n, x=x) - if x: - n -= 1 - n -= 1 - - def main(): - f(10, p) - f(10, lltype.nullptr(S)) - - self.meta_interp(main, []) - 
- def test_enable_opts(self): - jitdriver = JitDriver(greens = [], reds = ['a']) - - class A(object): - def __init__(self, i): - self.i = i - - def f(): - a = A(0) - - while a.i < 10: - jitdriver.jit_merge_point(a=a) - jitdriver.can_enter_jit(a=a) - a = A(a.i + 1) - - self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) - self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) - -class TestLLtype(BaseLLtypeTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -205,7 +205,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_ge(v2.intbound): + elif v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -215,7 +215,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_le(v2.intbound): + elif v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -223,7 +223,7 @@ def optimize_INT_LE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_le(v2.intbound): + if v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) @@ -233,7 +233,7 @@ def optimize_INT_GE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_ge(v2.intbound): + if v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py 
--- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): 
found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 #__________________________________________________________ def test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - 
results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if 
link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -381,6 +381,9 @@ def _setup(): global _old_raw_input + if _old_raw_input is not None: + return # don't run _setup twice + try: f_in = sys.stdin.fileno() f_out = sys.stdout.fileno() @@ -401,4 +404,5 @@ _old_raw_input = __builtin__.raw_input __builtin__.raw_input = _wrapper.raw_input +_old_raw_input = None _setup() diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for 
op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - 
def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/rlib/_rweakvaldict.py b/pypy/rlib/_rweakvaldict.py --- a/pypy/rlib/_rweakvaldict.py +++ b/pypy/rlib/_rweakvaldict.py @@ -113,7 +113,7 @@ @jit.dont_look_inside def ll_get(self, d, llkey): hash = self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get') valueref = d.entries[i].value if valueref: @@ -132,7 +132,7 @@ def ll_set_nonnull(self, d, llkey, llvalue): hash = self.ll_keyhash(llkey) valueref = weakref_create(llvalue) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = llkey d.entries[i].value = valueref @@ -146,7 +146,7 @@ @jit.dont_look_inside def ll_set_null(self, d, llkey): hash = self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. 
# We don't store a NULL value, but a dead weakref, because diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -152,8 +153,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -501,6 +507,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = 
self.gcdata.gc.gcheaderbuilder.HDR @@ -794,6 +804,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1363,6 +1382,14 @@ return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1372,10 +1399,7 @@ return top.address[0] def allocate_stack(self): - result = llmemory.raw_malloc(self.rootstacksize) - if result: - llmemory.raw_memclear(result, self.rootstacksize) - return result + return llmemory.raw_malloc(self.rootstacksize) def setup_root_walker(self): stackbase = self.allocate_stack() @@ -1387,12 +1411,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1499,12 +1522,11 @@ # collect all valid stacks from the dict (the entry # corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr 
= end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 @@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def 
__init__(self, assembler, translate_support_code=False): @@ -135,16 +165,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -738,8 +758,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None + self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -836,31 +860,53 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = 
self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. - for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. 
+ for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -870,7 +916,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -929,16 +975,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + 
self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -1132,7 +1187,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1200,18 +1255,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import 
pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.extmodules.rst @@ -0,0 +1,12 @@ +You can pass a comma-separated list of 
third-party builtin modules +which should be translated along with the standard modules within +``pypy.module``. + +The module names need to be fully qualified (i.e. have a ``.`` in them), +be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. +``mypkg.somemod``. + +Once translated, the module will be accessible with a simple:: + + import somemod + diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -592,6 +592,26 @@ a.x = 5 self.interpret(fn, []) + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -696,26 +716,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -305,6 +305,15 @@ stackcounter = StackCounter() stackcounter._freeze_() +def llexternal_use_eci(compilation_info): + """Return a dummy function that, if called in a RPython program, + adds the given ExternalCompilationInfo to it.""" + eci = ExternalCompilationInfo(post_include_bits=['#define PYPY_NO_OP()']) + eci = eci.merge(compilation_info) + return llexternal('PYPY_NO_OP', [], lltype.Void, + compilation_info=eci, sandboxsafe=True, _nowrapper=True, + _callable=lambda: None) + # ____________________________________________________________ # Few helpers for keeping callback arguments alive # this makes passing opaque objects possible (they don't even pass @@ -737,6 +746,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/translator/backendopt/removenoops.py 
b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } @@ -44,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True gcrootmap = MockGcRootMap() @@ -166,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -204,7 +209,7 @@ def 
get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -220,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -254,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -274,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -289,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def 
setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def 
test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -1,3 +1,4 @@ +from __future__ import with_statement MARKER = 42 class AppTestImpModule: @@ -34,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + 
open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. 
but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -46,4 +46,5 @@ return PyBuffer_New(150); """), ]) - module.buffer_new() + b = module.buffer_new() + raises(AttributeError, getattr, b, 'x') diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, 
(ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -32,6 +32,7 @@ else: SO = ".so" DEFAULT_SOABI = 'pypy-14' +CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() def get_so_extension(space): @@ -58,6 +59,12 @@ if os.path.exists(pyfile) and case_ok(pyfile): return PY_SOURCE, ".py", "U" + # on Windows, also check for a .pyw file + if CHECK_FOR_PYW: + pyfile = filepart + ".pyw" + if os.path.exists(pyfile) and case_ok(pyfile): + return PY_SOURCE, ".pyw", "U" + # The .py file does not exist. By default on PyPy, lonepycfiles # is False: if a .py file does not exist, we don't even try to # look for a lone .pyc file. 
@@ -85,6 +92,9 @@ # XXX that's slow def case_ok(filename): index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. 
- def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -499,10 +499,14 @@ def getanyitem(str): return str.basecharclass() - def method_split(str, patt): # XXX + def method_split(str, patt, max=-1): getbookkeeper().count("str_split", str, patt) return getbookkeeper().newlist(str.basestringclass()) + def method_rsplit(str, patt, max=-1): + getbookkeeper().count("str_rsplit", str, patt) + return getbookkeeper().newlist(str.basestringclass()) + def method_replace(str, s1, s2): return str.basestringclass() diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -722,31 +722,75 @@ newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) - def ll_split_chr(LIST, s, c): + def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) count = 1 i = 0 + if max == 0: + i = strlen while i < strlen: if chars[i] == c: count += 1 + if max >= 0 and count > max: + break i += 1 res = LIST.ll_newlist(count) items = res.ll_items() i = 0 j = 0 resindex = 0 + if max == 0: + j = strlen while j < strlen: if chars[j] == c: item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - 
i) resindex += 1 i = j + 1 + if max >= 0 and resindex >= max: + j = strlen + break j += 1 item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) return res + def ll_rsplit_chr(LIST, s, c, max): + chars = s.chars + strlen = len(chars) + count = 1 + i = 0 + if max == 0: + i = strlen + while i < strlen: + if chars[i] == c: + count += 1 + if max >= 0 and count > max: + break + i += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + i = strlen + j = strlen + resindex = count - 1 + assert resindex >= 0 + if max == 0: + j = 0 + while j > 0: + j -= 1 + if chars[j] == c: + item = items[resindex] = s.malloc(i - j - 1) + item.copy_contents(s, item, j + 1, 0, i - j - 1) + resindex -= 1 + i = j + if resindex == 0: + j = 0 + break + item = items[resindex] = s.malloc(i - j) + item.copy_contents(s, item, j, 0, i - j) + return res + @purefunction def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + 
is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. 
# Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* 
tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) 
""" @@ -260,13 +259,13 @@ @classmethod def is_const(cls, v1): return isinstance(v1, str) and v1.startswith('ConstClass(') - + def match_var(self, v1, exp_v2): assert v1 != '_' if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 @@ -285,9 +284,9 @@ self.match_var(op.res, exp_res) self._assert(len(op.args) == len(exp_args), "wrong number of arguments") for arg, exp_arg in zip(op.args, exp_args): - self._assert(self.match_var(arg, exp_arg), "variable mismatch") + self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) self.match_descr(op.descr, exp_descr) - + def _next_op(self, iter_ops, assert_raises=False): try: diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... 
+ at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) @@ -113,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -313,11 +313,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). 
""" for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -325,9 +326,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -116,13 +117,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) @@ -1430,6 +1430,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. 
self.run_source(''' def main(a, b): i = sa = 0 @@ -1437,11 +1439,10 @@ %s i += 1 return sa - ''' % code, 179, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_mod(self): avalues = ('a', 'b', 7, -42, 8) bvalues = ['b'] + range(-10, 0) + range(1,10) @@ -1462,6 +1463,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. self.run_source(''' def main(a, b): i = sa = 0 @@ -1471,11 +1474,10 @@ %s i += 1 return sa - ''' % code, 450, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_dont_trace_every_iteration(self): self.run_source(''' def main(a, b): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + 
hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -266,6 +274,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(d, [x, y]) assert res == d(x, y) @@ -276,6 +286,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(m, [x, y]) assert res == m(x, y) @@ -384,6 +396,18 @@ else: assert res == 123456789012345678 + def 
test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -746,11 +757,13 @@ return str.substring(start, end); } - public static Object[] ll_split_chr(String str, char c) { + public static Object[] ll_split_chr(String str, char c, int max) { ArrayList list = new ArrayList(); int lastidx = 0, idx = 0; while ((idx = str.indexOf(c, lastidx)) != -1) { + if (max >= 0 && list.size() >= max) + break; String sub = str.substring(lastidx, idx); list.add(sub); lastidx = idx+1; @@ -759,6 +772,21 @@ return list.toArray(new String[list.size()]); } + public static Object[] ll_rsplit_chr(String str, char c, int max) { + ArrayList list = new ArrayList(); + int lastidx = str.length(), idx = 0; + while ((idx = str.lastIndexOf(c, lastidx - 1)) != -1) + { + if (max >= 0 && list.size() >= max) + break; + String sub = str.substring(idx + 1, lastidx); + list.add(0, 
sub); + lastidx = idx; + } + list.add(0, str.substring(0, lastidx)); + return list.toArray(new String[list.size()]); + } + public static String ll_substring(String str, int start, int cnt) { return str.substring(start,start+cnt); } @@ -1158,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1170,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -38,11 +38,7 @@ if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: - _sse2_eci = ExternalCompilationInfo( + ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = ['-msse2', '-mfpmath=sse', '-DPYPY_CPU_HAS_STANDARD_PRECISION'], - separate_module_sources = ['void PYPY_NO_OP(void) {}'], - ) - ensure_sse2_floats = rffi.llexternal('PYPY_NO_OP', [], lltype.Void, - 
compilation_info=_sse2_eci, - sandboxsafe=True) + )) diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class StringTests: diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # 
____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = 
only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,7 +17,6 @@ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ -^pypy/translator/c/src/dtoa.o$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ @@ -64,4 +63,4 @@ ^pypy/doc/image/parsing_example.+\.png$ ^compiled ^.git/ -^release/ \ No newline at end of file +^release/ diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() @@ -39,7 +39,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "md5", "cStringIO", "array"])) + "struct", "_md5", "cStringIO", "array"])) working_oo_modules = default_modules.copy() working_oo_modules.update(dict.fromkeys( @@ -159,6 +159,11 @@ cmdline="--allworkingmodules", negation=True), + StrOption("extmodules", + "Comma-separated list of third-party builtin modules", + cmdline="--ext", + default=None), + BoolOption("translationmodules", "use only those modules that are needed to run translate.py on pypy", default=False, @@ -352,8 +357,8 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) - if not IS_64_BITS: - config.objspace.std.suggest(withsmalllong=True) + #if not IS_64_BITS: + # config.objspace.std.suggest(withsmalllong=True) # extra costly 
optimizations only go in level 3 if level == '3': diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 
'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -31,6 +50,10 @@ if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' + if hasattr(os, 'wait3'): + appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { 'open' : 'interp_posix.open', @@ -156,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' + name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ListTests: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ 
b/pypy/objspace/std/mapdict.py @@ -641,6 +641,18 @@ else: self._as_rdict().impl_fallback_setitem(w_key, w_value) + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + key = space.str_w(w_key) + w_result = self.impl_getitem_str(key) + if w_result is not None: + return w_result + self.impl_setitem_str(key, w_default) + return w_default + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] 
= res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class WrongResult(Exception): pass diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py 
--- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass = JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') 
#D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- 
a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify 
from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if 
block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). """ - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, 
ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,8 +8,8 @@ for descr in all_optiondescrs: prefix = descr._name c = config.Config(descr) - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." 
+ p + ".rst" f = thisdir.join(basename) f.ensure() diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -5,7 +5,7 @@ from pypy.rlib.libffi import ArgChain from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestFfiCall(LLJitMixin, _TestLibffiCall): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -1421,9 +1423,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 
1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = 
self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -26,7 +26,10 @@ else { string res = ""; foreach(char ch in x) - res+= string.Format("\\x{0:X2}", (int)ch); + if (ch >= 32 && ch < 128) + res+= ch; + else + res+= string.Format("\\x{0:X2}", (int)ch); return string.Format("'{0}'", res); } } @@ -498,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -717,9 +725,31 @@ return s.Substring(start, count); } - public static string[] ll_split_chr(string s, char ch) + public static string[] ll_split_chr(string s, char ch, int max) { - return s.Split(ch); + if (max < 0) + return s.Split(ch); + else + return s.Split(new Char[] {ch}, max + 1); + } + + public static string[] ll_rsplit_chr(string s, char ch, int max) + { + string[] splits = s.Split(ch); + if (max < 0 || splits.Length <= max + 1) + return splits; + else { + /* XXX not very efficient */ + string first = splits[0]; + // join the first (length - max - 1) items + int i; + for (i = 1; i < splits.Length - max; i++) + first += ch + splits[i]; + splits[0] = first; + Array.Copy(splits, i, splits, 1, max); + Array.Resize(ref splits, max + 1); + return splits; + } } public static bool ll_contains(string s, char ch) @@ -1123,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return 
Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/jit/backend/x86/test/test_basic.py b/pypy/jit/backend/x86/test/test_basic.py --- a/pypy/jit/backend/x86/test/test_basic.py +++ b/pypy/jit/backend/x86/test/test_basic.py @@ -1,18 +1,18 @@ import py from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver -class Jit386Mixin(test_basic.LLJitMixin): +class Jit386Mixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass -class TestBasic(Jit386Mixin, test_basic.BaseLLtypeTests): +class TestBasic(Jit386Mixin, test_ajit.BaseLLtypeTests): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py def test_bug(self): diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- 
a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/jvm/src/pypy/StatResult.java b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_ajit.py copy from pypy/jit/metainterp/test/test_basic.py copy to pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -4,269 +4,17 @@ from pypy.rlib.jit import loop_invariant from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong from pypy import conftest from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT 
- - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - 
cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class BasicTests: diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + 
return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = 
ErrorMessages.remove(errno); diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -435,14 +435,6 @@ return (PyObject *)foop; } -/* List of functions exported by this module */ - -static PyMethodDef foo_functions[] = { - {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, - {NULL, NULL} /* Sentinel */ -}; - - static int initerrtype_init(PyObject *self, PyObject *args, PyObject *kwargs) { PyErr_SetString(PyExc_ValueError, "init raised an error!"); return -1; @@ -592,6 +584,41 @@ 0 /*tp_weaklist*/ }; +/* A type with a custom allocator */ +static void custom_dealloc(PyObject *ob) +{ + free(ob); +} + +static PyTypeObject CustomType; + +static PyObject *newCustom(PyObject *self, PyObject *args) +{ + 
PyObject *obj = calloc(1, sizeof(PyObject)); + obj->ob_type = &CustomType; + _Py_NewReference(obj); + return obj; +} + +static PyTypeObject CustomType = { + PyObject_HEAD_INIT(NULL) + 0, + "foo.Custom", /*tp_name*/ + sizeof(PyObject), /*tp_size*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)custom_dealloc, /*tp_dealloc*/ +}; + + +/* List of functions exported by this module */ + +static PyMethodDef foo_functions[] = { + {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, + {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, + {NULL, NULL} /* Sentinel */ +}; + /* Initialize this module. */ @@ -616,7 +643,10 @@ if (PyType_Ready(&InitErrType) < 0) return; if (PyType_Ready(&SimplePropertyType) < 0) - return; + return; + CustomType.ob_type = &MetaType; + if (PyType_Ready(&CustomType) < 0) + return; m = Py_InitModule("foo", foo_functions); if (m == NULL) return; @@ -635,4 +665,6 @@ return; if (PyDict_SetItemString(d, "Property", (PyObject *) &SimplePropertyType) < 0) return; + if (PyDict_SetItemString(d, "Custom", (PyObject *) &CustomType) < 0) + return; } diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in 
graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... 
- res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. - # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -576,20 +576,56 @@ res = self.interpret(f, [i, newlines]) assert res == f(i, newlines) - def test_split(self): + def _make_split_test(self, split_fn): const = self.const def fn(i): s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = s.split(const('.')) + l = getattr(s, split_fn)(const('.')) sum = 0 for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) return sum + len(l) * 100 + return fn + + def test_split(self): + fn = self._make_split_test('split') for i in range(5): res = self.interpret(fn, [i]) assert res == fn(i) + def test_rsplit(self): + fn = self._make_split_test('rsplit') + for i in range(5): + res = self.interpret(fn, [i]) + assert res == fn(i) + + def _make_split_limit_test(self, split_fn): + const = self.const + def fn(i, j): + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.'), j) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) + return sum + len(l) * 100 + return fn + + def test_split_limit(self): + fn = 
self._make_split_limit_test('split') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + + def test_rsplit_limit(self): + fn = self._make_split_limit_test('rsplit') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + def test_contains(self): const = self.const constchar = self.constchar diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test import test_loop -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - 
vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -5,10 +5,9 @@ Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, - track_reference, get_typedescr, RefcountState) + track_reference, get_typedescr, _Py_NewReference, RefcountState) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall -from pypy.objspace.std.objectobject import 
W_ObjectObject from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.error import OperationError import pypy.module.__builtin__.operation as operation @@ -184,26 +183,17 @@ return 0 @cpython_api([PyObject, PyTypeObjectPtr], PyObject) -def PyObject_Init(space, py_obj, type): +def PyObject_Init(space, obj, type): """Initialize a newly-allocated object op with its type and initial reference. Returns the initialized object. If type indicates that the object participates in the cyclic garbage detector, it is added to the detector's set of observed objects. Other fields of the object are not affected.""" - if not py_obj: + if not obj: PyErr_NoMemory(space) - py_obj.c_ob_type = type - py_obj.c_ob_refcnt = 1 - w_type = from_ref(space, rffi.cast(PyObject, type)) - assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, py_obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, py_obj) - else: - assert False, "Please add more cases in PyObject_Init" - return py_obj + obj.c_ob_type = type + _Py_NewReference(space, obj) + return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) def PyObject_InitVar(space, py_obj, type, size): @@ -255,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. 
This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -395,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, diff --git a/dotviewer/conftest.py b/dotviewer/conftest.py --- a/dotviewer/conftest.py +++ b/dotviewer/conftest.py @@ -6,4 +6,6 @@ dest="pygame", default=False, help="allow interactive tests using Pygame") -option = py.test.config.option +def pytest_configure(config): + global option + option = config.option diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 
@@ +import py from pypy.translator.jvm.test.runtest import JvmTest from pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,8 +1645,9 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ + .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ movq\t%rsi, %rcx\t/* 2nd argument, which is gcrootanchor */ movq\t%rsp, %rax\t/* my frame top address */ @@ -1666,6 +1667,7 @@ pushq\t%rcx\t\t\t/* self->prev = gcrootanchor */ movq\t%rsp, 8(%rcx)\t/* gcrootanchor->next = self */ movq\t%rsp, 0(%rax)\t\t\t/* next->prev = self */ + .cfi_def_cfa_offset 80\t/* 9 pushes + the retaddr = 80 bytes */ /* note: the Mac OS X 16 bytes aligment must be respected. */ call\t*%rdx\t\t/* invoke the callback */ @@ -1687,7 +1689,14 @@ /* the return value is the one of the 'call' above, */ /* because %rax (and possibly %rdx) are unmodified */ ret + .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... + s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. 
better than default of 131k for most cases # in case it didn't work diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -1,6 +1,6 @@ import py -from pypy.jit.metainterp.test.test_basic import JitMixin +from pypy.jit.metainterp.test.support import JitMixin from pypy.jit.tl.spli import interpreter, objects, serializer from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.backend.llgraph import runner diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, hint from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -214,3 +214,21 @@ assert res == 1024*1024 res = thread.stack_size(0) assert res == 2*1024*1024 + + def test_interrupt_main(self): + import thread, time + import signal + + 
def f(): + time.sleep(0.5) + thread.interrupt_main() + + def busy_wait(): + for x in range(1000): + time.sleep(0.01) + + # This is normally called by app_main.py + signal.signal(signal.SIGINT, signal.default_int_handler) + + thread.start_new_thread(f, ()) + raises(KeyboardInterrupt, busy_wait) diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,13 +22,21 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - return jit.hint(self, promote=True).items + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items + return self.items def getitem(self, idx): return self.getitems()[idx] @@ -44,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -620,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, 
func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -7,15 +7,16 @@ bootstrap_function, PyObjectFields, cpython_struct, CONST_STRING, CONST_WSTRING) from pypy.module.cpyext.pyerrors import PyErr_BadArgument -from pypy.module.cpyext.pyobject import PyObject, from_ref, make_typedescr +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.objspace.std import unicodeobject, unicodetype from pypy.rlib import runicode import sys -## See comment in stringobject.py. PyUnicode_FromUnicode(NULL, size) is not -## yet supported. +## See comment in stringobject.py. PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) @@ -28,7 +29,8 @@ make_typedescr(space.w_unicode.instancetypedef, basestruct=PyUnicodeObject.TO, attach=unicode_attach, - dealloc=unicode_dealloc) + dealloc=unicode_dealloc, + realize=unicode_realize) # Buffer for the default encoding (used by PyUnicde_GetDefaultEncoding) DEFAULT_ENCODING_SIZE = 100 @@ -39,12 +41,39 @@ Py_UNICODE = lltype.UniChar +def new_empty_unicode(space, length): + """ + Allocatse a PyUnicodeObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until unicode_realize() is + called. 
+ """ + typedescr = get_typedescr(space.w_unicode.instancetypedef) + py_obj = typedescr.allocate(space, space.w_unicode) + py_uni = rffi.cast(PyUnicodeObject, py_obj) + + buflen = length + 1 + py_uni.c_size = length + py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True) + return py_uni + def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) +def unicode_realize(space, py_obj): + """ + Creates the unicode in the interpreter. The PyUnicodeObject buffer must not + be modified after this call. + """ + py_uni = rffi.cast(PyUnicodeObject, py_obj) + s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + @cpython_api([PyObject], lltype.Void, external=False) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) @@ -128,7 +157,9 @@ def PyUnicode_AsUnicode(space, ref): """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" - if not PyUnicode_Check(space, ref): + # Don't use PyUnicode_Check, it will realize the object :-( + w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) + if not space.is_true(space.issubtype(w_type, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @@ -237,10 +268,11 @@ object. If the buffer is not NULL, the return value might be a shared object. 
Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" - if not wchar_p: - raise NotImplementedError - s = rffi.wcharpsize2unicode(wchar_p, length) - return space.wrap(s) + if wchar_p: + s = rffi.wcharpsize2unicode(wchar_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_unicode(space, length)) @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromWideChar(space, wchar_p, length): @@ -330,6 +362,29 @@ w_str = space.wrap(rffi.charpsize2str(s, size)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) + at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def PyUnicode_Resize(space, ref, newsize): + # XXX always create a new string so far + py_uni = rffi.cast(PyUnicodeObject, ref[0]) + if not py_uni.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "PyUnicode_Resize called on already created string")) + try: + py_newuni = new_empty_unicode(space, newsize) + except MemoryError: + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_uni.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newuni.c_buffer[i] = py_uni.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newuni) + return 0 + @cpython_api([PyObject], PyObject) def PyUnicode_AsUTF8String(space, w_unicode): """Encode a Unicode object using UTF-8 and return the result as Python string diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe 
from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -487,6 +487,7 @@ """) def test_range_iter(self): + py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) 
guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,10 +1010,11 @@ """) def test_func_defaults(self): + py.test.skip("skipped until we fix defaults") def main(n): i = 1 while i < n: - i += len(xrange(i)) / i + i += len(xrange(i+1)) - i return i log = self.run(main, [10000]) @@ -1023,17 +1025,49 @@ guard_true(i10, descr=) # This can be improved if the JIT realized the lookup of i5 produces # a constant and thus can be removed entirely - i12 = int_sub(i5, 1) - i13 = uint_floordiv(i12, i7) + i120 = int_add(i5, 1) + i140 = int_lt(0, i120) + guard_true(i140, descr=) + i13 = uint_floordiv(i5, i7) i15 = int_add(i13, 1) i17 = int_lt(i15, 0) - guard_false(i17, descr=) - i18 = int_floordiv(i15, i5) - i19 = int_xor(i15, i5) - i20 = int_mod(i15, i5) - i21 = int_is_true(i20) - i22 = int_add_ovf(i5, i18) - guard_no_overflow(descr=) + guard_false(i17, descr=) + i20 = int_sub(i15, i5) + i21 = int_add_ovf(i5, i20) + guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, p3, p4, i22, i6, i7, p8, p9, descr=) + jump(p0, p1, p2, p3, p4, i21, i6, i7, p8, p9, descr=) """) + + def test_unpack_iterable_non_list_tuple(self): + def main(n): + import array + + items = [array.array("i", [1])] * n + total = 0 + for a, in items: + total += a + return total + + log = self.run(main, [1000000]) + assert log.result == 1000000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i16 = int_ge(i12, i13) + guard_false(i16, descr=) + p17 = getarrayitem_gc(p15, i12, descr=) + i19 = int_add(i12, 1) + setfield_gc(p4, i19, descr=) + guard_nonnull_class(p17, 146982464, descr=) + i21 = getfield_gc(p17, descr=) + i23 = int_lt(0, i21) + guard_true(i23, descr=) + i24 = getfield_gc(p17, descr=) + i25 = getarrayitem_raw(i24, 0, descr=) + i27 = int_lt(1, i21) + guard_false(i27, descr=) + i28 = int_add_ovf(i10, i25) + guard_no_overflow(descr=) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) + """) diff --git 
a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, vref_None from pypy.rlib.jit import virtual_ref, virtual_ref_finish from pypy.rlib.objectmodel import compute_unique_id -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.virtualref import VirtualRefInfo diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder from pypy.jit.metainterp.blackhole import BlackholeInterpreter from pypy.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/pypy/jit/metainterp/test/test_tlc.py b/pypy/jit/metainterp/test/test_tlc.py --- 
a/pypy/jit/metainterp/test/test_tlc.py +++ b/pypy/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from pypy.jit.tl import tlc -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class TLCTests: diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -146,6 +146,15 @@ self.pending_signals[n] = None self.reissue_signal_action.fire_after_thread_switch() + def set_interrupt(self): + "Simulates the effect of a SIGINT signal arriving" + n = cpy_signal.SIGINT + if self.reissue_signal_action is None: + self.report_signal(n) + else: + self.pending_signals[n] = None + self.reissue_signal_action.fire_after_thread_switch() + def report_signal(self, n): try: w_handler = self.handlers_w[n] diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] @@ -385,6 +390,19 @@ assert module.__doc__ == "docstring" assert module.return_cookie() == 3.14 + def test_load_dynamic(self): + import sys + init = """ + if (Py_IsInitialized()) + Py_InitModule("foo", NULL); + """ + foo = self.import_module(name='foo', init=init) + assert 'foo' in sys.modules + del sys.modules['foo'] + import imp + foo2 = imp.load_dynamic('foo', foo.__file__) + assert 'foo' in sys.modules + assert foo.__dict__ == foo2.__dict__ def test_InitModule4_dotted(self): """ diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ 
import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver class ListTests(object): diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -45,3 +47,29 @@ space.warn('PyImport_ImportModuleNoBlock() is not non-blocking', space.w_RuntimeWarning) return PyImport_Import(space, space.wrap(rffi.charp2str(name))) + + at cpython_api([PyObject], PyObject) +def PyImport_ReloadModule(space, w_mod): + from pypy.module.imp.importing import reload + return reload(space, w_mod) + + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. 
+ + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. + Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/jit/metainterp/test/test_dlist.py b/pypy/jit/metainterp/test/test_dlist.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_dlist.py +++ /dev/null @@ -1,165 +0,0 @@ - -import py -from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin -py.test.skip("Disabled") - -class ListTests: - def test_basic(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - n -= 1 - return l[0] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(getarrayitem_gc=0, setarrayitem_gc=1) -# XXX fix codewriter -# guard_exception=0, -# guard_no_exception=1) - - def test_list_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - 
myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=2, getarrayitem_gc=0) - - def test_list_escapes_but_getitem_goes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - x = l[2] - y = l[1] + l[2] - l[1] = x + y - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=3, getarrayitem_gc=0) - - def test_list_of_ptrs(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - class A(object): - def __init__(self, x): - self.x = x - - def f(n): - l = [A(3)] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0].x + 1 - l[0] = A(x) - n -= 1 - return l[0].x - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=1, getarrayitem_gc=0, - new_with_vtable=1) # A should escape - - def test_list_checklength(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [10, 13], listops=True) - assert res == f(10, 13) - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_list_checklength_run(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) > n: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [50, 13], listops=True) - assert res == 42 - 
self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_checklength_cannot_go_away(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n): - l = [0] * n - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return len(l) - l = [0] * n - n -= 1 - return 0 - - res = self.meta_interp(f, [10], listops=True) - assert res == 2 - self.check_loops(arraylen_gc=1) - - def test_list_indexerror(self): - # this is an example where IndexError is raised before - # even getting to the JIT - py.test.skip("I suspect bug somewhere outside of the JIT") - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - l[n] = n - n -= 1 - return l[3] - - def g(n): - try: - f(n) - return 0 - except IndexError: - return 42 - - res = self.meta_interp(g, [10]) - assert res == 42 - self.check_loops(setitem=2) - -class TestLLtype(ListTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = 
value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). # if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + 
return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): @@ -286,7 +319,6 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class descr_virtual_token = vrefinfo.descr_virtual_token - descr_virtualref_index = vrefinfo.descr_virtualref_index # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, @@ -296,7 +328,6 @@ tokenbox = BoxInt() self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) - vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) def optimize_VIRTUAL_REF_FINISH(self, op): # Set the 'forced' field of the virtual_ref. 
diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -745,13 +737,6 @@ described there.""" raise NotImplementedError - at cpython_api([], lltype.Void) -def PyErr_SetInterrupt(space): - """This function simulates the effect of a SIGINT signal arriving --- the - next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. 
- It may be called without holding the interpreter lock.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) def PySignal_SetWakeupFd(space, fd): """This utility function specifies a file descriptor to which a '\0' byte will @@ -1123,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1972,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. 
In 2.x, this is just a alias diff --git a/pypy/rlib/_rweakkeydict.py b/pypy/rlib/_rweakkeydict.py --- a/pypy/rlib/_rweakkeydict.py +++ b/pypy/rlib/_rweakkeydict.py @@ -123,7 +123,7 @@ @jit.dont_look_inside def ll_get(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get', hex(hash), # ll_debugrepr(d.entries[i].key), # ll_debugrepr(d.entries[i].value)) @@ -143,7 +143,7 @@ def ll_set_nonnull(d, llkey, llvalue): hash = compute_identity_hash(llkey) keyref = weakref_create(llkey) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = keyref d.entries[i].value = llvalue @@ -160,7 +160,7 @@ @jit.dont_look_inside def ll_set_null(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. 
# We don't store a NULL value, but a dead weakref, because diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -539,7 +527,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, 
Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,14 +3,14 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.annlowlevel import llhelper -from pypy.interpreter.baseobjspace import DescrMismatch +from pypy.interpreter.baseobjspace import W_Root, DescrMismatch from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import 
OperationError @@ -287,11 +287,17 @@ W_TypeObject.__init__(self, space, extension_name, bases_w or [space.w_object], dict_w) - self.flag_cpytype = True + if not space.is_true(space.issubtype(self, space.w_type)): + self.flag_cpytype = True self.flag_heaptype = False @bootstrap_function def init_typeobject(space): + # Probably a hack + space.model.typeorder[W_PyCTypeObject] = [(W_PyCTypeObject, None), + (W_TypeObject, None), + (W_Root, None)] + make_typedescr(space.w_type.instancetypedef, basestruct=PyTypeObject, attach=type_attach, @@ -355,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -375,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -472,14 +478,19 @@ def PyType_Ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: return 0 + type_realize(space, rffi.cast(PyObject, pto)) + return 0 + +def type_realize(space, py_obj): + pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= Py_TPFLAGS_READYING try: - type_realize(space, rffi.cast(PyObject, pto)) - pto.c_tp_flags |= Py_TPFLAGS_READY + w_obj = 
_type_realize(space, py_obj) finally: pto.c_tp_flags &= ~Py_TPFLAGS_READYING - return 0 + pto.c_tp_flags |= Py_TPFLAGS_READY + return w_obj def solid_base(space, w_type): typedef = w_type.instancetypedef @@ -535,7 +546,7 @@ finally: Py_DecRef(space, base_pyo) -def type_realize(space, py_obj): +def _type_realize(space, py_obj): """ Creates an interpreter type from a PyTypeObject structure. """ @@ -554,7 +565,9 @@ finish_type_1(space, py_type) - w_obj = space.allocate_instance(W_PyCTypeObject, space.w_type) + w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) + + w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype) track_reference(space, py_obj, w_obj) w_obj.__init__(space, py_type) w_obj.ready() diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -108,6 +108,11 @@ #return w_value or None 
return None + def impl_setdefault(self, w_key, w_default): + # here the dict is always empty + self._as_rdict().impl_fallback_setitem(w_key, w_default) + return w_default + def impl_setitem(self, w_key, w_value): self._as_rdict().impl_fallback_setitem(w_key, w_value) @@ -181,6 +186,9 @@ # _________________________________________________________________ # fallback implementation methods + def impl_fallback_setdefault(self, w_key, w_default): + return self.r_dict_content.setdefault(w_key, w_default) + def impl_fallback_setitem(self, w_key, w_value): self.r_dict_content[w_key] = w_value @@ -227,6 +235,7 @@ ("length", 0), ("setitem_str", 2), ("setitem", 2), + ("setdefault", 2), ("delitem", 1), ("iter", 0), ("items", 0), @@ -317,6 +326,14 @@ def impl_setitem_str(self, key, w_value): self.content[key] = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + return self.content.setdefault(space.str_w(w_key), w_default) + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) @@ -787,13 +804,7 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup - w_value = w_dict.getitem(w_key) - if w_value is not None: - return w_value - else: - w_dict.setitem(w_key, w_default) - return w_default + return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): len_defaults = len(defaults_w) diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant 
+from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/pypy/jit/metainterp/test/test_optimizebasic.py b/pypy/jit/metainterp/test/test_optimizebasic.py --- a/pypy/jit/metainterp/test/test_optimizebasic.py +++ 
b/pypy/jit/metainterp/test/test_optimizebasic.py @@ -253,7 +253,7 @@ loop.call_pure_results = args_dict() if call_pure_results is not None: for k, v in call_pure_results.items(): - loop.call_pure_results[list(k)] = v + loop.call_pure_results[list(k)] = v metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo @@ -2886,7 +2886,7 @@ # the result of the call, recorded as the first arg), or turned into # a regular CALL. arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)] - call_pure_results = {tuple(arg_consts): ConstInt(42)} + call_pure_results = {tuple(arg_consts): ConstInt(42)} ops = ''' [i0, i1, i2] escape(i1) @@ -2931,7 +2931,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -2964,7 +2963,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3005,7 +3003,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3062,7 +3059,7 @@ self.loop.inputargs[0].value = self.nodeobjvalue self.check_expanded_fail_descr('''p2, p1 p0.refdescr = p2 - where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 where p1 is a node_vtable, nextdescr=p1b where p1b is a node_vtable, valuedescr=i1 ''', rop.GUARD_NO_EXCEPTION) @@ -3084,7 +3081,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) 
setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3111,7 +3107,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3360,7 +3355,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) i3 = int_lt(i2, -5) guard_true(i3) [] @@ -3371,7 +3366,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) jump(i0) """ diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -212,52 +212,48 @@ # cpython, and rpython, assumed that integer division truncates # towards -infinity. however, in C99 and most (all?) other # backends, integer division truncates towards 0. so assuming - # that, we can generate scary code that applies the necessary + # that, we call a helper function that applies the necessary # correction in the right cases. 
- # paper and pencil are encouraged for this :) - - from pypy.rpython.rbool import bool_repr - assert isinstance(repr.lowleveltype, Number) - c_zero = inputconst(repr.lowleveltype, repr.lowleveltype._default) op = func.split('_', 1)[0] if op == 'floordiv': - # return (x/y) - (((x^y)<0)&((x%y)!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod = hop.genop(prefix + 'mod', vlist, - resulttype=repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_mod, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_res = hop.genop(prefix + 'sub', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'floordiv'] + v_res = hop.gendirectcall(llfunc, vlist[0], vlist[1], v_res) elif op == 'mod': - # return r + y*(((x^y)<0)&(r!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_res, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr1 = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_corr = hop.genop(prefix + 'mul', [v_corr1, vlist[1]], - resulttype=repr) - v_res = hop.genop(prefix + 'add', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'mod'] + v_res = hop.gendirectcall(llfunc, vlist[1], v_res) + v_res = hop.llops.convertvar(v_res, repr, r_result) return v_res +INT_BITS_1 = r_int.BITS - 1 +LLONG_BITS_1 = r_longlong.BITS - 1 + +def ll_correct_int_floordiv(x, y, r): + p = r * y + if y < 0: u = p - x + else: u = x - p + return r + (u >> INT_BITS_1) + +def ll_correct_llong_floordiv(x, y, r): + p = r * y + 
if y < 0: u = p - x + else: u = x - p + return r + (u >> LLONG_BITS_1) + +def ll_correct_int_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> INT_BITS_1)) + +def ll_correct_llong_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> LLONG_BITS_1)) + + #Helper functions for comparisons def _rtype_compare_template(hop, func): diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -19,6 +19,7 @@ 'load_module': 'interp_imp.load_module', 'load_source': 'interp_imp.load_source', 'load_compiled': 'interp_imp.load_compiled', + 'load_dynamic': 'interp_imp.load_dynamic', '_run_compiled_module': 'interp_imp._run_compiled_module', # pypy '_getimporter': 'importing._getimporter', # pypy #'run_module': 'interp_imp.run_module', @@ -36,7 +37,6 @@ } appleveldefs = { - 'load_dynamic': 'app_imp.load_dynamic', } def __init__(self, space, *args): diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def 
test_time_clock(self): diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/support.py @@ -0,0 +1,261 @@ + +import py, sys +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.ootypesystem import ootype +from pypy.jit.backend.llgraph import runner +from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter import longlong + +def _get_jitcodes(testself, CPUClass, func, values, type_system, + supports_longlong=False, **kwds): + from pypy.jit.codewriter import support, codewriter + + class FakeJitCell: + __compiled_merge_points = [] + def get_compiled_merge_points(self): + return self.__compiled_merge_points[:] + def set_compiled_merge_points(self, lst): + self.__compiled_merge_points = lst + + class FakeWarmRunnerState: + def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): + pass + + def jit_cell_at_key(self, greenkey): + assert greenkey == [] + return self._cell + _cell = FakeJitCell() + + trace_limit = sys.maxint + enable_opts = ALL_OPTS_DICT + + func._jit_unroll_safe_ = True + rtyper = support.annotate(func, values, type_system=type_system) + graphs = rtyper.annotator.translator.graphs + result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] + + class FakeJitDriverSD: + num_green_args = 0 + portal_graph = graphs[0] + virtualizable_info = None + greenfield_info = None + result_type = result_kind + portal_runner_ptr = "???" 
+ + stats = history.Stats() + cpu = CPUClass(rtyper, stats, None, False) + cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) + testself.cw = cw + policy = JitPolicy() + policy.set_supports_longlong(supports_longlong) + cw.find_all_graphs(policy) + # + testself.warmrunnerstate = FakeWarmRunnerState() + testself.warmrunnerstate.cpu = cpu + FakeJitDriverSD.warmstate = testself.warmrunnerstate + if hasattr(testself, 'finish_setup_for_interp_operations'): + testself.finish_setup_for_interp_operations() + # + cw.make_jitcodes(verbose=True) + +def _run_with_blackhole(testself, args): + from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder + cw = testself.cw + blackholeinterpbuilder = BlackholeInterpBuilder(cw) + blackholeinterp = blackholeinterpbuilder.acquire_interp() + count_i = count_r = count_f = 0 + for value in args: + T = lltype.typeOf(value) + if T == lltype.Signed: + blackholeinterp.setarg_i(count_i, value) + count_i += 1 + elif T == llmemory.GCREF: + blackholeinterp.setarg_r(count_r, value) + count_r += 1 + elif T == lltype.Float: + value = longlong.getfloatstorage(value) + blackholeinterp.setarg_f(count_f, value) + count_f += 1 + else: + raise TypeError(T) + [jitdriver_sd] = cw.callcontrol.jitdrivers_sd + blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) + blackholeinterp.run() + return blackholeinterp._final_result_anytype() + +def _run_with_pyjitpl(testself, args): + + class DoneWithThisFrame(Exception): + pass + + class DoneWithThisFrameRef(DoneWithThisFrame): + def __init__(self, cpu, *args): + DoneWithThisFrame.__init__(self, *args) + + cw = testself.cw + opt = history.Options(listops=True) + metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + metainterp_sd.finish_setup(cw) + [jitdriver_sd] = metainterp_sd.jitdrivers_sd + metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) + metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame + metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef + 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame + testself.metainterp = metainterp + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + except DoneWithThisFrame, e: + #if conftest.option.view: + # metainterp.stats.view() + return e.args[0] + else: + raise Exception("FAILED") + +def _run_with_machine_code(testself, args): + metainterp = testself.metainterp + num_green_args = metainterp.jitdriver_sd.num_green_args + loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) + if len(loop_tokens) != 1: + return NotImplemented + # a loop was successfully created by _run_with_pyjitpl(); call it + cpu = metainterp.cpu + for i in range(len(args) - num_green_args): + x = args[num_green_args + i] + typecode = history.getkind(lltype.typeOf(x)) + set_future_value(cpu, i, x, typecode) + faildescr = cpu.execute_token(loop_tokens[0]) + assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') + if metainterp.jitdriver_sd.result_type == history.INT: + return cpu.get_latest_value_int(0) + elif metainterp.jitdriver_sd.result_type == history.REF: + return cpu.get_latest_value_ref(0) + elif metainterp.jitdriver_sd.result_type == history.FLOAT: + return cpu.get_latest_value_float(0) + else: + return None + + +class JitMixin: + basic = True + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) + def check_loop_count(self, count): + """NB. This is a hack; use check_tree_loop_count() or + check_enter_count() for the real thing. 
+ This counts as 1 every bridge in addition to every loop; and it does + not count at all the entry bridges from interpreter, although they + are TreeLoops as well.""" + assert get_stats().compiled_count == count + def check_tree_loop_count(self, count): + assert len(get_stats().loops) == count + def check_loop_count_at_most(self, count): + assert get_stats().compiled_count <= count + def check_enter_count(self, count): + assert get_stats().enter_count == count + def check_enter_count_at_most(self, count): + assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): + assert get_stats().aborted_count == count + def check_aborted_count_at_least(self, count): + assert get_stats().aborted_count >= count + + def meta_interp(self, *args, **kwds): + kwds['CPUClass'] = self.CPUClass + kwds['type_system'] = self.type_system + if "backendopt" not in kwds: + kwds["backendopt"] = False + return ll_meta_interp(*args, **kwds) + + def interp_operations(self, f, args, **kwds): + # get the JitCodes for the function f + _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + # try to run it with blackhole.py + result1 = _run_with_blackhole(self, args) + # try to run it with pyjitpl.py + result2 = _run_with_pyjitpl(self, args) + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented + # + if (longlong.supports_longlong and + isinstance(result1, longlong.r_float_storage)): + result1 = longlong.getrealfloat(result1) + return result1 + + def check_history(self, expected=None, **isns): + # this can be used after calling meta_interp + get_stats().check_history(expected, **isns) + + def check_operations_history(self, expected=None, **isns): + # this can be used after interp_operations + if expected is not None: + expected = dict(expected) + 
expected['jump'] = 1 + self.metainterp.staticdata.stats.check_history(expected, **isns) + + +class LLJitMixin(JitMixin): + type_system = 'lltype' + CPUClass = runner.LLtypeCPU + + @staticmethod + def Ptr(T): + return lltype.Ptr(T) + + @staticmethod + def GcStruct(name, *fields, **kwds): + S = lltype.GcStruct(name, *fields, **kwds) + return S + + malloc = staticmethod(lltype.malloc) + nullptr = staticmethod(lltype.nullptr) + + @staticmethod + def malloc_immortal(T): + return lltype.malloc(T, immortal=True) + + def _get_NODE(self): + NODE = lltype.GcForwardReference() + NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), + ('next', lltype.Ptr(NODE)))) + return NODE + +class OOJitMixin(JitMixin): + type_system = 'ootype' + #CPUClass = runner.OOtypeCPU + + def setup_class(cls): + py.test.skip("ootype tests skipped for now") + + @staticmethod + def Ptr(T): + return T + + @staticmethod + def GcStruct(name, *fields, **kwds): + if 'hints' in kwds: + kwds['_hints'] = kwds['hints'] + del kwds['hints'] + I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) + return I + + malloc = staticmethod(ootype.new) + nullptr = staticmethod(ootype.null) + + @staticmethod + def malloc_immortal(T): + return ootype.new(T) + + def _get_NODE(self): + NODE = ootype.Instance('NODE', ootype.ROOT, {}) + NODE._add_fields({'value': ootype.Signed, + 'next': NODE}) + return NODE diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ 
convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/jit/backend/cli/test/test_basic.py b/pypy/jit/backend/cli/test/test_basic.py --- a/pypy/jit/backend/cli/test/test_basic.py +++ b/pypy/jit/backend/cli/test/test_basic.py @@ -1,14 +1,14 @@ import py from pypy.jit.backend.cli.runner import CliCPU -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit -class CliJitMixin(test_basic.OOJitMixin): +class CliJitMixin(suport.OOJitMixin): CPUClass = CliCPU def setup_class(cls): from pypy.translator.cli.support import PythonNet PythonNet.System # possibly raises Skip -class TestBasic(CliJitMixin, test_basic.TestOOtype): +class TestBasic(CliJitMixin, test_ajit.TestOOtype): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, 
jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class ToyLanguageTests: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, 
access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? 
@@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 
@@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,14 +190,30 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. 
""" return posix.waitpid(-1, 0) + def wait3(options): + """ wait3(options) -> (pid, status, rusage) + + Wait for completion of a child process and provides resource usage informations + """ + from _pypy_wait import wait3 + return wait3(options) + + def wait4(pid, options): + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -285,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -3,8 +3,102 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.jitexc import JitException +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization + +class CachedField(object): + def __init__(self): + # Cache information for a field descr. It can be in one + # of two states: + # + # 1. 'cached_fields' is a dict mapping OptValues of structs + # to OptValues of fields. All fields on-heap are + # synchronized with the values stored in the cache. + # + # 2. we just did one setfield, which is delayed (and thus + # not synchronized). 'lazy_setfield' is the delayed + # ResOperation. In this state, 'cached_fields' contains + # out-of-date information. More precisely, the field + # value pending in the ResOperation is *not* visible in + # 'cached_fields'. 
+ # + self._cached_fields = {} + self._lazy_setfield = None + self._lazy_setfield_registered = False + + def do_setfield(self, optheap, op): + # Update the state with the SETFIELD_GC operation 'op'. + structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + assert not self.possible_aliasing(optheap, structvalue) + cached_fieldvalue = self._cached_fields.get(structvalue, None) + if cached_fieldvalue is not fieldvalue: + # common case: store the 'op' as lazy_setfield, and register + # myself in the optheap's _lazy_setfields list + self._lazy_setfield = op + if not self._lazy_setfield_registered: + optheap._lazy_setfields.append(self) + self._lazy_setfield_registered = True + else: + # this is the case where the pending setfield ends up + # storing precisely the value that is already there, + # as proved by 'cached_fields'. In this case, we don't + # need any _lazy_setfield: the heap value is already right. + # Note that this may reset to None a non-None lazy_setfield, + # cancelling its previous effects with no side effect. + self._lazy_setfield = None + + def possible_aliasing(self, optheap, structvalue): + # If lazy_setfield is set and contains a setfield on a different + # structvalue, then we are annoyed, because it may point to either + # the same or a different structure at runtime. + return (self._lazy_setfield is not None + and (optheap.getvalue(self._lazy_setfield.getarg(0)) + is not structvalue)) + + def getfield_from_cache(self, optheap, structvalue): + # Returns the up-to-date field's value, or None if not cached. 
+ if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + if self._lazy_setfield is not None: + op = self._lazy_setfield + assert optheap.getvalue(op.getarg(0)) is structvalue + return optheap.getvalue(op.getarg(1)) + else: + return self._cached_fields.get(structvalue, None) + + def remember_field_value(self, structvalue, fieldvalue): + assert self._lazy_setfield is None + self._cached_fields[structvalue] = fieldvalue + + def force_lazy_setfield(self, optheap): + op = self._lazy_setfield + if op is not None: + # This is the way _lazy_setfield is usually reset to None. + # Now we clear _cached_fields, because actually doing the + # setfield might impact any of the stored result (because of + # possible aliasing). + self._cached_fields.clear() + self._lazy_setfield = None + optheap.next_optimization.propagate_forward(op) + # Once it is done, we can put at least one piece of information + # back in the cache: the value of this particular structure's + # field. 
+ structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + self.remember_field_value(structvalue, fieldvalue) + + def get_reconstructed(self, optimizer, valuemap): + assert self._lazy_setfield is None + cf = CachedField() + for structvalue, fieldvalue in self._cached_fields.iteritems(): + structvalue2 = structvalue.get_reconstructed(optimizer, valuemap) + fieldvalue2 = fieldvalue .get_reconstructed(optimizer, valuemap) + cf._cached_fields[structvalue2] = fieldvalue2 + return cf + class CachedArrayItems(object): def __init__(self): @@ -20,40 +114,23 @@ """Cache repeated heap accesses""" def __init__(self): - # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}} + # cached fields: {descr: CachedField} self.cached_fields = {} - self.known_heap_fields = {} + self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - # lazily written setfields (at most one per descr): {descr: op} - self.lazy_setfields = {} - self.lazy_setfields_descrs = [] # keys (at least) of previous dict def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() if True: self.force_all_lazy_setfields() - assert not self.lazy_setfields_descrs - assert not self.lazy_setfields else: - new.lazy_setfields_descrs = self.lazy_setfields_descrs - new.lazy_setfields = self.lazy_setfields + assert 0 # was: new.lazy_setfields = self.lazy_setfields for descr, d in self.cached_fields.items(): - newd = {} - new.cached_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - - for descr, d in self.known_heap_fields.items(): - newd = {} - new.known_heap_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - + new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap) + 
new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): newd = {} @@ -74,30 +151,16 @@ return new def clean_caches(self): + del self._lazy_setfields[:] self.cached_fields.clear() - self.known_heap_fields.clear() self.cached_arrayitems.clear() - def cache_field_value(self, descr, value, fieldvalue, write=False): - if write: - # when seeing a setfield, we have to clear the cache for the same - # field on any other structure, just in case they are aliasing - # each other - d = self.cached_fields[descr] = {} - else: - d = self.cached_fields.setdefault(descr, {}) - d[value] = fieldvalue - - def read_cached_field(self, descr, value): - # XXX self.cached_fields and self.lazy_setfields should probably - # be merged somehow - d = self.cached_fields.get(descr, None) - if d is None: - op = self.lazy_setfields.get(descr, None) - if op is None: - return None - return self.getvalue(op.getarg(1)) - return d.get(value, None) + def field_cache(self, descr): + try: + cf = self.cached_fields[descr] + except KeyError: + cf = self.cached_fields[descr] = CachedField() + return cf def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): d = self.cached_arrayitems.get(descr, None) @@ -157,11 +220,15 @@ self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or - opnum == rop.SETFIELD_RAW or - opnum == rop.SETARRAYITEM_GC or - opnum == rop.SETARRAYITEM_RAW or - opnum == rop.DEBUG_MERGE_POINT): + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == 
rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or @@ -179,8 +246,8 @@ for fielddescr in effectinfo.write_descrs_fields: self.force_lazy_setfield(fielddescr) try: - del self.cached_fields[fielddescr] - del self.known_heap_fields[fielddescr] + cf = self.cached_fields[fielddescr] + cf._cached_fields.clear() except KeyError: pass for arraydescr in effectinfo.write_descrs_arrays: @@ -194,10 +261,7 @@ # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. return - self.force_all_lazy_setfields() - elif op.is_final() or (not we_are_translated() and - op.getopnum() < 0): # escape() operations - self.force_all_lazy_setfields() + self.force_all_lazy_setfields() self.clean_caches() @@ -205,58 +269,54 @@ assert value.is_constant() newvalue = self.getvalue(value.box) if value is not newvalue: - for d in self.cached_fields.values(): - if value in d: - d[newvalue] = d[value] - # FIXME: Update the other caches too? - - - def force_lazy_setfield(self, descr, before_guard=False): + for cf in self.cached_fields.itervalues(): + if value in cf._cached_fields: + cf._cached_fields[newvalue] = cf._cached_fields[value] + + def force_lazy_setfield(self, descr): try: - op = self.lazy_setfields[descr] + cf = self.cached_fields[descr] except KeyError: return - del self.lazy_setfields[descr] - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - try: - heapvalue = self.known_heap_fields[op.getdescr()][value] - if fieldvalue is heapvalue: - return - except KeyError: - pass - self.next_optimization.propagate_forward(op) + cf.force_lazy_setfield(self) + def fixup_guard_situation(self): # hackish: reverse the order of the last two operations if it makes # sense to avoid a situation like "int_eq/setfield_gc/guard_true", # which the backend (at least the x86 backend) does not handle well. 
newoperations = self.optimizer.newoperations - if before_guard and len(newoperations) >= 2: - lastop = newoperations[-1] - prevop = newoperations[-2] - # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" - # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" - # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.getopnum() - lastop_args = lastop.getarglist() - if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE - or prevop.is_ovf()) - and prevop.result not in lastop_args): - newoperations[-2] = lastop - newoperations[-1] = prevop + if len(newoperations) < 2: + return + lastop = newoperations[-1] + if (lastop.getopnum() != rop.SETFIELD_GC and + lastop.getopnum() != rop.SETARRAYITEM_GC): + return + # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" + # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" + # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" + prevop = newoperations[-2] + opnum = prevop.getopnum() + if not (prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE + or prevop.is_ovf()): + return + if prevop.result in lastop.getarglist(): + return + newoperations[-2] = lastop + newoperations[-1] = prevop def force_all_lazy_setfields(self): - if len(self.lazy_setfields_descrs) > 0: - for descr in self.lazy_setfields_descrs: - self.force_lazy_setfield(descr) - del self.lazy_setfields_descrs[:] + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + cf.force_lazy_setfield(self) def force_lazy_setfields_for_guard(self): pendingfields = [] - for descr in self.lazy_setfields_descrs: - try: - op = self.lazy_setfields[descr] - except KeyError: + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + op = cf._lazy_setfield + if op is None: continue # the only really interesting case that we need to handle in the # guards' resume data is that of a virtual object that is 
stored @@ -266,41 +326,27 @@ fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py - pendingfields.append((descr, value.box, + pendingfields.append((op.getdescr(), value.box, fieldvalue.get_key_box())) else: - self.force_lazy_setfield(descr, before_guard=True) + cf.force_lazy_setfield(self) + self.fixup_guard_situation() return pendingfields - def force_lazy_setfield_if_necessary(self, op, value, write=False): - try: - op1 = self.lazy_setfields[op.getdescr()] - except KeyError: - if write: - self.lazy_setfields_descrs.append(op.getdescr()) - else: - if self.getvalue(op1.getarg(0)) is not value: - self.force_lazy_setfield(op.getdescr()) - def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.getarg(0)) - self.force_lazy_setfield_if_necessary(op, value) - # check if the field was read from another getfield_gc just before - # or has been written to recently - fieldvalue = self.read_cached_field(op.getdescr(), value) + structvalue = self.getvalue(op.getarg(0)) + cf = self.field_cache(op.getdescr()) + fieldvalue = cf.getfield_from_cache(self, structvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return # default case: produce the operation - value.ensure_nonnull() + structvalue.ensure_nonnull() ###self.optimizer.optimize_default(op) self.emit_operation(op) # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.getdescr(), value, fieldvalue) - # keep track of what's on the heap - d = self.known_heap_fields.setdefault(op.getdescr(), {}) - d[value] = fieldvalue + cf.remember_field_value(structvalue, fieldvalue) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], @@ -309,14 +355,8 @@ (op.getdescr().repr_of_descr())) raise BogusPureField # - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - cached_fieldvalue = 
self.read_cached_field(op.getdescr(), value) - if fieldvalue is not cached_fieldvalue: - self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.getdescr()] = op - # remember the result of future reads of the field - self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) + cf = self.field_cache(op.getdescr()) + cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC(self, op): value = self.getvalue(op.getarg(0)) diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. 
Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -116,7 +116,6 @@ /* Begin PYPY hacks */ /* #include "Python.h" */ -#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 #define HAVE_UINT32_T #define HAVE_INT32_T #define HAVE_UINT64_T diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -126,8 +126,16 @@ _run_compiled_module(space, w_modulename, filename, w_file, w_mod) return w_mod + at unwrap_spec(filename=str) +def load_dynamic(space, w_modulename, filename, w_file=None): + if not space.config.objspace.usemodules.cpyext: + raise OperationError(space.w_ImportError, space.wrap( + "Not implemented")) + importing.load_c_extension(space, filename, space.str_w(w_modulename)) + return importing.check_sys_modules(space, w_modulename) + def new_module(space, w_name): - return 
space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.tool.autopath import pypydir -from pypy.rlib import rposix +from pypy.rlib import jit, rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,8 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +88,13 @@ # # Custom implementations - def ll_math_isnan(y): - return 
bool(math_isnan(y)) - + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. + return y != y def ll_math_isinf(y): - return bool(math_isinf(y)) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -248,3 +248,8 @@ """This is synonymous to ``raise SystemExit''. It will cause the current thread to exit silently unless the exception is caught.""" raise OperationError(space.w_SystemExit, space.w_None) + +def interrupt_main(space): + """Raise a KeyboardInterrupt in the main thread. +A subthread can use this function to interrupt the main thread.""" + space.check_signal_action.set_interrupt() diff --git a/pypy/module/imp/app_imp.py b/pypy/module/imp/app_imp.py deleted file mode 100644 --- a/pypy/module/imp/app_imp.py +++ /dev/null @@ -1,5 +0,0 @@ - - -def load_dynamic(name, pathname, file=None): - """Always raises ah ImportError on pypy""" - raise ImportError('Not implemented') diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1126,7 +1126,7 @@ """ if not isinstance(source, str): source = py.std.inspect.getsource(source).lstrip() - while source.startswith('@py.test.mark.'): + while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function # object, we may ignore them assert '\n' in source diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -5,6 +5,7 @@ from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask from pypy.rlib.rbigint import rbigint +from pypy.rlib.rarithmetic import intmask PyLong_Check, PyLong_CheckExact = build_type_checkers("Long") @@ 
-178,9 +179,9 @@ assert isinstance(w_long, W_LongObject) return w_long.num.sign - at cpython_api([CONST_STRING, rffi.SIZE_T, rffi.INT_real, rffi.INT_real], PyObject) +UCHARP = rffi.CArrayPtr(rffi.UCHAR) + at cpython_api([UCHARP, rffi.SIZE_T, rffi.INT_real, rffi.INT_real], PyObject) def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): - s = rffi.charpsize2str(bytes, n) little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) @@ -189,9 +190,9 @@ for i in range(0, n): if little_endian: - c = ord(s[i]) + c = intmask(bytes[i]) else: - c = ord(s[n - i - 1]) + c = intmask(bytes[n - i - 1]) if i == 0 and signed and c & 0x80: negative = True if negative: diff --git a/pypy/jit/tl/tla/test_tla.py b/pypy/jit/tl/tla/test_tla.py --- a/pypy/jit/tl/tla/test_tla.py +++ b/pypy/jit/tl/tla/test_tla.py @@ -155,7 +155,7 @@ # ____________________________________________________________ -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): def test_loop(self): diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_extmodules.py @@ -0,0 +1,68 @@ +import sys +import pytest + +from pypy.config.pypyoption import get_pypy_config +from pypy.objspace.std import StdObjSpace +from pypy.tool.udir import udir + +mod_init = """ +from pypy.interpreter.mixedmodule import MixedModule + +import time + +class Module(MixedModule): + + appleveldefs = {} + + interpleveldefs = { + 'clock' : 'interp_time.clock', + 'time' : 'interp_time.time_', + 'sleep' : 'interp_time.sleep', + } +""" + +mod_interp = """ +import time + +from pypy.interpreter.gateway import unwrap_spec + +def clock(space): + return space.wrap(time.clock()) + +def time_(space): + return space.wrap(time.time()) + + at unwrap_spec(seconds=float) +def sleep(space, seconds): + time.sleep(seconds) 
+""" + +old_sys_path = [] + +def init_extmodule_code(): + pkg = udir.join("testext") + pkg.ensure(dir=True) + pkg.join("__init__.py").write("# package") + mod = pkg.join("extmod") + mod.ensure(dir=True) + mod.join("__init__.py").write(mod_init) + mod.join("interp_time.py").write(mod_interp) + +class AppTestExtModules(object): + def setup_class(cls): + init_extmodule_code() + conf = get_pypy_config() + conf.objspace.extmodules = 'testext.extmod' + old_sys_path[:] = sys.path[:] + sys.path.insert(0, str(udir)) + space = StdObjSpace(conf) + cls.space = space + + def teardown_class(cls): + sys.path[:] = old_sys_path + + @pytest.mark.skipif("config.option.runappdirect") + def test_import(self): + import extmod + assert extmod.__file__.endswith('extmod') + assert type(extmod.time()) is float diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git 
a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame 
in the interpreter. The PyFrameObject structure must not + be modified after this call. + """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -127,7 +129,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" 
faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" block @@ -147,7 +149,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -163,7 +164,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -86,6 +86,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- 
a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -490,7 +490,9 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime - + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) + # experimental operations in support of thread cloning, only # implemented by the Mark&Sweep GC 'gc_x_swap_pool': LLOp(canraise=(MemoryError,), canunwindgc=True), diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -365,7 +365,11 @@ def setbuiltinmodule(self, importname): """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" - fullname = "pypy.module.%s" % importname + if '.' in importname: + fullname = importname + importname = fullname.rsplit('.', 1)[1] + else: + fullname = "pypy.module.%s" % importname Module = __import__(fullname, None, None, ["Module"]).Module @@ -428,6 +432,11 @@ if value and name not in modules: modules.append(name) + if self.config.objspace.extmodules: + for name in self.config.objspace.extmodules.split(','): + if name not in modules: + modules.append(name) + # a bit of custom logic: time2 or rctime take precedence over time # XXX this could probably be done as a "requires" in the config if ('time2' in modules or 'rctime' in modules) and 'time' in modules: @@ -745,7 +754,12 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - items = [] + # If we know the expected length we can preallocate. 
+ if expected_length == -1: + items = [] + else: + items = [None] * expected_length + idx = 0 while True: try: w_item = self.next(w_iterator) @@ -753,19 +767,22 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and len(items) == expected_length: + if expected_length != -1 and idx == expected_length: raise OperationError(self.w_ValueError, self.wrap("too many values to unpack")) - items.append(w_item) - if expected_length != -1 and len(items) < expected_length: - i = len(items) - if i == 1: + if expected_length == -1: + items.append(w_item) + else: + items[idx] = w_item + idx += 1 + if expected_length != -1 and idx < expected_length: + if idx == 1: plural = "" else: plural = "s" raise OperationError(self.w_ValueError, self.wrap("need more than %d value%s to unpack" % - (i, plural))) + (idx, plural))) return items unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, @@ -1333,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus 
+extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -211,8 +211,11 @@ def ll_stringslice_minusone(s): return s.ll_substring(0, s.ll_strlen()-1) - def ll_split_chr(RESULT, s, c): - return RESULT.ll_convert_from_array(s.ll_split_chr(c)) + def ll_split_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_split_chr(c, max)) + + def ll_rsplit_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_rsplit_chr(c, max)) def ll_int(s, base): if not 2 <= base <= 36: diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -274,8 +274,12 @@ screeninfo.append((0, [])) self.lxy = p, ln prompt = self.get_prompt(ln, ll >= p >= 0) + while '\n' in prompt: + pre_prompt, _, prompt = prompt.partition('\n') + screen.append(pre_prompt) + screeninfo.append((0, [])) p -= ll + 1 - lp = len(prompt) + prompt, lp = self.process_prompt(prompt) l, l2 = disp_str(line) wrapcount = (len(l) + lp) / w if wrapcount == 0: @@ -297,6 +301,31 @@ screeninfo.append((0, [])) return screen + def process_prompt(self, prompt): + """ Process the prompt. + + This means calculate the length of the prompt. The character \x01 + and \x02 are used to bracket ANSI control sequences and need to be + excluded from the length calculation. 
So also a copy of the prompt + is returned with these control characters removed. """ + + out_prompt = '' + l = len(prompt) + pos = 0 + while True: + s = prompt.find('\x01', pos) + if s == -1: + break + e = prompt.find('\x02', s) + if e == -1: + break + # Found start and end brackets, subtract from string length + l = l - (e-s+1) + out_prompt += prompt[pos:s] + prompt[s+1:e] + pos = e+1 + out_prompt += prompt[pos:] + return out_prompt, l + def bow(self, p=None): """Return the 0-based index of the word break preceding p most immediately. diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. 
It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. """ - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -1,9 +1,81 @@ # encoding: iso-8859-15 from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.unicodeobject import Py_UNICODE +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.module.cpyext.unicodeobject import ( + Py_UNICODE, PyUnicodeObject, new_empty_unicode) +from pypy.module.cpyext.api import PyObjectP, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef from pypy.rpython.lltypesystem import rffi, lltype import sys, py +class AppTestUnicodeObject(AppTestCpythonExtensionBase): + def test_unicodeobject(self): + module = self.import_extension('foo', [ + ("get_hello1", "METH_NOARGS", + """ + return PyUnicode_FromStringAndSize( + "Hello world", 11); + """), + ("test_GetSize", "METH_NOARGS", + """ + PyObject* s = PyUnicode_FromString("Hello world"); + int 
result = 0; + + if(PyUnicode_GetSize(s) == 11) { + result = 1; + } + if(s->ob_type->tp_basicsize != sizeof(void*)*4) + result = 0; + Py_DECREF(s); + return PyBool_FromLong(result); + """), + ("test_GetSize_exception", "METH_NOARGS", + """ + PyObject* f = PyFloat_FromDouble(1.0); + Py_ssize_t size = PyUnicode_GetSize(f); + + Py_DECREF(f); + return NULL; + """), + ("test_is_unicode", "METH_VARARGS", + """ + return PyBool_FromLong(PyUnicode_Check(PyTuple_GetItem(args, 0))); + """)]) + assert module.get_hello1() == u'Hello world' + assert module.test_GetSize() + raises(TypeError, module.test_GetSize_exception) + + assert module.test_is_unicode(u"") + assert not module.test_is_unicode(()) + + def test_unicode_buffer_init(self): + module = self.import_extension('foo', [ + ("getunicode", "METH_NOARGS", + """ + PyObject *s, *t; + Py_UNICODE* c; + Py_ssize_t len; + + s = PyUnicode_FromUnicode(NULL, 4); + if (s == NULL) + return NULL; + t = PyUnicode_FromUnicode(NULL, 3); + if (t == NULL) + return NULL; + Py_DECREF(t); + c = PyUnicode_AsUnicode(s); + c[0] = 'a'; + c[1] = 0xe9; + c[3] = 'c'; + return s; + """), + ]) + s = module.getunicode() + assert len(s) == 4 + assert s == u'a�\x00c' + + + class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): assert api.PyUnicode_GET_SIZE(space.wrap(u'sp�m')) == 4 @@ -77,6 +149,28 @@ assert space.unwrap(w_res) == u'sp�' rffi.free_charp(s) + def test_unicode_resize(self, space, api): + py_uni = new_empty_unicode(space, 10) + ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + py_uni.c_buffer[0] = u'a' + py_uni.c_buffer[1] = u'b' + py_uni.c_buffer[2] = u'c' + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 3) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 3 + assert py_uni.c_buffer[1] == u'b' + assert py_uni.c_buffer[3] == u'\x00' + # the same for growing + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 10) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert 
py_uni.c_size == 10 + assert py_uni.c_buffer[1] == 'b' + assert py_uni.c_buffer[10] == '\x00' + Py_DecRef(space, ar[0]) + lltype.free(ar, flavor='raw') + def test_AsUTF8String(self, space, api): w_u = space.wrap(u'sp�m') w_res = api.PyUnicode_AsUTF8String(w_u) @@ -235,13 +329,13 @@ x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) - w_y = api.PyUnicode_FromUnicode(target_chunk, 4) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, 4)) assert space.eq_w(w_y, space.wrap(u"abcd")) size = api.PyUnicode_GET_SIZE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, size) - w_y = api.PyUnicode_FromUnicode(target_chunk, size) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, size)) assert space.eq_w(w_y, w_x) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. - # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. 
+ if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -6,7 +6,7 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import BoxInt -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but 
invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations 
+= self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def 
test_import(self, space, api): @@ -7,6 +8,22 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + + def test_reload(self, space, api): + pdb = api.PyImport_Import(space.wrap("pdb")) + space.delattr(pdb, space.wrap("set_trace")) + pdb = api.PyImport_ReloadModule(pdb) + assert space.getattr(pdb, space.wrap("set_trace")) + class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): skip("leak?") diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject 
*name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,20 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, - getattrfunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, - ssizeobjargproc, iternextfunc, initproc, richcmpfunc, hashfunc, - descrgetfunc, descrsetfunc, objobjproc) + getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, + ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from 
pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -65,6 +69,12 @@ finally: rffi.free_charp(name_ptr) +def wrap_getattro(space, w_self, w_args, func): + func_target = rffi.cast(getattrofunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + return generic_cpy_call(space, func_target, w_self, args_w[0]) + def wrap_setattr(space, w_self, w_args, func): func_target = rffi.cast(setattrofunc, func) check_num_args(space, w_args, 2) @@ -187,18 +197,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne 
= get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -289,7 +340,12 @@ # irregular interface, because of tp_getattr/tp_getattro confusion if NAME == "__getattr__": - wrapper = wrap_getattr + if SLOT == "tp_getattro": + wrapper = wrap_getattro + elif SLOT == "tp_getattr": + wrapper = wrap_getattr + else: + assert False function = globals().get(FUNCTION, None) assert FLAGS == 0 or FLAGS == PyWrapperFlag_KEYWORDS @@ -455,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), @@ -560,12 +616,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + 
slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -443,7 +443,8 @@ "ll_upper": Meth([], self.SELFTYPE_T), "ll_lower": Meth([], self.SELFTYPE_T), "ll_substring": Meth([Signed, Signed], self.SELFTYPE_T), # ll_substring(start, count) - "ll_split_chr": Meth([self.CHAR], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_split_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_rsplit_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! "ll_contains": Meth([self.CHAR], Bool), "ll_replace_chr_chr": Meth([self.CHAR, self.CHAR], self.SELFTYPE_T), }) @@ -1480,9 +1481,16 @@ # NOT_RPYTHON return self.make_string(self._str[start:start+count]) - def ll_split_chr(self, ch): + def ll_split_chr(self, ch, max): # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.split(ch)] + l = [self.make_string(s) for s in self._str.split(ch, max)] + res = _array(Array(self._TYPE), len(l)) + res._array[:] = l + return res + + def ll_rsplit_chr(self, ch, max): + # NOT_RPYTHON + l = [self.make_string(s) for s in self._str.rsplit(ch, max)] res = _array(Array(self._TYPE), len(l)) res._array[:] = l return res diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def 
optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -101,7 +101,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -78,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - 
self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -124,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) @@ -133,6 +132,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" + self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -145,6 +145,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -170,26 +171,47 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. 
- mc.MOV_rr(edi.value, edx.value) - - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -197,21 +219,28 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): - from pypy.rlib import rstack _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0: return # no stack check (for tests, or non-translated) # + # make a "function" that is called immediately at the start of + # an assembler function. In particular, the stack looks like: + # + # | ... 
| <-- aligned to a multiple of 16 + # | retaddr of caller | + # | my own retaddr | <-- esp + # +---------------------+ + # mc = codebuf.MachineCodeBlockWrapper() - mc.PUSH_r(ebp.value) - mc.MOV_rr(ebp.value, esp.value) # + stack_size = WORD if IS_X86_64: # on the x86_64, we have to save all the registers that may # have been used to pass arguments + stack_size += 6*WORD + 8*8 for reg in [edi, esi, edx, ecx, r8, r9]: mc.PUSH_r(reg.value) mc.SUB_ri(esp.value, 8*8) @@ -220,11 +249,13 @@ # if IS_X86_32: mc.LEA_rb(eax.value, +8) + stack_size += 2*WORD + mc.PUSH_r(eax.value) # alignment mc.PUSH_r(eax.value) elif IS_X86_64: mc.LEA_rb(edi.value, +16) - mc.AND_ri(esp.value, -16) # + # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) # mc.MOV(eax, heap(self.cpu.pos_exception())) @@ -232,16 +263,16 @@ mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() # - if IS_X86_64: + if IS_X86_32: + mc.ADD_ri(esp.value, 2*WORD) + elif IS_X86_64: # restore the registers for i in range(7, -1, -1): mc.MOVSD_xs(i, 8*i) - for i, reg in [(6, r9), (5, r8), (4, ecx), - (3, edx), (2, esi), (1, edi)]: - mc.MOV_rb(reg.value, -8*i) + mc.ADD_ri(esp.value, 8*8) + for reg in [r9, r8, ecx, edx, esi, edi]: + mc.POP_r(reg.value) # - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) mc.RET() # # patch the JNZ above @@ -266,9 +297,7 @@ # function, and will instead return to the caller's caller. Note # also that we completely ignore the saved arguments, because we # are interrupting the function. - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) - mc.ADD_ri(esp.value, WORD) + mc.ADD_ri(esp.value, stack_size) mc.RET() # rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -537,7 +566,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. 
in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -550,6 +579,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. not translated) @@ -571,12 +604,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -686,8 +739,8 @@ 
nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -896,9 +949,9 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, x, arglocs, start) p = 0 n = len(arglocs) @@ -924,9 +977,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -984,12 +1037,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. 
+ gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1205,6 +1273,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -1783,6 +1856,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1796,8 +1873,8 @@ tmp = ecx else: tmp = eax - - self._emit_call(x, arglocs, 3, tmp=tmp) + + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1828,7 +1905,7 @@ faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') @@ -1842,8 +1919,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), 
arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1868,7 +1945,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -1895,7 +1972,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -1990,11 +2067,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2006,8 +2088,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2015,7 +2096,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = 
self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2025,19 +2106,27 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ 
b/pypy/jit/metainterp/test/test_loop.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import history From commits-noreply at bitbucket.org Tue Apr 12 22:19:45 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 22:19:45 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Start support on the JIT. Either I messed something up or Message-ID: <20110412201945.B35DF36C053@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43315:867c75fc3389 Date: 2011-04-12 12:50 -0700 http://bitbucket.org/pypy/pypy/changeset/867c75fc3389/ Log: Start support on the JIT. Either I messed something up or it works so far out of the box with the untranslated llgraph backend. 
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -208,6 +208,8 @@ def rewrite_op_cast_int_to_uint(self, op): pass def rewrite_op_cast_uint_to_int(self, op): pass def rewrite_op_resume_point(self, op): pass + def rewrite_op_show_from_ptr32(self, op): pass + def rewrite_op_hide_into_ptr32(self, op): pass def _rewrite_symmetric(self, op): """Rewrite 'c1+v2' into 'v2+c1' in an attempt to avoid generating diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -0,0 +1,50 @@ +import py +from pypy.config.translationoption import IS_64_BITS +from pypy.jit.metainterp.test.support import LLJitMixin +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rlib import jit + + +class TestRCompressed(LLJitMixin): + + def setup_class(cls): + if not IS_64_BITS: + py.test.skip("only for 64-bits") + + def test_simple(self): + S = lltype.GcStruct('S', ('n', lltype.Signed)) + SPTR = lltype.Ptr(S) + @jit.dont_look_inside + def escape(p): + assert lltype.typeOf(p) == llmemory.HiddenGcRef32 + return p + def f(n): + y = lltype.malloc(S) + y.n = n + p = llop.hide_into_ptr32(llmemory.HiddenGcRef32, y) + p = escape(p) + z = llop.show_from_ptr32(SPTR, p) + return z.n + res = self.interp_operations(f, [42]) + assert res == 42 + + def test_store_load(self): + S = lltype.GcStruct('S', ('n', lltype.Signed)) + T = lltype.GcStruct('T', ('p', llmemory.HiddenGcRef32), + ('c', lltype.Char)) + SPTR = lltype.Ptr(S) + @jit.dont_look_inside + def escape(p): + return p + def f(n): + y = lltype.malloc(S) + y.n = n + t = lltype.malloc(T) + t.c = '?' 
+ t.p = llop.hide_into_ptr32(llmemory.HiddenGcRef32, y) + t = escape(t) + z = llop.show_from_ptr32(SPTR, t.p) + return z.n * 1000 + ord(t.c) + res = self.interp_operations(f, [42]) + assert res == 42063 diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -779,6 +779,16 @@ assert op1.args == [v1, ('fielddescr', S, 'x')] assert op1.result == v2 +def test_getfield_hiddengcref32_gives_r(): + S = lltype.GcStruct('S', ('p', llmemory.HiddenGcRef32)) + v1 = varoftype(lltype.Ptr(S)) + v2 = varoftype(llmemory.HiddenGcRef32) + op = SpaceOperation('getfield', [v1, Constant('p', lltype.Void)], v2) + op1 = Transformer(FakeCPU()).rewrite_operation(op) + assert op1.opname == 'getfield_gc_r' # it gives directly an 'r' + assert op1.args == [v1, ('fielddescr', S, 'p')] + assert op1.result == v2 + def test_int_abs(): v1 = varoftype(lltype.Signed) v2 = varoftype(lltype.Signed) From commits-noreply at bitbucket.org Tue Apr 12 22:19:47 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 12 Apr 2011 22:19:47 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Found out the issue -- cast_opaque_ptr() happily converts between Message-ID: <20110412201947.3584F36C053@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43316:11d653a94d4d Date: 2011-04-12 13:17 -0700 http://bitbucket.org/pypy/pypy/changeset/11d653a94d4d/ Log: Found out the issue -- cast_opaque_ptr() happily converts between HiddenGcRef32 and GCREFs. To have a bit more control, forbid this and use explicit conversions. 
diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -15,15 +15,10 @@ def test_simple(self): S = lltype.GcStruct('S', ('n', lltype.Signed)) SPTR = lltype.Ptr(S) - @jit.dont_look_inside - def escape(p): - assert lltype.typeOf(p) == llmemory.HiddenGcRef32 - return p def f(n): y = lltype.malloc(S) y.n = n p = llop.hide_into_ptr32(llmemory.HiddenGcRef32, y) - p = escape(p) z = llop.show_from_ptr32(SPTR, p) return z.n res = self.interp_operations(f, [42]) diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -583,6 +583,8 @@ if not ptr: return lltype.nullptr(llmemory.HiddenGcRef32.TO) ptr = ptr.ptr + if isinstance(lltype.typeOf(ptr).TO, lltype.GcOpaqueType): + ptr = ptr._obj.container._as_ptr() return lltype.cast_opaque_ptr(llmemory.HiddenGcRef32, ptr) def op_show_from_ptr32(RESTYPE, ptr32): @@ -592,6 +594,8 @@ PTRTYPE = lltype.Ptr(ptr32._obj.container._TYPE) ptr = lltype.cast_opaque_ptr(PTRTYPE, ptr32) return llmemory.cast_ptr_to_adr(ptr) + if isinstance(RESTYPE.TO, lltype.GcOpaqueType): + ptr32 = ptr32._obj.container._as_ptr() return lltype.cast_opaque_ptr(RESTYPE, ptr32) op_show_from_ptr32.need_result_type = True diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -12,6 +12,7 @@ REF, INT, FLOAT) from pypy.jit.codewriter import heaptracker from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.module.support import LLSupport, OOSupport from pypy.rpython.llinterp import LLException @@ -1065,11 +1066,18 @@ x = heaptracker.adr2int(x) return 
lltype.cast_primitive(TYPE, x) -def cast_to_ptr(x): - assert isinstance(lltype.typeOf(x), lltype.Ptr) +def cast_to_ptr(x, may_be_hiddengcref32=False): + TYPE = lltype.typeOf(x) + assert isinstance(TYPE, lltype.Ptr) + if TYPE == llmemory.HiddenGcRef32: + assert may_be_hiddengcref32 + return llop.show_from_ptr32(llmemory.GCREF, x) return lltype.cast_opaque_ptr(llmemory.GCREF, x) -def cast_from_ptr(TYPE, x): +def cast_from_ptr(TYPE, x, may_be_hiddengcref32=False): + if TYPE == llmemory.HiddenGcRef32: + assert may_be_hiddengcref32 + return llop.hide_into_ptr32(TYPE, x) return lltype.cast_opaque_ptr(TYPE, x) def cast_to_floatstorage(x): @@ -1302,7 +1310,7 @@ def do_getarrayitem_gc_ptr(array, index): array = array._obj.container - return cast_to_ptr(array.getitem(index)) + return cast_to_ptr(array.getitem(index), may_be_hiddengcref32=True) def _getfield_gc(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] @@ -1316,7 +1324,8 @@ return cast_to_floatstorage(_getfield_gc(struct, fieldnum)) def do_getfield_gc_ptr(struct, fieldnum): - return cast_to_ptr(_getfield_gc(struct, fieldnum)) + return cast_to_ptr(_getfield_gc(struct, fieldnum), + may_be_hiddengcref32=True) def _getfield_raw(struct, fieldnum): STRUCT, fieldname = symbolic.TokenToField[fieldnum] @@ -1369,7 +1378,7 @@ def do_setarrayitem_gc_ptr(array, index, newvalue): array = array._obj.container ITEMTYPE = lltype.typeOf(array).OF - newvalue = cast_from_ptr(ITEMTYPE, newvalue) + newvalue = cast_from_ptr(ITEMTYPE, newvalue, may_be_hiddengcref32=True) array.setitem(index, newvalue) def do_setfield_gc_int(struct, fieldnum, newvalue): @@ -1390,7 +1399,7 @@ STRUCT, fieldname = symbolic.TokenToField[fieldnum] ptr = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), struct) FIELDTYPE = getattr(STRUCT, fieldname) - newvalue = cast_from_ptr(FIELDTYPE, newvalue) + newvalue = cast_from_ptr(FIELDTYPE, newvalue, may_be_hiddengcref32=True) setattr(ptr, fieldname, newvalue) def do_setfield_raw_int(struct, fieldnum, 
newvalue): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -917,6 +917,14 @@ solid = ptr._solid) elif (isinstance(CURTYPE.TO, OpaqueType) and isinstance(PTRTYPE.TO, OpaqueType)): + from pypy.rpython.lltypesystem import llmemory + if (CURTYPE == llmemory.HiddenGcRef32 or + PTRTYPE == llmemory.HiddenGcRef32): + OTHER = CURTYPE + if CURTYPE == llmemory.HiddenGcRef32: OTHER = PTRTYPE + raise InvalidCast("don't use cast_opaque_ptr() to cast between " + "HiddenGcRef32 and %r; use explicitly " + "show_from_ptr32 or hide_into_ptr32." % OTHER) if not ptr: return nullptr(PTRTYPE.TO) try: From commits-noreply at bitbucket.org Wed Apr 13 01:01:57 2011 From: commits-noreply at bitbucket.org (gutworth) Date: Wed, 13 Apr 2011 01:01:57 +0200 (CEST) Subject: [pypy-svn] pypy default: add a test for assignment to a unicode literal Message-ID: <20110412230157.AEDDE2A202E@codespeak.net> Author: Benjamin Peterson Branch: Changeset: r43317:9dada9aaba1a Date: 2011-04-12 17:59 -0500 http://bitbucket.org/pypy/pypy/changeset/9dada9aaba1a/ Log: add a test for assignment to a unicode literal diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,7 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), From commits-noreply at bitbucket.org Wed Apr 13 01:01:58 2011 From: commits-noreply at bitbucket.org (gutworth) Date: Wed, 13 Apr 2011 01:01:58 +0200 (CEST) Subject: [pypy-svn] pypy default: also test assignment to byte literals Message-ID: <20110412230158.4BE722A202E@codespeak.net> Author: Benjamin Peterson Branch: Changeset: 
r43318:5bc5b474b1f9 Date: 2011-04-12 18:02 -0500 http://bitbucket.org/pypy/pypy/changeset/5bc5b474b1f9/ Log: also test assignment to byte literals diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -755,6 +755,7 @@ ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), From commits-noreply at bitbucket.org Wed Apr 13 08:57:12 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 08:57:12 +0200 (CEST) Subject: [pypy-svn] pypy default: fix the test Message-ID: <20110413065712.0D9BE2A2030@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43319:3ecefd7254ef Date: 2011-04-13 08:56 +0200 http://bitbucket.org/pypy/pypy/changeset/3ecefd7254ef/ Log: fix the test diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ -45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) From commits-noreply at bitbucket.org Wed Apr 13 08:57:14 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 08:57:14 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110413065714.782B82A2030@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43320:661e61aa9cf2 Date: 2011-04-13 08:56 +0200 
http://bitbucket.org/pypy/pypy/changeset/661e61aa9cf2/ Log: merge diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 @@ +import py from pypy.translator.jvm.test.runtest import JvmTest from pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), From commits-noreply at bitbucket.org Wed Apr 13 09:09:04 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 13 Apr 2011 09:09:04 +0200 (CEST) Subject: [pypy-svn] pypy default: kill all these tests, which have already been ported to test_pypy_c_new Message-ID: <20110413070904.67ED42A2030@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43321:81e98ec04e5d Date: 2011-04-13 09:08 +0200 
http://bitbucket.org/pypy/pypy/changeset/81e98ec04e5d/ Log: kill all these tests, which have already been ported to test_pypy_c_new diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -198,44 +198,6 @@ print print '@' * 79 - def test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." - i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert 
len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) - while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -783,269 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if 
i %s %f: sa += 1 - else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, 
([], res)) - - def test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = 
t - i += 1 - return 3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 def test_intbound_simple(self): From commits-noreply at bitbucket.org Wed Apr 13 09:26:28 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 09:26:28 +0200 (CEST) Subject: [pypy-svn] pypy default: Unskip one test, keep one skipped until we merge quasi-immut fields and Message-ID: <20110413072628.4B66536C201@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43322:4be4cb558143 Date: 2011-04-13 09:22 +0200 http://bitbucket.org/pypy/pypy/changeset/4be4cb558143/ Log: Unskip one test, keep one skipped until we merge quasi-immut fields and fix one 64bit test diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -487,7 +488,6 @@ """) def test_range_iter(self): - py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -1010,7 +1010,7 @@ """) def test_func_defaults(self): - py.test.skip("skipped until we fix defaults") + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1063,7 +1063,7 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, i25) From commits-noreply at bitbucket.org Wed Apr 13 09:26:28 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 09:26:28 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: 
<20110413072628.EB32B36C201@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43323:7f8040abfc71 Date: 2011-04-13 09:26 +0200 http://bitbucket.org/pypy/pypy/changeset/7f8040abfc71/ Log: merge diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -198,44 +198,6 @@ print print '@' * 79 - def test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." - i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not 
bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) - while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -783,269 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if 
i %s %f: sa += 1 - else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, 
([], res)) - - def test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = 
t - i += 1 - return 3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 def test_intbound_simple(self): From commits-noreply at bitbucket.org Wed Apr 13 12:41:31 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 12:41:31 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: typo Message-ID: <20110413104131.A231A2A2030@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43324:bb74a3f1a9ae Date: 2011-04-13 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/bb74a3f1a9ae/ Log: typo diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -75,7 +75,7 @@ def invalidate(self): # When this is called, all the loops that we record become - # become invalid and must not be called again, nor returned to. + # invalid and must not be called again, nor returned to. 
wrefs = self.looptokens_wrefs self.looptokens_wrefs = [] for wref in wrefs: From commits-noreply at bitbucket.org Wed Apr 13 12:52:39 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 12:52:39 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: write a failing test Message-ID: <20110413105239.8E1A42A2030@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43325:df9144938d0a Date: 2011-04-13 12:52 +0200 http://bitbucket.org/pypy/pypy/changeset/df9144938d0a/ Log: write a failing test diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -175,6 +175,32 @@ assert res == 700707 self.check_loops(getfield_gc=0) + def test_invalidate_by_setfield(self): + py.test.skip("Not implemented") + jitdriver = JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + + def f(foo, bc): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(bc=bc, i=i, foo=foo, total=total) + if bc == 0: + f(foo, 1) + if bc == 1: + foo.a = int(i > 5) + i += 1 + total += foo.a + return total + + def g(): + return f(Foo(1), 0) + + assert self.meta_interp(g, []) == g() class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass From commits-noreply at bitbucket.org Wed Apr 13 12:55:23 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 12:55:23 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Write another failing test for missing piece of invalidating in the backend Message-ID: <20110413105523.CBC9A2A2030@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43326:463f4df577de Date: 2011-04-13 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/463f4df577de/ Log: Write another failing test for missing piece of invalidating in 
the backend diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -4,6 +4,7 @@ from pypy.jit.metainterp.quasiimmut import QuasiImmut from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver, dont_look_inside @@ -175,6 +176,33 @@ assert res == 700707 self.check_loops(getfield_gc=0) + def test_invalidate_while_running(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + + def external(foo, v): + if v: + foo.a = 2 + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, foo=foo, total=total) + external(foo, i > 7) + i += 1 + total += foo.a + return total + + def g(): + return f(Foo(1)) + + assert self.meta_interp(g, [], policy=StopAtXPolicy(external)) == g() + def test_invalidate_by_setfield(self): py.test.skip("Not implemented") jitdriver = JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) From commits-noreply at bitbucket.org Wed Apr 13 14:18:17 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 14:18:17 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Progress on out-of-line-guards-2. This is the poor-man's version, we can do Message-ID: <20110413121817.243A52A2032@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43327:6d4d86899596 Date: 2011-04-13 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/6d4d86899596/ Log: Progress on out-of-line-guards-2. This is the poor-man's version, we can do better. 
* Record a guard on each of quasi_immutable_fields * This guard fails if the loop is invalidated (only llgraph backend so far) * It creates a normal bridge. This is the point where we can improve by invalidating call assemblers for example and resetting counters diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -80,6 +80,7 @@ if loop.quasi_immutable_deps is not None: for qmut in loop.quasi_immutable_deps: qmut.register_loop_token(wref) + # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): @@ -400,6 +401,12 @@ self.copy_all_attributes_into(res) return res +class ResumeGuardNotInvalidated(ResumeGuardDescr): + def _clone_if_mutable(self): + res = ResumeGuardNotInvalidated() + self.copy_all_attributes_into(res) + return res + class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -286,6 +286,10 @@ raise ValueError("CALL_ASSEMBLER not supported") llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) + def invalidate_loop(self, looptoken): + for loop in looptoken.compiled_loop_token.loop_and_bridges: + loop._obj.externalobj.invalid = True + # ---------- def sizeof(self, S): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE +from pypy.rpython.rclass import FieldListAccessor, 
IR_QUASI_IMMUTABLE from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, @@ -12,6 +13,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -62,6 +64,18 @@ nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_field': IR_QUASI_IMMUTABLE}) + QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed), + ('mutate_field', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + quasi = lltype.malloc(QUASI, immortal=True) + quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field') + quasibox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, quasi)) + quasiimmutdescr = QuasiImmutDescr(cpu, quasibox, + quasifielddescr, + cpu.fielddescrof(QUASI, 'mutate_field')) + NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT), ('ref', lltype.Ptr(OBJECT))) nodeobj = lltype.malloc(NODEOBJ) diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,14 +555,15 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any - @arguments("box", "descr", "descr") + @arguments("box", "descr", "descr", "orgpc") def opimpl_record_quasiimmut_field(self, box, fielddescr, - mutatefielddescr): + mutatefielddescr, orgpc): from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr cpu = self.metainterp.cpu descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) + self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) def _nonstandard_virtualizable(self, 
pc, box): # returns True if 'box' is actually not the "standard" virtualizable @@ -1085,6 +1086,8 @@ if opnum == rop.GUARD_NOT_FORCED: resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, metainterp.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() guard_op = metainterp.history.record(opnum, moreargs, None, @@ -1857,6 +1860,9 @@ self.handle_possible_exception() except ChangeFrame: pass + elif opnum == rop.GUARD_NOT_INVALIDATED: + pass # XXX we want to do something special in resume descr, + # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected self.execute_raised(OverflowError(), constant=True) try: diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -960,7 +960,7 @@ op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], v2) tr = Transformer(FakeCPU()) - [op1, op2] = tr.rewrite_operation(op) + [_, op1, op2] = tr.rewrite_operation(op) assert op1.opname == 'record_quasiimmut_field' assert len(op1.args) == 3 assert op1.args[0] == v_x diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -1,3 +1,6 @@ + +import py + from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.metainterp import typesystem diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -119,6 +119,7 @@ self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} + 
self._remove_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -386,6 +387,7 @@ # constant-fold the following getfield_gc. structvalue = self.getvalue(op.getarg(0)) if not structvalue.is_constant(): + self._remove_guard_not_invalidated = True return # not a constant at all; ignore QUASIIMMUT_FIELD # from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr @@ -396,6 +398,7 @@ # simply ignoring the QUASIIMMUT_FIELD hint and compiling it # as a regular getfield. if not qmutdescr.is_still_valid(): + self._remove_guard_not_invalidated = True return # record as an out-of-line guard if self.optimizer.quasi_immutable_deps is None: @@ -405,6 +408,13 @@ fieldvalue = self.getvalue(qmutdescr.constantfieldbox) cf = self.field_cache(qmutdescr.fielddescr) cf.remember_field_value(structvalue, fieldvalue) + self._remove_guard_not_invalidated = False + + def optimize_GUARD_NOT_INVALIDATED(self, op): + if self._remove_guard_not_invalidated: + return + self._remove_guard_not_invalidated = False + self.emit_operation(op) def propagate_forward(self, op): opnum = op.getopnum() diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -81,9 +81,7 @@ for wref in wrefs: looptoken = wref() if looptoken is not None: - pass - # - # XXX tell the backend to mark the loop as invalid + self.cpu.invalidate_loop(looptoken) class QuasiImmutDescr(AbstractDescr): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1292,6 +1292,8 @@ # We get here because it used to overflow, but now it no longer # does. 
pass + elif opnum == rop.GUARD_NOT_INVALIDATED: + pass else: from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -583,7 +583,8 @@ descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('record_quasiimmut_field', + op1 = [SpaceOperation('-live-', [], None), + SpaceOperation('record_quasiimmut_field', [v_inst, descr, descr1], None), op1] return op1 diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -167,6 +167,7 @@ class CompiledLoop(object): has_been_freed = False + invalid = False def __init__(self): self.inputargs = [] @@ -933,6 +934,9 @@ if forced: raise GuardFailed + def op_guard_not_invalidated(self, descr): + if self.loop.invalid: + raise GuardFailed class OOFrame(Frame): diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5702,8 +5702,35 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() - - + def test_quasi_immut(self): + ops = """ + [p0, p1, i0] + quasiimmut_field(p0, descr=quasiimmutdescr) + guard_not_invalidated() [] + i1 = getfield_gc(p0, descr=quasifielddescr) + jump(p1, p0, i1) + """ + expected = """ + [p0, p1, i0] + i1 = getfield_gc(p0, descr=quasifielddescr) + jump(p1, p0, i1) + """ + self.optimize_loop(ops, expected) + + def test_quasi_immut_2(self): + ops = """ + [] + quasiimmut_field(ConstPtr(myptr), descr=quasiimmutdescr) + guard_not_invalidated() [] + i1 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) + jump() + """ + expected = """ + 
[] + guard_not_invalidated() [] + jump() + """ + self.optimize_loop(ops, expected, expected) ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -380,6 +380,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- From commits-noreply at bitbucket.org Wed Apr 13 15:30:52 2011 From: commits-noreply at bitbucket.org (fijal) Date: Wed, 13 Apr 2011 15:30:52 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: write a test and note first problem Message-ID: <20110413133052.9E2B32A2035@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43328:d5cf3b70d497 Date: 2011-04-13 15:30 +0200 http://bitbucket.org/pypy/pypy/changeset/d5cf3b70d497/ Log: write a test and note first problem diff --git a/pypy/jit/backend/x86/test/test_quasiimmut.py b/pypy/jit/backend/x86/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_quasiimmut.py @@ -0,0 +1,9 @@ + +import py +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin +from pypy.jit.metainterp.test import test_quasiimmut + +class TestLoopSpec(Jit386Mixin, test_quasiimmut.QuasiImmutTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_loop.py + pass diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -22,6 +22,7 @@ """Returns the current QuasiImmut instance in the field, possibly creating one. 
""" + # XXX this is broken on x86 qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) if qmut_gcref: qmut = QuasiImmut.show(cpu, qmut_gcref) From commits-noreply at bitbucket.org Wed Apr 13 16:24:27 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 16:24:27 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: some comments from Peng Wu Message-ID: <20110413142427.23BB42A2035@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3498:f563410ac3de Date: 2011-04-13 16:23 +0200 http://bitbucket.org/pypy/extradoc/changeset/f563410ac3de/ Log: some comments from Peng Wu diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -185,6 +185,9 @@ the hints are applied to the tiny object model and Section~\ref{sec:evaluation} presents benchmarks. +\cfbolz{XXX stress more that "the crux of the techniques and a significant +portion of new contributions in the paper are from how to refactoring codes to +expose likely runtime constants and pure functions"} \section{Background} @@ -372,6 +375,9 @@ \section{Hints for Controlling Optimization} \label{sec:hints} +\cfbolz{XXX more precise definition of what promote does} +\cfbolz{change the hint(x, promote=True) syntax to something more readable} + In this section we will describe how to add two hints that allow the interpreter author to increase the optimization opportunities for constant folding. If applied correctly these techniques can give really big speedups by @@ -467,8 +473,8 @@ return z + y \end{lstlisting} -The meaning of this hint is that the tracer should pretend that \texttt{x} is a -constant +The hint indicates that \texttt{x} is likely a runtime constant and the JIT +should try to perform runtime specialization on it in the code that follows. When just running the code, the function has no effect, as it simply returns its first argument. When tracing, some extra work is done. 
Let's assume that this changed function is traced with @@ -528,6 +534,9 @@ promote the types. Section~\ref{sec:} will present a complete example of how this works. +\cfbolz{XXX explain how value specialization on the interpreter level can lead +to type specialization on the language level} + \subsection{Declaring New Pure Operations} @@ -592,6 +601,8 @@ return self.x * 2 + 1 \end{lstlisting} +\cfbolz{XXX define the meaning of purefunction more precisely, particularly because add\_attribute has side effects, which is confusing} + \cfbolz{should we mention that pure functions are not actually called by the optimizer, but the values that are seen during tracing are used?} Now the trace will look like this: From commits-noreply at bitbucket.org Wed Apr 13 16:32:29 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 16:32:29 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: comments by rhy0lite Message-ID: <20110413143229.01A902A2035@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3499:fd57ef390cb4 Date: 2011-04-13 16:32 +0200 http://bitbucket.org/pypy/extradoc/changeset/fd57ef390cb4/ Log: comments by rhy0lite diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -154,7 +154,7 @@ specialized versions of the same code, one for each actual value. To exploit the runtime feedback, the implementation code and data structures need to be structured so that many such slow-varying values are at hand. The hints that -we present allow exactly to implement such feedback and exploitation in a +we present precisely allow us to implement such feedback and exploitation in a meta-tracing context. Concretely these hints are used to control how the optimizer of the @@ -712,6 +712,10 @@ memory address of the \texttt{Map} instance that has been promoted. Operations that can be optimized away are grayed out. 
+\cfbolz{XXX also explain that some forwarding of guarded values is happening, +make clearer which figures show optimized code and which show non-optimized +code} + The calls to \texttt{Map.getindex} can be optimized away, because they are calls to a pure function and they have constant arguments. That means that \texttt{index1/2/3} are constant and the guards on them can be removed. All but the first guard on @@ -989,6 +993,8 @@ \section*{Acknowledgements} +XXX Peng Wu and David Edelsohn + \bibliographystyle{abbrv} \bibliography{paper} diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index bba7e9b9246f9bee671aa631b5f8ed889869ba04..74788ae085bc4b96ba57bf6d8c906b05b37dbceb GIT binary patch [cut] From commits-noreply at bitbucket.org Wed Apr 13 17:29:23 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 17:29:23 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: change x = hint(x, promote=True) to just promote(x) to reduce confusion Message-ID: <20110413152923.740EF36C206@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3500:5ec3af1eeb7e Date: 2011-04-13 16:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/5ec3af1eeb7e/ Log: change x = hint(x, promote=True) to just promote(x) to reduce confusion diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -376,7 +376,6 @@ \label{sec:hints} \cfbolz{XXX more precise definition of what promote does} -\cfbolz{change the hint(x, promote=True) syntax to something more readable} In this section we will describe how to add two hints that allow the interpreter author to increase the optimization opportunities for constant @@ -468,15 +467,17 @@ case, we can add a hint to promote \texttt{x}, like this: \begin{lstlisting}[mathescape,basicstyle=\ttfamily] def f1(x, y): - x = hint(x, promote=True) + promote(x) z = x * 2 + 1 return z + y \end{lstlisting} The hint 
indicates that \texttt{x} is likely a runtime constant and the JIT should try to perform runtime specialization on it -in the code that follows. When just running the code, the function has no -effect, as it simply returns its first argument. When tracing, some extra work +in the code that follows.\footnote{For technical reasons the promote hint needs +to be written down slightly differently in the actual code.} When just running +the code, the \texttt{promote} function has no +effect. When tracing, some extra work is done. Let's assume that this changed function is traced with the arguments \texttt{4} and \texttt{8}. The trace will be the same, except for one operation at the beginning: @@ -593,7 +594,7 @@ self.y = y def f(self, val): - self = hint(self, promote=True) + promote(self) self.y = self.compute() + val @purefunction diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -9,8 +9,9 @@ self.version = VersionTag() def find_method(self, name): - self = hint(self, promote=True) - version = hint(self.version, promote=True) + promote(self) + version = self.version + promote(version) return self._find_method(name, version) @purefunction diff --git a/talk/icooolps2011/code/map.tex b/talk/icooolps2011/code/map.tex --- a/talk/icooolps2011/code/map.tex +++ b/talk/icooolps2011/code/map.tex @@ -26,14 +26,16 @@ self.storage = [] def getfield(self, name): - map = hint(self.map, promote=True) + map = self.map + promote(map) index = map.getindex(name) if index != -1: return self.storage[index] return None def write_attribute(self, name, value): - map = hint(self.map, promote=True) + map = self.map + promote(map) index = map.getindex(name) if index != -1: self.storage[index] = value From commits-noreply at bitbucket.org Wed Apr 13 17:29:23 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 17:29:23 +0200 (CEST) Subject: [pypy-svn] 
extradoc extradoc: michael's notes Message-ID: <20110413152923.E832A36C206@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3501:1f698977d63c Date: 2011-04-13 17:29 +0200 http://bitbucket.org/pypy/extradoc/changeset/1f698977d63c/ Log: michael's notes diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -100,7 +100,7 @@ \begin{abstract} -Meta-tracing JITs can be applied to a variety of different +Meta-tracing JIT compilers can be applied to a variety of different languages without explicitly encoding language semantics into the compiler. So far, they lacked a way to feed back runtime information into the compiler, which restricted their performance. In this paper we describe the @@ -108,6 +108,8 @@ enough to implement classical VM techniques such as maps and polymorphic inline caches. +\cfbolz{XXX tracing is runtime feed back too, clarify} + \end{abstract} @@ -116,9 +118,9 @@ One of the hardest parts of implementing a dynamic language efficiently is to optimize its object model. This is made harder by the fact that many recent -languages such as Python, JavaScript or Ruby have rather complex core object +languages such as Python, JavaScript or Ruby have a rather complex core object semantics. For them, even implementing just an interpreter is already a complex -task. Implementing them efficiently with a just-in-time compiler (JIT) is +task. Implementing these languages efficiently with a just-in-time compiler (JIT) is extremely challenging, because of their many corner-cases. It has long been an objective of the partial evaluation community to @@ -157,6 +159,7 @@ we present precisely allow us to implement such feedback and exploitation in a meta-tracing context. +\cfbolz{XXX kill the next paragraph? 
the info is repeated in the list below} Concretely these hints are used to control how the optimizer of the tracing JIT can improve the traces of the object model. In particular the hints influence the constant folding @@ -164,15 +167,15 @@ variables in the trace into constant by feeding back runtime values. The second hint allows the definition of additional foldable operations. -Together these two hints can be used to express many classic implementation +Together these hints can be used to express many classic implementation techniques used for object models of dynamic languages, such as maps and polymorphic inline caches. The contributions of this paper are: \begin{itemize} - \item A hint to turn arbitrary variables into constants in the trace, that - means the feedback of runtime information into compilation. - \item A way to define new pure operations which the constant folding + \item A hint to turn arbitrary variables into constants in the trace by + feeding back runtime information into compilation. + \item A way to annotate operations as pure which the constant folding optimization then recognizes. \item A worked-out example of a simple object model of a dynamic language and how it can be improved using these hints. @@ -233,7 +236,7 @@ A tracing JIT works by recording traces of concrete execution paths through the program. Those -traces are therefore linear list of operations, which are optimized and then +traces are linear lists of operations, which are optimized and then get turned into machine code. This recording automatically inlines functions: when a function call is encountered the operations of the called functions are simply put into the trace of the caller too. The tracing JIT tries to produce traces @@ -251,10 +254,9 @@ program are turned into machine code. The interpreter is also used when a guard fails to continue the execution from the failing guard. 
-One disadvantage of (tracing) JITs which makes them not directly applicable to -PyPy is that they need to encode the language semantics of the language they are -tracing. Since PyPy wants to be a -general framework, we want to reuse our tracer for different languages. +Since PyPy wants to be a general framework, we want to reuse our tracer for +different languages, which makes classical tracers inapplicable, because they +encode language semantics. Therefore PyPy's JIT is a \emph{meta-tracer} \cite{bolz_tracing_2009}. It does not trace the execution of the user program, but instead traces the execution of the \emph{interpreter} that is running the program. This means that the traces @@ -270,7 +272,7 @@ While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the user program. To achieve this the tracer stops tracing after one iteration of -the loop in the user function that is being considered. At this point, it probably +the loop in the user function that is being considered; at this point, it probably traced many iterations of the interpreter main loop. \begin{figure} @@ -311,7 +313,7 @@ As the running example of this paper we will use a very simple and bare-bones object model that just supports classes and instances, without any -inheritance or other advanced features. The model has classes, which contain methods. +inheritance or other advanced features. In the model classes contain methods. Instances have a class. Instances have their own attributes (or fields). When looking up an attribute on an instance, the instances attributes are searched. If the attribute is not found there, the class' methods are searched. @@ -323,7 +325,7 @@ \end{figure} -To implement this object model, we could use the RPython code in +To implement this object model, we use the RPython code in Figure~\ref{fig:interpreter-slow} as part of the interpreter source code. 
In this straightforward implementation the methods and attributes are just stored in dictionaries (hash maps) on the classes and instances, respectively. @@ -334,7 +336,7 @@ created. When using this object model in -an interpreter, a huge amount of time will be spent doing lookups in these +an interpreter, a large amount of time will be spent doing lookups in these dictionaries. Let's assume we trace through code that sums three attributes, such as: \anto{I still think it's a bit weird to call them ``methods'' and then use @@ -405,10 +407,7 @@ the trace is said to be constant if its value is statically known by the optimizer. -The simplest example of constants are literal values. For example, if in the -RPython source code we have a line like \texttt{y = x + 1}, the second operand will -be a constant in the trace. - +The simplest example of constants are literal values, such as \texttt{1}. However, the optimizer can statically know the value of a variable even if it is not a constant in the original source code. For example, consider the following fragment of RPython code: @@ -532,7 +531,7 @@ program. An example would be the types of variables in a user function. Even though in principle the argument to a Python function could be any Python type, in practice the argument types tend not to vary often. Therefore it is possible to -promote the types. Section~\ref{sec:} will present a complete example of how +promote the types. Section~\ref{sec:fastobjmodel} will present a complete example of how this works. \cfbolz{XXX explain how value specialization on the interpreter level can lead @@ -542,7 +541,7 @@ \subsection{Declaring New Pure Operations} In the previous section we saw a way to turn arbitrary variables into constants. All -pure operations on these constants can be constant-folded. This works great for +pure operations on these constants can be constant-folded. This works well for constant folding of simple types, e.g. integers. 
Unfortunately, in the context of an interpreter for a dynamic language, most operations actually manipulate objects, not simple types. The @@ -639,6 +638,8 @@ annotation is indeed to declare the immutability of fields. Because it is so common, we have special syntactic sugar for it.}. +\cfbolz{XXX mention a possible debug mode for findings bugs in this, or too many values in promotion. stress more that promote is safe} + \subsubsection{Observably Pure Functions} From commits-noreply at bitbucket.org Wed Apr 13 18:29:39 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 13 Apr 2011 18:29:39 +0200 (CEST) Subject: [pypy-svn] pypy jitypes2: add a test that fails if we do not emit CALL_RELEASE_GIL for _ffi calls Message-ID: <20110413162939.A53562A2035@codespeak.net> Author: Antonio Cuni Branch: jitypes2 Changeset: r43329:489e9af0684e Date: 2011-04-13 17:09 +0200 http://bitbucket.org/pypy/pypy/changeset/489e9af0684e/ Log: add a test that fails if we do not emit CALL_RELEASE_GIL for _ffi calls diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1037,3 +1037,34 @@ --TICK-- jump(p0, p1, p2, p3, p4, i22, i6, i7, p8, p9, descr=) """) + + def test__ffi_call_releases_gil(self): + from pypy.rlib.test.test_libffi import get_libc_name + def main(libc_name, n): + import time + from threading import Thread + from _ffi import CDLL, types + + libc = CDLL(libc_name) + sleep = libc.getfunc('sleep', [types.uint], types.uint) + delays = [0]*n + [1] + + def loop_of_sleeps(i, delays): + import time + for delay in delays: + sleep(delay) # ID: sleep + + threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)] + start = time.time() + for i, thread in enumerate(threads): + thread.start() + for thread in threads: + thread.join() + end = time.time() + # + return end - start + # + 
log = self.run(main, [get_libc_name(), 200], threshold=150) + assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead + loops = log.loops_by_id('sleep') + assert len(loops) == 1 # make sure that we actually JITted the loop From commits-noreply at bitbucket.org Wed Apr 13 18:29:40 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 13 Apr 2011 18:29:40 +0200 (CEST) Subject: [pypy-svn] pypy jitypes2: try not to have newlines, to make merging easier Message-ID: <20110413162940.38D4E2A2035@codespeak.net> Author: Antonio Cuni Branch: jitypes2 Changeset: r43330:4aa00ab09876 Date: 2011-04-13 18:27 +0200 http://bitbucket.org/pypy/pypy/changeset/4aa00ab09876/ Log: try not to have newlines, to make merging easier diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1044,16 +1044,16 @@ import time from threading import Thread from _ffi import CDLL, types - + ### libc = CDLL(libc_name) sleep = libc.getfunc('sleep', [types.uint], types.uint) delays = [0]*n + [1] - + ### def loop_of_sleeps(i, delays): import time for delay in delays: sleep(delay) # ID: sleep - + ### threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)] start = time.time() for i, thread in enumerate(threads): @@ -1061,9 +1061,9 @@ for thread in threads: thread.join() end = time.time() - # + ### return end - start - # + ### log = self.run(main, [get_libc_name(), 200], threshold=150) assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead loops = log.loops_by_id('sleep') From commits-noreply at bitbucket.org Wed Apr 13 18:31:23 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 13 Apr 2011 18:31:23 +0200 (CEST) Subject: [pypy-svn] pypy jitypes2: merge heads Message-ID: <20110413163123.9225B2A2035@codespeak.net> Author: Antonio Cuni Branch: jitypes2 Changeset: 
r43331:6ebb73be4f72 Date: 2011-04-13 18:28 +0200 http://bitbucket.org/pypy/pypy/changeset/6ebb73be4f72/ Log: merge heads diff --git a/pypy/jit/metainterp/optimizeopt/string.py b/pypy/jit/metainterp/optimizeopt/string.py --- a/pypy/jit/metainterp/optimizeopt/string.py +++ b/pypy/jit/metainterp/optimizeopt/string.py @@ -47,7 +47,7 @@ class __extend__(optimizer.OptValue): """New methods added to the base class OptValue for this file.""" - def getstrlen(self, newoperations, mode): + def getstrlen(self, optimization, mode): if mode is mode_string: s = self.get_constant_string_spec(mode_string) if s is not None: @@ -56,12 +56,12 @@ s = self.get_constant_string_spec(mode_unicode) if s is not None: return ConstInt(len(s)) - if newoperations is None: + if optimization is None: return None self.ensure_nonnull() box = self.force_box() lengthbox = BoxInt() - newoperations.append(ResOperation(mode.STRLEN, [box], lengthbox)) + optimization.emit_operation(ResOperation(mode.STRLEN, [box], lengthbox)) return lengthbox @specialize.arg(1) @@ -72,13 +72,13 @@ else: return None - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): + def string_copy_parts(self, optimization, targetbox, offsetbox, mode): # Copies the pointer-to-string 'self' into the target string # given by 'targetbox', at the specified offset. Returns the offset # at the end of the copy. 
- lengthbox = self.getstrlen(newoperations, mode) + lengthbox = self.getstrlen(optimization, mode) srcbox = self.force_box() - return copy_str_content(newoperations, srcbox, targetbox, + return copy_str_content(optimization, srcbox, targetbox, CONST_0, offsetbox, lengthbox, mode) @@ -105,13 +105,12 @@ return assert self.source_op is not None self.box = box = self.source_op.result - newoperations = self.optimizer.newoperations - lengthbox = self.getstrlen(newoperations, self.mode) + lengthbox = self.getstrlen(self.optimizer, self.mode) op = ResOperation(self.mode.NEWSTR, [lengthbox], box) if not we_are_translated(): op.name = 'FORCE' - newoperations.append(op) - self.string_copy_parts(newoperations, box, CONST_0, self.mode) + self.optimizer.emit_operation(op) + self.string_copy_parts(self.optimizer, box, CONST_0, self.mode) class VStringPlainValue(VAbstractStringValue): @@ -145,14 +144,14 @@ return mode.emptystr.join([mode.chr(c.box.getint()) for c in self._chars]) - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): for i in range(len(self._chars)): charbox = self._chars[i].force_box() - newoperations.append(ResOperation(mode.STRSETITEM, [targetbox, + optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) + offsetbox = _int_add(optimizer, offsetbox, CONST_1) return offsetbox def get_args_for_fail(self, modifier): @@ -186,16 +185,16 @@ self.left = left self.right = right - def getstrlen(self, newoperations, mode): + def getstrlen(self, optimizer, mode): if self.lengthbox is None: - len1box = self.left.getstrlen(newoperations, mode) + len1box = self.left.getstrlen(optimizer, mode) if len1box is None: return None - len2box = self.right.getstrlen(newoperations, mode) + len2box = self.right.getstrlen(optimizer, mode) if len2box is None: return None - self.lengthbox = 
_int_add(newoperations, len1box, len2box) - # ^^^ may still be None, if newoperations is None + self.lengthbox = _int_add(optimizer, len1box, len2box) + # ^^^ may still be None, if optimizer is None return self.lengthbox @specialize.arg(1) @@ -208,10 +207,10 @@ return None return s1 + s2 - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): - offsetbox = self.left.string_copy_parts(newoperations, targetbox, + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + offsetbox = self.left.string_copy_parts(optimizer, targetbox, offsetbox, mode) - offsetbox = self.right.string_copy_parts(newoperations, targetbox, + offsetbox = self.right.string_copy_parts(optimizer, targetbox, offsetbox, mode) return offsetbox @@ -266,9 +265,9 @@ return s1[start : start + length] return None - def string_copy_parts(self, newoperations, targetbox, offsetbox, mode): - lengthbox = self.getstrlen(newoperations, mode) - return copy_str_content(newoperations, + def string_copy_parts(self, optimizer, targetbox, offsetbox, mode): + lengthbox = self.getstrlen(optimizer, mode) + return copy_str_content(optimizer, self.vstr.force_box(), targetbox, self.vstart.force_box(), offsetbox, lengthbox, mode) @@ -299,7 +298,7 @@ return modifier.make_vstrslice(self.mode is mode_unicode) -def copy_str_content(newoperations, srcbox, targetbox, +def copy_str_content(optimizer, srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox, mode): if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const): M = 5 @@ -309,23 +308,23 @@ # up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM # instead of just a COPYSTRCONTENT. 
for i in range(lengthbox.value): - charbox = _strgetitem(newoperations, srcbox, srcoffsetbox, mode) - srcoffsetbox = _int_add(newoperations, srcoffsetbox, CONST_1) - newoperations.append(ResOperation(mode.STRSETITEM, [targetbox, + charbox = _strgetitem(optimizer, srcbox, srcoffsetbox, mode) + srcoffsetbox = _int_add(optimizer, srcoffsetbox, CONST_1) + optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox], None)) - offsetbox = _int_add(newoperations, offsetbox, CONST_1) + offsetbox = _int_add(optimizer, offsetbox, CONST_1) else: - nextoffsetbox = _int_add(newoperations, offsetbox, lengthbox) + nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox) op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox], None) - newoperations.append(op) + optimizer.emit_operation(op) offsetbox = nextoffsetbox return offsetbox -def _int_add(newoperations, box1, box2): +def _int_add(optimizer, box1, box2): if isinstance(box1, ConstInt): if box1.value == 0: return box2 @@ -333,23 +332,23 @@ return ConstInt(box1.value + box2.value) elif isinstance(box2, ConstInt) and box2.value == 0: return box1 - if newoperations is None: + if optimizer is None: return None resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_ADD, [box1, box2], resbox)) + optimizer.emit_operation(ResOperation(rop.INT_ADD, [box1, box2], resbox)) return resbox -def _int_sub(newoperations, box1, box2): +def _int_sub(optimizer, box1, box2): if isinstance(box2, ConstInt): if box2.value == 0: return box1 if isinstance(box1, ConstInt): return ConstInt(box1.value - box2.value) resbox = BoxInt() - newoperations.append(ResOperation(rop.INT_SUB, [box1, box2], resbox)) + optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox)) return resbox -def _strgetitem(newoperations, strbox, indexbox, mode): +def _strgetitem(optimizer, strbox, indexbox, mode): if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt): if mode is 
mode_string: s = strbox.getref(lltype.Ptr(rstr.STR)) @@ -358,7 +357,7 @@ s = strbox.getref(lltype.Ptr(rstr.UNICODE)) return ConstInt(ord(s.chars[indexbox.getint()])) resbox = BoxInt() - newoperations.append(ResOperation(mode.STRGETITEM, [strbox, indexbox], + optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox], resbox)) return resbox @@ -370,7 +369,7 @@ def reconstruct_for_next_iteration(self, optimizer, valuemap): self.enabled = True return self - + def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(self.optimizer, box, source_op, mode) self.make_equal_to(box, vvalue) @@ -431,7 +430,7 @@ value.ensure_nonnull() # if value.is_virtual() and isinstance(value, VStringSliceValue): - fullindexbox = _int_add(self.optimizer.newoperations, + fullindexbox = _int_add(self.optimizer, value.vstart.force_box(), vindex.force_box()) value = value.vstr @@ -441,7 +440,7 @@ if vindex.is_constant(): return value.getitem(vindex.box.getint()) # - resbox = _strgetitem(self.optimizer.newoperations, + resbox = _strgetitem(self.optimizer, value.force_box(),vindex.force_box(), mode) return self.getvalue(resbox) @@ -452,7 +451,7 @@ def _optimize_STRLEN(self, op, mode): value = self.getvalue(op.getarg(0)) - lengthbox = value.getstrlen(self.optimizer.newoperations, mode) + lengthbox = value.getstrlen(self, mode) self.make_equal_to(op.result, self.getvalue(lengthbox)) def optimize_CALL(self, op): @@ -498,13 +497,11 @@ vright = self.getvalue(op.getarg(2)) vleft.ensure_nonnull() vright.ensure_nonnull() - newoperations = self.optimizer.newoperations value = self.make_vstring_concat(op.result, op, mode) value.setup(vleft, vright) return True def opt_call_stroruni_STR_SLICE(self, op, mode): - newoperations = self.optimizer.newoperations vstr = self.getvalue(op.getarg(1)) vstart = self.getvalue(op.getarg(2)) vstop = self.getvalue(op.getarg(3)) @@ -518,14 +515,14 @@ return True # vstr.ensure_nonnull() - lengthbox = _int_sub(newoperations, 
vstop.force_box(), + lengthbox = _int_sub(self.optimizer, vstop.force_box(), vstart.force_box()) # if isinstance(vstr, VStringSliceValue): # double slicing s[i:j][k:l] vintermediate = vstr vstr = vintermediate.vstr - startbox = _int_add(newoperations, + startbox = _int_add(self.optimizer, vintermediate.vstart.force_box(), vstart.force_box()) vstart = self.getvalue(startbox) @@ -574,7 +571,7 @@ l2box = v2.getstrlen(None, mode) if isinstance(l2box, ConstInt): if l2box.value == 0: - lengthbox = v1.getstrlen(self.optimizer.newoperations, mode) + lengthbox = v1.getstrlen(self.optimizer, mode) seo = self.optimizer.send_extra_operation seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox)) return True @@ -609,7 +606,7 @@ op = ResOperation(rop.PTR_EQ, [v1.force_box(), llhelper.CONST_NULL], resultbox) - self.optimizer.newoperations.append(op) + self.optimizer.emit_operation(op) return True # return False @@ -646,7 +643,7 @@ calldescr, func = cic.callinfo_for_oopspec(oopspecindex) op = ResOperation(rop.CALL, [ConstInt(func)] + args, result, descr=calldescr) - self.optimizer.newoperations.append(op) + self.optimizer.emit_operation(op) def propagate_forward(self, op): if not self.enabled: diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise 
NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/module/cpyext/include/abstract.h b/pypy/module/cpyext/include/abstract.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/abstract.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -108,6 +108,7 @@ Anders Qvist Alan McIntyre Bert Freudenberg + Tav Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -118,6 +119,9 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' ============================================================== diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -39,6 +39,10 @@ state = space.fromcache(State) state.clear_exception() + at cpython_api([PyObject], PyObject) +def PyExceptionInstance_Class(space, w_obj): + return space.type(w_obj) + @cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void) def PyErr_Fetch(space, ptype, pvalue, ptraceback): """Retrieve the error indicator into three variables whose addresses are passed. 
@@ -75,6 +79,9 @@ error indicator temporarily; use PyErr_Fetch() to save the current exception state.""" state = space.fromcache(State) + if w_type is None: + state.clear_exception() + return state.set_exception(OperationError(w_type, w_value)) Py_DecRef(space, w_type) Py_DecRef(space, w_value) @@ -300,3 +307,11 @@ operror = state.clear_exception() if operror: operror.write_unraisable(space, space.str_w(space.repr(w_where))) + + at cpython_api([], lltype.Void) +def PyErr_SetInterrupt(space): + """This function simulates the effect of a SIGINT signal arriving --- the + next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. + It may be called without holding the interpreter lock.""" + space.check_signal_action.set_interrupt() + diff --git a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py --- a/pypy/module/test_lib_pypy/test_distributed/test_distributed.py +++ b/pypy/module/test_lib_pypy/test_distributed/test_distributed.py @@ -91,19 +91,22 @@ class AppTestDistributedTasklets(object): spaceconfig = {"objspace.std.withtproxy": True, "objspace.usemodules._stackless": True} + reclimit = sys.getrecursionlimit() + def setup_class(cls): + import py.test + py.test.importorskip('greenlet') #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True, # "usemodules":("_stackless",)}) cls.w_test_env_ = cls.space.appexec([], """(): from distributed import test_env return (test_env,) """) - cls.reclimit = sys.getrecursionlimit() sys.setrecursionlimit(100000) def teardown_class(cls): sys.setrecursionlimit(cls.reclimit) - + def test_remote_protocol_call(self): def f(x, y): return x + y diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,6 +103,7 @@ except KeyError: subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, weakrefable) + assert key not in _subclass_cache 
_subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, 
OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including 
AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. - - Longer tasks ------------ diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -6,6 +6,7 @@ from pypy.tool.udir import udir from pypy.rlib import streamio from pypy.conftest import gettestobjspace +import pytest import sys, os import tempfile, marshal @@ -109,6 +110,14 @@ p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), mode='wb') + # create a .pyw file + p = setuppkg("windows", x = "x = 78") + try: + p.join('x.pyw').remove() + except py.error.ENOENT: + pass + p.join('x.py').rename(p.join('x.pyw')) + return str(root) @@ -177,6 +186,14 @@ import a assert a == a0 + def test_trailing_slash(self): + import sys + try: + sys.path[0] += '/' + import a + finally: + sys.path[0] = sys.path[0].rstrip('/') + def test_import_pkg(self): import sys import pkg @@ -325,6 +342,11 @@ import compiled.x assert compiled.x == sys.modules.get('compiled.x') + @pytest.mark.skipif("sys.platform != 'win32'") + def test_pyw(self): + import windows.x + assert windows.x.__file__.endswith('x.pyw') + def test_cannot_write_pyc(self): import sys, os p = os.path.join(sys.path[-1], 'readonly') @@ -985,7 +1007,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): diff --git a/lib_pypy/pyrepl/unicodedata_.py b/lib_pypy/pyrepl/unicodedata_.py new file mode 100644 --- /dev/null +++ 
b/lib_pypy/pyrepl/unicodedata_.py @@ -0,0 +1,59 @@ +try: + from unicodedata import * +except ImportError: + + def category(ch): + """ + ASCII only implementation + """ + if type(ch) is not unicode: + raise TypeError + if len(ch) != 1: + raise TypeError + return _categories.get(ord(ch), 'Co') # "Other, private use" + + _categories = { + 0: 'Cc', 1: 'Cc', 2: 'Cc', 3: 'Cc', 4: 'Cc', 5: 'Cc', + 6: 'Cc', 7: 'Cc', 8: 'Cc', 9: 'Cc', 10: 'Cc', 11: 'Cc', + 12: 'Cc', 13: 'Cc', 14: 'Cc', 15: 'Cc', 16: 'Cc', 17: 'Cc', + 18: 'Cc', 19: 'Cc', 20: 'Cc', 21: 'Cc', 22: 'Cc', 23: 'Cc', + 24: 'Cc', 25: 'Cc', 26: 'Cc', 27: 'Cc', 28: 'Cc', 29: 'Cc', + 30: 'Cc', 31: 'Cc', 32: 'Zs', 33: 'Po', 34: 'Po', 35: 'Po', + 36: 'Sc', 37: 'Po', 38: 'Po', 39: 'Po', 40: 'Ps', 41: 'Pe', + 42: 'Po', 43: 'Sm', 44: 'Po', 45: 'Pd', 46: 'Po', 47: 'Po', + 48: 'Nd', 49: 'Nd', 50: 'Nd', 51: 'Nd', 52: 'Nd', 53: 'Nd', + 54: 'Nd', 55: 'Nd', 56: 'Nd', 57: 'Nd', 58: 'Po', 59: 'Po', + 60: 'Sm', 61: 'Sm', 62: 'Sm', 63: 'Po', 64: 'Po', 65: 'Lu', + 66: 'Lu', 67: 'Lu', 68: 'Lu', 69: 'Lu', 70: 'Lu', 71: 'Lu', + 72: 'Lu', 73: 'Lu', 74: 'Lu', 75: 'Lu', 76: 'Lu', 77: 'Lu', + 78: 'Lu', 79: 'Lu', 80: 'Lu', 81: 'Lu', 82: 'Lu', 83: 'Lu', + 84: 'Lu', 85: 'Lu', 86: 'Lu', 87: 'Lu', 88: 'Lu', 89: 'Lu', + 90: 'Lu', 91: 'Ps', 92: 'Po', 93: 'Pe', 94: 'Sk', 95: 'Pc', + 96: 'Sk', 97: 'Ll', 98: 'Ll', 99: 'Ll', 100: 'Ll', 101: 'Ll', + 102: 'Ll', 103: 'Ll', 104: 'Ll', 105: 'Ll', 106: 'Ll', 107: 'Ll', + 108: 'Ll', 109: 'Ll', 110: 'Ll', 111: 'Ll', 112: 'Ll', 113: 'Ll', + 114: 'Ll', 115: 'Ll', 116: 'Ll', 117: 'Ll', 118: 'Ll', 119: 'Ll', + 120: 'Ll', 121: 'Ll', 122: 'Ll', 123: 'Ps', 124: 'Sm', 125: 'Pe', + 126: 'Sm', 127: 'Cc', 128: 'Cc', 129: 'Cc', 130: 'Cc', 131: 'Cc', + 132: 'Cc', 133: 'Cc', 134: 'Cc', 135: 'Cc', 136: 'Cc', 137: 'Cc', + 138: 'Cc', 139: 'Cc', 140: 'Cc', 141: 'Cc', 142: 'Cc', 143: 'Cc', + 144: 'Cc', 145: 'Cc', 146: 'Cc', 147: 'Cc', 148: 'Cc', 149: 'Cc', + 150: 'Cc', 151: 'Cc', 152: 'Cc', 153: 'Cc', 154: 'Cc', 155: 'Cc', + 
156: 'Cc', 157: 'Cc', 158: 'Cc', 159: 'Cc', 160: 'Zs', 161: 'Po', + 162: 'Sc', 163: 'Sc', 164: 'Sc', 165: 'Sc', 166: 'So', 167: 'So', + 168: 'Sk', 169: 'So', 170: 'Ll', 171: 'Pi', 172: 'Sm', 173: 'Cf', + 174: 'So', 175: 'Sk', 176: 'So', 177: 'Sm', 178: 'No', 179: 'No', + 180: 'Sk', 181: 'Ll', 182: 'So', 183: 'Po', 184: 'Sk', 185: 'No', + 186: 'Ll', 187: 'Pf', 188: 'No', 189: 'No', 190: 'No', 191: 'Po', + 192: 'Lu', 193: 'Lu', 194: 'Lu', 195: 'Lu', 196: 'Lu', 197: 'Lu', + 198: 'Lu', 199: 'Lu', 200: 'Lu', 201: 'Lu', 202: 'Lu', 203: 'Lu', + 204: 'Lu', 205: 'Lu', 206: 'Lu', 207: 'Lu', 208: 'Lu', 209: 'Lu', + 210: 'Lu', 211: 'Lu', 212: 'Lu', 213: 'Lu', 214: 'Lu', 215: 'Sm', + 216: 'Lu', 217: 'Lu', 218: 'Lu', 219: 'Lu', 220: 'Lu', 221: 'Lu', + 222: 'Lu', 223: 'Ll', 224: 'Ll', 225: 'Ll', 226: 'Ll', 227: 'Ll', + 228: 'Ll', 229: 'Ll', 230: 'Ll', 231: 'Ll', 232: 'Ll', 233: 'Ll', + 234: 'Ll', 235: 'Ll', 236: 'Ll', 237: 'Ll', 238: 'Ll', 239: 'Ll', + 240: 'Ll', 241: 'Ll', 242: 'Ll', 243: 'Ll', 244: 'Ll', 245: 'Ll', + 246: 'Ll', 247: 'Sm', 248: 'Ll', 249: 'Ll', 250: 'Ll', 251: 'Ll', + 252: 'Ll', 253: 'Ll', 254: 'Ll' + } diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. 
@@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/lib_pypy/pyrepl/keymaps.py b/lib_pypy/pyrepl/keymaps.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/keymaps.py @@ -0,0 +1,140 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +reader_emacs_keymap = tuple( + [(r'\C-a', 'beginning-of-line'), + (r'\C-b', 'left'), + (r'\C-c', 'interrupt'), + (r'\C-d', 'delete'), + (r'\C-e', 'end-of-line'), + (r'\C-f', 'right'), + (r'\C-g', 'cancel'), + (r'\C-h', 'backspace'), + (r'\C-j', 'self-insert'), + (r'\', 'accept'), + (r'\C-k', 'kill-line'), + (r'\C-l', 'clear-screen'), +# (r'\C-m', 'accept'), + (r'\C-q', 'quoted-insert'), + (r'\C-t', 'transpose-characters'), + (r'\C-u', 'unix-line-discard'), + (r'\C-v', 'quoted-insert'), + (r'\C-w', 'unix-word-rubout'), + (r'\C-x\C-u', 'upcase-region'), + (r'\C-y', 'yank'), + (r'\C-z', 'suspend'), + + (r'\M-b', 'backward-word'), + (r'\M-c', 'capitalize-word'), + (r'\M-d', 'kill-word'), + (r'\M-f', 'forward-word'), + (r'\M-l', 'downcase-word'), + (r'\M-t', 'transpose-words'), + (r'\M-u', 'upcase-word'), + (r'\M-y', 'yank-pop'), + (r'\M--', 'digit-arg'), + (r'\M-0', 'digit-arg'), + (r'\M-1', 'digit-arg'), + (r'\M-2', 'digit-arg'), + (r'\M-3', 'digit-arg'), + (r'\M-4', 'digit-arg'), + (r'\M-5', 'digit-arg'), + (r'\M-6', 'digit-arg'), + (r'\M-7', 'digit-arg'), + (r'\M-8', 'digit-arg'), + (r'\M-9', 'digit-arg'), + (r'\M-\n', 'self-insert'), + (r'\', 'self-insert')] + \ + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\', 'up'), + (r'\', 'down'), + (r'\', 'left'), + (r'\', 'right'), + (r'\', 'quoted-insert'), + (r'\', 'delete'), + (r'\', 'backspace'), + (r'\M-\', 'backward-kill-word'), + 
(r'\', 'end'), + (r'\', 'home'), + (r'\', 'help'), + (r'\EOF', 'end'), # the entries in the terminfo database for xterms + (r'\EOH', 'home'), # seem to be wrong. this is a less than ideal + # workaround + ]) + +hist_emacs_keymap = reader_emacs_keymap + ( + (r'\C-n', 'next-history'), + (r'\C-p', 'previous-history'), + (r'\C-o', 'operate-and-get-next'), + (r'\C-r', 'reverse-history-isearch'), + (r'\C-s', 'forward-history-isearch'), + (r'\M-r', 'restore-history'), + (r'\M-.', 'yank-arg'), + (r'\', 'last-history'), + (r'\', 'first-history')) + +comp_emacs_keymap = hist_emacs_keymap + ( + (r'\t', 'complete'),) + +python_emacs_keymap = comp_emacs_keymap + ( + (r'\n', 'maybe-accept'), + (r'\M-\n', 'self-insert')) + +reader_vi_insert_keymap = tuple( + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\C-d', 'delete'), + (r'\', 'backspace'), + ('')]) + +reader_vi_command_keymap = tuple( + [ + ('E', 'enter-emacs-mode'), + ('R', 'enter-replace-mode'), + ('dw', 'delete-word'), + ('dd', 'delete-line'), + + ('h', 'left'), + ('i', 'enter-insert-mode'), + ('j', 'down'), + ('k', 'up'), + ('l', 'right'), + ('r', 'replace-char'), + ('w', 'forward-word'), + ('x', 'delete'), + ('.', 'repeat-edit'), # argh! 
+ (r'\', 'enter-insert-mode'), + ] + + [(c, 'digit-arg') for c in '01234567689'] + + []) + + +reader_keymaps = { + 'emacs' : reader_emacs_keymap, + 'vi-insert' : reader_vi_insert_keymap, + 'vi-command' : reader_vi_command_keymap + } + +del c # from the listcomps + diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,6 +1,7 @@ import py from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver +from pypy.rlib import objectmodel class DictTests: @@ -69,6 +70,66 @@ res = self.meta_interp(f, [10], listops=True) assert res == expected + def test_dict_trace_hash(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + if total not in dct: + dct[total] = [] + dct[total].append(total) + total -= 1 + return len(dct[0]) + + res1 = f(100) + res2 = self.meta_interp(f, [100], listops=True) + assert res1 == res2 + self.check_loops(int_mod=1) # the hash was traced + + def test_dict_setdefault(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def f(n): + dct = {} + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct.setdefault(total % 2, []).append(total) + total -= 1 + return len(dct[0]) + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(new=0, new_with_vtable=0) + + def test_dict_as_counter(self): + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = 
dct.get(total, 0) + 1 + total -= 1 + return dct[0] + + assert f(100) == 50 + res = self.meta_interp(f, [100], listops=True) + assert res == 50 + self.check_loops(int_mod=1) + class TestOOtype(DictTests, OOJitMixin): pass diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. 
pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ 
b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -147,7 +157,6 @@ FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token - virtualrefindexdescr = vrefinfo.descr_virtualref_index virtualforceddescr = vrefinfo.descr_forced jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) @@ -156,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,8 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from 
pypy.rpython.lltypesystem.lloperation import llop @@ -15,12 +17,13 @@ from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr from pypy.jit.backend.llsupport.descr import get_call_descr -from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -34,6 +37,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -212,10 +217,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -224,7 +231,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. # The addresses are actually grouped in pairs: @@ -237,6 +244,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... 
@@ -309,6 +323,7 @@ @rgc.no_collect def freeing_block(self, start, stop): + from pypy.rpython.memory.gctransform import asmgcroot # if [start:stop] is a raw block of assembler, then look up the # corresponding gcroot markers, and mark them as freed now in # self._gcmap by setting the 2nd address of every entry to NULL. @@ -365,7 +380,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +403,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. + """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. + # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. 
+ + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= 
self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw') + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +572,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +581,9 @@ # where it can be fished and reused by the FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) @@ -461,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() 
self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -539,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -562,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -710,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = 
arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -39,13 +39,15 @@ [user at debian-box ~]$ sudo apt-get install \ gcc make python-dev libffi-dev pkg-config \ - libz-dev libbz2-dev libncurses-dev libexpat1-dev libssl-dev libgc-dev python-sphinx + libz-dev libbz2-dev libncurses-dev libexpat1-dev \ + libssl-dev libgc-dev python-sphinx python-greenlet On a Fedora box these are:: [user at fedora-or-rh-box ~]$ sudo yum install \ gcc make python-devel libffi-devel pkg-config \ - zlib-devel bzip2-devel ncurses-devel expat-devel openssl-devel gc-devel python-sphinx + zlib-devel bzip2-devel ncurses-devel expat-devel \ + openssl-devel gc-devel python-sphinx python-greenlet The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. @@ -57,6 +59,7 @@ * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) * ``python-sphinx`` (for the optional documentation build) + * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 2. Translation is somewhat time-consuming (30 min to over one hour) and RAM-hungry. If you have less than 1.5 GB of @@ -76,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. 
As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html @@ -229,6 +233,12 @@ ../../.. etc. +If the executable fails to find suitable libraries, it will report +``debug: WARNING: library path not found, using compiled-in sys.path`` +and then attempt to continue normally. If the default path is usable, +most code will be fine. However, the ``sys.prefix`` will be unset +and some existing libraries assume that this is never the case. + In order to use ``distutils`` or ``setuptools`` a directory ``PREFIX/site-packages`` needs to be created. Here's an example session setting up and using ``easy_install``:: $ cd PREFIX diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + 
+register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): 
S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -127,12 +127,15 @@ checks[2], checks[3])) subclasses = {} for key, subcls in typedef._subclass_cache.items(): + if key[0] is not space.config: + continue cls = key[1] subclasses.setdefault(cls, {}) - subclasses[cls][subcls] = True + prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls) + assert subcls is prevsubcls for cls, set in subclasses.items(): assert len(set) <= 6, "%s has %d subclasses:\n%r" % ( - cls, len(set), [subcls.__name__ for subcls in set]) + cls, len(set), list(set)) def test_getsetproperty(self): class W_SomeType(Wrappable): diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ pypy/doc/*.html pypy/doc/config/*.html pypy/doc/discussion/*.html +pypy/module/cpyext/src/*.o +pypy/module/cpyext/test/*.o pypy/module/test_lib_pypy/ctypes_tests/*.o pypy/translator/c/src/dtoa.o pypy/translator/goal/pypy-c diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/completing_reader.py @@ -0,0 +1,280 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice 
and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl import commands, reader +from pyrepl.reader import Reader + +def uniqify(l): + d = {} + for i in l: + d[i] = 1 + r = d.keys() + r.sort() + return r + +def prefix(wordlist, j = 0): + d = {} + i = j + try: + while 1: + for word in wordlist: + d[word[i]] = 1 + if len(d) > 1: + return wordlist[0][j:i] + i += 1 + d = {} + except IndexError: + return wordlist[0][j:i] + +import re +def stripcolor(s): + return stripcolor.regexp.sub('', s) +stripcolor.regexp = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[m|K]") + +def real_len(s): + return len(stripcolor(s)) + +def left_align(s, maxlen): + stripped = stripcolor(s) + if len(stripped) > maxlen: + # too bad, we remove the color + return stripped[:maxlen] + padding = maxlen - len(stripped) + return s + ' '*padding + +def build_menu(cons, wordlist, start, use_brackets, sort_in_column): + if use_brackets: + item = "[ %s ]" + padding = 4 + else: + item = "%s " + padding = 2 + maxlen = min(max(map(real_len, wordlist)), cons.width - padding) + cols = cons.width / (maxlen + padding) + rows = (len(wordlist) - 1)/cols + 1 + + if sort_in_column: + # sort_in_column=False (default) sort_in_column=True + # A B C A D G + # D E F B E + # G C F + # + # "fill" the table with empty words, so we always have the same amout + # of rows for each column + missing = cols*rows - len(wordlist) + wordlist = wordlist + ['']*missing + indexes = [(i%cols)*rows + i//cols for i in range(len(wordlist))] + 
wordlist = [wordlist[i] for i in indexes] + menu = [] + i = start + for r in range(rows): + row = [] + for col in range(cols): + row.append(item % left_align(wordlist[i], maxlen)) + i += 1 + if i >= len(wordlist): + break + menu.append( ''.join(row) ) + if i >= len(wordlist): + i = 0 + break + if r + 5 > cons.height: + menu.append(" %d more... "%(len(wordlist) - i)) + break + return menu, i + +# this gets somewhat user interface-y, and as a result the logic gets +# very convoluted. +# +# To summarise the summary of the summary:- people are a problem. +# -- The Hitch-Hikers Guide to the Galaxy, Episode 12 + +#### Desired behaviour of the completions commands. +# the considerations are: +# (1) how many completions are possible +# (2) whether the last command was a completion +# (3) if we can assume that the completer is going to return the same set of +# completions: this is controlled by the ``assume_immutable_completions`` +# variable on the reader, which is True by default to match the historical +# behaviour of pyrepl, but e.g. False in the ReadlineAlikeReader to match +# more closely readline's semantics (this is needed e.g. by +# fancycompleter) +# +# if there's no possible completion, beep at the user and point this out. +# this is easy. +# +# if there's only one possible completion, stick it in. if the last thing +# user did was a completion, point out that he isn't getting anywhere, but +# only if the ``assume_immutable_completions`` is True. +# +# now it gets complicated. +# +# for the first press of a completion key: +# if there's a common prefix, stick it in. + +# irrespective of whether anything got stuck in, if the word is now +# complete, show the "complete but not unique" message + +# if there's no common prefix and if the word is not now complete, +# beep. 
+ +# common prefix -> yes no +# word complete \/ +# yes "cbnu" "cbnu" +# no - beep + +# for the second bang on the completion key +# there will necessarily be no common prefix +# show a menu of the choices. + +# for subsequent bangs, rotate the menu around (if there are sufficient +# choices). + +class complete(commands.Command): + def do(self): + r = self.reader + stem = r.get_stem() + if r.assume_immutable_completions and \ + r.last_command_is(self.__class__): + completions = r.cmpltn_menu_choices + else: + r.cmpltn_menu_choices = completions = \ + r.get_completions(stem) + if len(completions) == 0: + r.error("no matches") + elif len(completions) == 1: + if r.assume_immutable_completions and \ + len(completions[0]) == len(stem) and \ + r.last_command_is(self.__class__): + r.msg = "[ sole completion ]" + r.dirty = 1 + r.insert(completions[0][len(stem):]) + else: + p = prefix(completions, len(stem)) + if p <> '': + r.insert(p) + if r.last_command_is(self.__class__): + if not r.cmpltn_menu_vis: + r.cmpltn_menu_vis = 1 + r.cmpltn_menu, r.cmpltn_menu_end = build_menu( + r.console, completions, r.cmpltn_menu_end, + r.use_brackets, r.sort_in_column) + r.dirty = 1 + elif stem + p in completions: + r.msg = "[ complete but not unique ]" + r.dirty = 1 + else: + r.msg = "[ not unique ]" + r.dirty = 1 + +class self_insert(commands.self_insert): + def do(self): + commands.self_insert.do(self) + r = self.reader + if r.cmpltn_menu_vis: + stem = r.get_stem() + if len(stem) < 1: + r.cmpltn_reset() + else: + completions = [w for w in r.cmpltn_menu_choices + if w.startswith(stem)] + if completions: + r.cmpltn_menu, r.cmpltn_menu_end = build_menu( + r.console, completions, 0, + r.use_brackets, r.sort_in_column) + else: + r.cmpltn_reset() + +class CompletingReader(Reader): + """Adds completion support + + Adds instance variables: + * cmpltn_menu, cmpltn_menu_vis, cmpltn_menu_end, cmpltn_choices: + * + """ + # see the comment for the complete command + assume_immutable_completions = 
True + use_brackets = True # display completions inside [] + sort_in_column = False + + def collect_keymap(self): + return super(CompletingReader, self).collect_keymap() + ( + (r'\t', 'complete'),) + + def __init__(self, console): + super(CompletingReader, self).__init__(console) + self.cmpltn_menu = ["[ menu 1 ]", "[ menu 2 ]"] + self.cmpltn_menu_vis = 0 + self.cmpltn_menu_end = 0 + for c in [complete, self_insert]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + + def after_command(self, cmd): + super(CompletingReader, self).after_command(cmd) + if not isinstance(cmd, complete) and not isinstance(cmd, self_insert): + self.cmpltn_reset() + + def calc_screen(self): + screen = super(CompletingReader, self).calc_screen() + if self.cmpltn_menu_vis: + ly = self.lxy[1] + screen[ly:ly] = self.cmpltn_menu + self.screeninfo[ly:ly] = [(0, [])]*len(self.cmpltn_menu) + self.cxy = self.cxy[0], self.cxy[1] + len(self.cmpltn_menu) + return screen + + def finish(self): + super(CompletingReader, self).finish() + self.cmpltn_reset() + + def cmpltn_reset(self): + self.cmpltn_menu = [] + self.cmpltn_menu_vis = 0 + self.cmpltn_menu_end = 0 + self.cmpltn_menu_choices = [] + + def get_stem(self): + st = self.syntax_table + SW = reader.SYNTAX_WORD + b = self.buffer + p = self.pos - 1 + while p >= 0 and st.get(b[p], SW) == SW: + p -= 1 + return u''.join(b[p+1:self.pos]) + + def get_completions(self, stem): + return [] + +def test(): + class TestReader(CompletingReader): + def get_completions(self, stem): + return [s for l in map(lambda x:x.split(),self.history) + for s in l if s and s.startswith(stem)] + reader = TestReader() + reader.ps1 = "c**> " + reader.ps2 = "c/*> " + reader.ps3 = "c|*> " + reader.ps4 = "c\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/.hgsubstate b/.hgsubstate deleted file mode 100644 --- a/.hgsubstate +++ /dev/null @@ -1,2 +0,0 @@ -80037 greenlet -80409 lib_pypy/pyrepl diff --git 
a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + gcrootmap = 
GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/rlib/rdtoa.py b/pypy/rlib/rdtoa.py --- a/pypy/rlib/rdtoa.py +++ b/pypy/rlib/rdtoa.py @@ -5,16 +5,33 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.rlib import jit from pypy.rlib.rstring import StringBuilder -import py +import py, sys cdir = py.path.local(pypydir) / 'translator' / 'c' include_dirs = [cdir] +# set the word endianness based on the host's endianness +# and the C double's endianness (which should be equal) +if hasattr(float, '__getformat__'): + assert float.__getformat__('double') == 'IEEE, %s-endian' % sys.byteorder +if sys.byteorder == 'little': + source_file = ['#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754'] +elif sys.byteorder == 'big': + source_file = ['#define WORDS_BIGENDIAN', + '#define DOUBLE_IS_BIG_ENDIAN_IEEE754'] +else: + raise AssertionError(sys.byteorder) + +source_file.append('#include "src/dtoa.c"') +source_file = '\n\n'.join(source_file) + +# ____________________________________________________________ + eci = ExternalCompilationInfo( include_dirs = [cdir], includes = ['src/dtoa.h'], libraries = [], - separate_module_files = [cdir / 'src' / 'dtoa.c'], + 
separate_module_sources = [source_file], export_symbols = ['_PyPy_dg_strtod', '_PyPy_dg_dtoa', '_PyPy_dg_freedtoa', diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_wait.py @@ -0,0 +1,51 @@ +from ctypes import CDLL, c_int, POINTER, byref +from ctypes.util import find_library +from resource import _struct_rusage, struct_rusage + +__all__ = ["wait3", "wait4"] + +libc = CDLL(find_library("c")) +c_wait3 = libc.wait3 + +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] + +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + 
c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + +def wait3(options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage + +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -34,11 +34,7 @@ @jit.purefunction def _getcell_makenew(self, key): - res = self.content.get(key, None) - if res is not None: - return res - result = self.content[key] = ModuleCell() - return result + return self.content.setdefault(key, ModuleCell()) def impl_setitem(self, w_key, w_value): space = self.space @@ -50,6 +46,16 @@ def impl_setitem_str(self, name, w_value): self.getcell(name, True).w_value = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + cell = self.getcell(space.str_w(w_key), True) + if cell.w_value is None: + cell.w_value = w_default + return cell.w_value + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -7,6 +7,7 @@ CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject +from pypy.objspace.std.objectobject import W_ObjectObject from pypy.rlib.objectmodel import 
specialize, we_are_translated from pypy.rlib.rweakref import RWeakKeyDictionary from pypy.rpython.annlowlevel import llhelper @@ -370,6 +371,15 @@ @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): obj.c_ob_refcnt = 1 + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + assert isinstance(w_type, W_TypeObject) + if w_type.is_cpytype(): + w_obj = space.allocate_instance(W_ObjectObject, w_type) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + else: + assert False, "Please add more cases in _Py_NewReference()" def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unix_console.py @@ -0,0 +1,567 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import termios, select, os, struct, errno +import signal, re, time, sys +from fcntl import ioctl +from pyrepl import curses +from pyrepl.fancy_termios import tcgetattr, tcsetattr +from pyrepl.console import Console, Event +from pyrepl import unix_eventqueue + +class InvalidTerminal(RuntimeError): + pass + +_error = (termios.error, curses.error, InvalidTerminal) + +# there are arguments for changing this to "refresh" +SIGWINCH_EVENT = 'repaint' + +FIONREAD = getattr(termios, "FIONREAD", None) +TIOCGWINSZ = getattr(termios, "TIOCGWINSZ", None) + +def _my_getstr(cap, optional=0): + r = curses.tigetstr(cap) + if not optional and r is None: + raise InvalidTerminal, \ + "terminal doesn't have the required '%s' capability"%cap + return r + +# at this point, can we say: AAAAAAAAAAAAAAAAAAAAAARGH! +def maybe_add_baudrate(dict, rate): + name = 'B%d'%rate + if hasattr(termios, name): + dict[getattr(termios, name)] = rate + +ratedict = {} +for r in [0, 110, 115200, 1200, 134, 150, 1800, 19200, 200, 230400, + 2400, 300, 38400, 460800, 4800, 50, 57600, 600, 75, 9600]: + maybe_add_baudrate(ratedict, r) + +del r, maybe_add_baudrate + +delayprog = re.compile("\\$<([0-9]+)((?:/|\\*){0,2})>") + +try: + poll = select.poll +except AttributeError: + # this is exactly the minumum necessary to support what we + # do with poll objects + class poll: + def __init__(self): + pass + def register(self, fd, flag): + self.fd = fd + def poll(self, timeout=None): + r,w,e = select.select([self.fd],[],[],timeout) + return r + +POLLIN = getattr(select, "POLLIN", None) + +class UnixConsole(Console): + def __init__(self, f_in=0, f_out=1, term=None, encoding=None): + if encoding is None: + encoding = sys.getdefaultencoding() + + self.encoding = encoding + + if isinstance(f_in, int): + self.input_fd = f_in + else: + self.input_fd = f_in.fileno() + + if isinstance(f_out, int): + self.output_fd = f_out + else: + self.output_fd = f_out.fileno() + + self.pollob = poll() + 
self.pollob.register(self.input_fd, POLLIN) + curses.setupterm(term, self.output_fd) + self.term = term + + self._bel = _my_getstr("bel") + self._civis = _my_getstr("civis", optional=1) + self._clear = _my_getstr("clear") + self._cnorm = _my_getstr("cnorm", optional=1) + self._cub = _my_getstr("cub", optional=1) + self._cub1 = _my_getstr("cub1", 1) + self._cud = _my_getstr("cud", 1) + self._cud1 = _my_getstr("cud1", 1) + self._cuf = _my_getstr("cuf", 1) + self._cuf1 = _my_getstr("cuf1", 1) + self._cup = _my_getstr("cup") + self._cuu = _my_getstr("cuu", 1) + self._cuu1 = _my_getstr("cuu1", 1) + self._dch1 = _my_getstr("dch1", 1) + self._dch = _my_getstr("dch", 1) + self._el = _my_getstr("el") + self._hpa = _my_getstr("hpa", 1) + self._ich = _my_getstr("ich", 1) + self._ich1 = _my_getstr("ich1", 1) + self._ind = _my_getstr("ind", 1) + self._pad = _my_getstr("pad", 1) + self._ri = _my_getstr("ri", 1) + self._rmkx = _my_getstr("rmkx", 1) + self._smkx = _my_getstr("smkx", 1) + + ## work out how we're going to sling the cursor around + if 0 and self._hpa: # hpa don't work in windows telnet :-( + self.__move_x = self.__move_x_hpa + elif self._cub and self._cuf: + self.__move_x = self.__move_x_cub_cuf + elif self._cub1 and self._cuf1: + self.__move_x = self.__move_x_cub1_cuf1 + else: + raise RuntimeError, "insufficient terminal (horizontal)" + + if self._cuu and self._cud: + self.__move_y = self.__move_y_cuu_cud + elif self._cuu1 and self._cud1: + self.__move_y = self.__move_y_cuu1_cud1 + else: + raise RuntimeError, "insufficient terminal (vertical)" + + if self._dch1: + self.dch1 = self._dch1 + elif self._dch: + self.dch1 = curses.tparm(self._dch, 1) + else: + self.dch1 = None + + if self._ich1: + self.ich1 = self._ich1 + elif self._ich: + self.ich1 = curses.tparm(self._ich, 1) + else: + self.ich1 = None + + self.__move = self.__move_short + + self.event_queue = unix_eventqueue.EventQueue(self.input_fd) + self.partial_char = '' + self.cursor_visible = 1 + + def 
change_encoding(self, encoding): + self.encoding = encoding + + def refresh(self, screen, (cx, cy)): + # this function is still too long (over 90 lines) + + if not self.__gone_tall: + while len(self.screen) < min(len(screen), self.height): + self.__hide_cursor() + self.__move(0, len(self.screen) - 1) + self.__write("\n") + self.__posxy = 0, len(self.screen) + self.screen.append("") + else: + while len(self.screen) < len(screen): + self.screen.append("") + + if len(screen) > self.height: + self.__gone_tall = 1 + self.__move = self.__move_tall + + px, py = self.__posxy + old_offset = offset = self.__offset + height = self.height + + if 0: + global counter + try: + counter + except NameError: + counter = 0 + self.__write_code(curses.tigetstr("setaf"), counter) + counter += 1 + if counter > 8: + counter = 0 + + # we make sure the cursor is on the screen, and that we're + # using all of the screen if we can + if cy < offset: + offset = cy + elif cy >= offset + height: + offset = cy - height + 1 + elif offset > 0 and len(screen) < offset + height: + offset = max(len(screen) - height, 0) + screen.append("") + + oldscr = self.screen[old_offset:old_offset + height] + newscr = screen[offset:offset + height] + + # use hardware scrolling if we have it. 
+ if old_offset > offset and self._ri: + self.__hide_cursor() + self.__write_code(self._cup, 0, 0) + self.__posxy = 0, old_offset + for i in range(old_offset - offset): + self.__write_code(self._ri) + oldscr.pop(-1) + oldscr.insert(0, "") + elif old_offset < offset and self._ind: + self.__hide_cursor() + self.__write_code(self._cup, self.height - 1, 0) + self.__posxy = 0, old_offset + self.height - 1 + for i in range(offset - old_offset): + self.__write_code(self._ind) + oldscr.pop(0) + oldscr.append("") + + self.__offset = offset + + for y, oldline, newline, in zip(range(offset, offset + height), + oldscr, + newscr): + if oldline != newline: + self.__write_changed_line(y, oldline, newline, px) + + y = len(newscr) + while y < len(oldscr): + self.__hide_cursor() + self.__move(0, y) + self.__posxy = 0, y + self.__write_code(self._el) + y += 1 + + self.__show_cursor() + + self.screen = screen + self.move_cursor(cx, cy) + self.flushoutput() + + def __write_changed_line(self, y, oldline, newline, px): + # this is frustrating; there's no reason to test (say) + # self.dch1 inside the loop -- but alternative ways of + # structuring this function are equally painful (I'm trying to + # avoid writing code generators these days...) 
+ x = 0 + minlen = min(len(oldline), len(newline)) + # + # reuse the oldline as much as possible, but stop as soon as we + # encounter an ESCAPE, because it might be the start of an escape + # sequene + while x < minlen and oldline[x] == newline[x] and newline[x] != '\x1b': + x += 1 + if oldline[x:] == newline[x+1:] and self.ich1: + if ( y == self.__posxy[1] and x > self.__posxy[0] + and oldline[px:x] == newline[px+1:x+1] ): + x = px + self.__move(x, y) + self.__write_code(self.ich1) + self.__write(newline[x]) + self.__posxy = x + 1, y + elif x < minlen and oldline[x + 1:] == newline[x + 1:]: + self.__move(x, y) + self.__write(newline[x]) + self.__posxy = x + 1, y + elif (self.dch1 and self.ich1 and len(newline) == self.width + and x < len(newline) - 2 + and newline[x+1:-1] == oldline[x:-2]): + self.__hide_cursor() + self.__move(self.width - 2, y) + self.__posxy = self.width - 2, y + self.__write_code(self.dch1) + self.__move(x, y) + self.__write_code(self.ich1) + self.__write(newline[x]) + self.__posxy = x + 1, y + else: + self.__hide_cursor() + self.__move(x, y) + if len(oldline) > len(newline): + self.__write_code(self._el) + self.__write(newline[x:]) + self.__posxy = len(newline), y + + if '\x1b' in newline: + # ANSI escape characters are present, so we can't assume + # anything about the position of the cursor. Moving the cursor + # to the left margin should work to get to a known position. 
+ self.move_cursor(0, y) + + def __write(self, text): + self.__buffer.append((text, 0)) + + def __write_code(self, fmt, *args): + self.__buffer.append((curses.tparm(fmt, *args), 1)) + + def __maybe_write_code(self, fmt, *args): + if fmt: + self.__write_code(fmt, *args) + + def __move_y_cuu1_cud1(self, y): + dy = y - self.__posxy[1] + if dy > 0: + self.__write_code(dy*self._cud1) + elif dy < 0: + self.__write_code((-dy)*self._cuu1) + + def __move_y_cuu_cud(self, y): + dy = y - self.__posxy[1] + if dy > 0: + self.__write_code(self._cud, dy) + elif dy < 0: + self.__write_code(self._cuu, -dy) + + def __move_x_hpa(self, x): + if x != self.__posxy[0]: + self.__write_code(self._hpa, x) + + def __move_x_cub1_cuf1(self, x): + dx = x - self.__posxy[0] + if dx > 0: + self.__write_code(self._cuf1*dx) + elif dx < 0: + self.__write_code(self._cub1*(-dx)) + + def __move_x_cub_cuf(self, x): + dx = x - self.__posxy[0] + if dx > 0: + self.__write_code(self._cuf, dx) + elif dx < 0: + self.__write_code(self._cub, -dx) + + def __move_short(self, x, y): + self.__move_x(x) + self.__move_y(y) + + def __move_tall(self, x, y): + assert 0 <= y - self.__offset < self.height, y - self.__offset + self.__write_code(self._cup, y - self.__offset, x) + + def move_cursor(self, x, y): + if y < self.__offset or y >= self.__offset + self.height: + self.event_queue.insert(Event('scroll', None)) + else: + self.__move(x, y) + self.__posxy = x, y + self.flushoutput() + + def prepare(self): + # per-readline preparations: + self.__svtermstate = tcgetattr(self.input_fd) + raw = self.__svtermstate.copy() + raw.iflag &=~ (termios.BRKINT | termios.INPCK | + termios.ISTRIP | termios.IXON) + raw.oflag &=~ (termios.OPOST) + raw.cflag &=~ (termios.CSIZE|termios.PARENB) + raw.cflag |= (termios.CS8) + raw.lflag &=~ (termios.ICANON|termios.ECHO| + termios.IEXTEN|(termios.ISIG*1)) + raw.cc[termios.VMIN] = 1 + raw.cc[termios.VTIME] = 0 + tcsetattr(self.input_fd, termios.TCSADRAIN, raw) + + self.screen = [] + self.height, 
self.width = self.getheightwidth() + + self.__buffer = [] + + self.__posxy = 0, 0 + self.__gone_tall = 0 + self.__move = self.__move_short + self.__offset = 0 + + self.__maybe_write_code(self._smkx) + + self.old_sigwinch = signal.signal( + signal.SIGWINCH, self.__sigwinch) + + def restore(self): + self.__maybe_write_code(self._rmkx) + self.flushoutput() + tcsetattr(self.input_fd, termios.TCSADRAIN, self.__svtermstate) + + signal.signal(signal.SIGWINCH, self.old_sigwinch) + + def __sigwinch(self, signum, frame): + self.height, self.width = self.getheightwidth() + self.event_queue.insert(Event('resize', None)) + + def push_char(self, char): + self.partial_char += char + try: + c = unicode(self.partial_char, self.encoding) + except UnicodeError, e: + if len(e.args) > 4 and \ + e.args[4] == 'unexpected end of data': + pass + else: + raise + else: + self.partial_char = '' + self.event_queue.push(c) + + def get_event(self, block=1): + while self.event_queue.empty(): + while 1: # All hail Unix! 
+ try: + self.push_char(os.read(self.input_fd, 1)) + except (IOError, OSError), err: + if err.errno == errno.EINTR: + if not self.event_queue.empty(): + return self.event_queue.get() + else: + continue + else: + raise + else: + break + if not block: + break + return self.event_queue.get() + + def wait(self): + self.pollob.poll() + + def set_cursor_vis(self, vis): + if vis: + self.__show_cursor() + else: + self.__hide_cursor() + + def __hide_cursor(self): + if self.cursor_visible: + self.__maybe_write_code(self._civis) + self.cursor_visible = 0 + + def __show_cursor(self): + if not self.cursor_visible: + self.__maybe_write_code(self._cnorm) + self.cursor_visible = 1 + + def repaint_prep(self): + if not self.__gone_tall: + self.__posxy = 0, self.__posxy[1] + self.__write("\r") + ns = len(self.screen)*['\000'*self.width] + self.screen = ns + else: + self.__posxy = 0, self.__offset + self.__move(0, self.__offset) + ns = self.height*['\000'*self.width] + self.screen = ns + + if TIOCGWINSZ: + def getheightwidth(self): + try: + return int(os.environ["LINES"]), int(os.environ["COLUMNS"]) + except KeyError: + height, width = struct.unpack( + "hhhh", ioctl(self.input_fd, TIOCGWINSZ, "\000"*8))[0:2] + if not height: return 25, 80 + return height, width + else: + def getheightwidth(self): + try: + return int(os.environ["LINES"]), int(os.environ["COLUMNS"]) + except KeyError: + return 25, 80 + + def forgetinput(self): + termios.tcflush(self.input_fd, termios.TCIFLUSH) + + def flushoutput(self): + for text, iscode in self.__buffer: + if iscode: + self.__tputs(text) + else: + os.write(self.output_fd, text.encode(self.encoding)) + del self.__buffer[:] + + def __tputs(self, fmt, prog=delayprog): + """A Python implementation of the curses tputs function; the + curses one can't really be wrapped in a sane manner. 
+ + I have the strong suspicion that this is complexity that + will never do anyone any good.""" + # using .get() means that things will blow up + # only if the bps is actually needed (which I'm + # betting is pretty unlkely) + bps = ratedict.get(self.__svtermstate.ospeed) + while 1: + m = prog.search(fmt) + if not m: + os.write(self.output_fd, fmt) + break + x, y = m.span() + os.write(self.output_fd, fmt[:x]) + fmt = fmt[y:] + delay = int(m.group(1)) + if '*' in m.group(2): + delay *= self.height + if self._pad: + nchars = (bps*delay)/1000 + os.write(self.output_fd, self._pad*nchars) + else: + time.sleep(float(delay)/1000.0) + + def finish(self): + y = len(self.screen) - 1 + while y >= 0 and not self.screen[y]: + y -= 1 + self.__move(0, min(y, self.height + self.__offset - 1)) + self.__write("\n\r") + self.flushoutput() + + def beep(self): + self.__maybe_write_code(self._bel) + self.flushoutput() + + if FIONREAD: + def getpending(self): + e = Event('key', '', '') + + while not self.event_queue.empty(): + e2 = self.event_queue.get() + e.data += e2.data + e.raw += e.raw + + amount = struct.unpack( + "i", ioctl(self.input_fd, FIONREAD, "\0\0\0\0"))[0] + raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace') + e.data += raw + e.raw += raw + return e + else: + def getpending(self): + e = Event('key', '', '') + + while not self.event_queue.empty(): + e2 = self.event_queue.get() + e.data += e2.data + e.raw += e.raw + + amount = 10000 + raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace') + e.data += raw + e.raw += raw + return e + + def clear(self): + self.__write_code(self._clear) + self.__gone_tall = 1 + self.__move = self.__move_tall + self.__posxy = 0, 0 + self.screen = [] + diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -3,9 +3,8 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from 
pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import Py_LT, Py_LE, Py_NE, Py_EQ,\ - Py_GE, Py_GT, fopen, fclose, fwrite -from pypy.tool.udir import udir +from pypy.module.cpyext.api import ( + Py_LT, Py_LE, Py_NE, Py_EQ, Py_GE, Py_GT) class TestObject(BaseApiTest): def test_IsTrue(self, space, api): @@ -175,58 +174,23 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" assert api.PyObject_Unicode(space.wrap("\xe9")) is None api.PyErr_Clear() - def test_file_fromstring(self, space, api): - filename = rffi.str2charp(str(udir / "_test_file")) - mode = rffi.str2charp("wb") - w_file = api.PyFile_FromString(filename, mode) - rffi.free_charp(filename) - rffi.free_charp(mode) - - assert api.PyFile_Check(w_file) - assert api.PyFile_CheckExact(w_file) - assert not api.PyFile_Check(space.wrap("text")) - - space.call_method(w_file, "write", space.wrap("text")) - space.call_method(w_file, "close") - assert (udir / "_test_file").read() == "text" - - def test_file_getline(self, space, api): - filename = rffi.str2charp(str(udir / "_test_file")) - - mode = rffi.str2charp("w") - w_file = api.PyFile_FromString(filename, mode) - space.call_method(w_file, "write", - space.wrap("line1\nline2\nline3\nline4")) - space.call_method(w_file, "close") - - rffi.free_charp(mode) - mode = 
rffi.str2charp("r") - w_file = api.PyFile_FromString(filename, mode) - rffi.free_charp(filename) - rffi.free_charp(mode) - - w_line = api.PyFile_GetLine(w_file, 0) - assert space.str_w(w_line) == "line1\n" - - w_line = api.PyFile_GetLine(w_file, 4) - assert space.str_w(w_line) == "line" - - w_line = api.PyFile_GetLine(w_file, 0) - assert space.str_w(w_line) == "2\n" - - # XXX We ought to raise an EOFError here, but don't - w_line = api.PyFile_GetLine(w_file, -1) - # assert api.PyErr_Occurred() is space.w_EOFError - assert space.str_w(w_line) == "line3\n" - - space.call_method(w_file, "close") - class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/lib_pypy/pyrepl/tests/wishes.py b/lib_pypy/pyrepl/tests/wishes.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/wishes.py @@ -0,0 +1,38 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +# this test case should contain as-verbatim-as-possible versions of +# (applicable) feature requests + +class WishesTestCase(ReaderTestCase): + + def test_quoted_insert_repeat(self): + self.run_test([(('digit-arg', '3'), ['']), + ( 'quoted-insert', ['']), + (('self-insert', '\033'), ['^[^[^[']), + ( 'accept', None)]) + +def test(): + run_testcase(WishesTestCase) + +if __name__ == '__main__': + test() diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitely encoded as well, +# with immediate(argnum)). + +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,7 +364,9 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +384,8 @@ INSN_bi32(mc, 
offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,23 +463,25 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) CMP_ji = select_8_or_32_bit_immed(CMP_ji8, 
CMP_ji32) - CMP_jr = insn(rex_w, '\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -505,10 +526,11 @@ POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) LEA_rb = insn(rex_w, '\x8D', register(1,8), stack_bp(2)) + LEA_rs = insn(rex_w, '\x8D', register(1,8), stack_sp(2)) LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -534,12 +556,15 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion @@ -639,7 +664,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -680,7 +705,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, 
immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/metainterp/virtualref.py b/pypy/jit/metainterp/virtualref.py --- a/pypy/jit/metainterp/virtualref.py +++ b/pypy/jit/metainterp/virtualref.py @@ -13,7 +13,6 @@ self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef', ('super', rclass.OBJECT), ('virtual_token', lltype.Signed), - ('virtualref_index', lltype.Signed), ('forced', rclass.OBJECTPTR)) self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', @@ -27,8 +26,6 @@ fielddescrof = self.cpu.fielddescrof self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF, 'virtual_token') - self.descr_virtualref_index = fielddescrof(self.JIT_VIRTUAL_REF, - 'virtualref_index') self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced') # # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. 
executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - 
first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? 
- m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def 
translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,14 +190,30 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. 
""" return posix.waitpid(-1, 0) + def wait3(options): + """ wait3(options) -> (pid, status, rusage) + + Wait for completion of a child process and provides resource usage informations + """ + from _pypy_wait import wait3 + return wait3(options) + + def wait4(pid, options): + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -285,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == 
pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/doc/config/confrest.py b/pypy/doc/config/confrest.py --- a/pypy/doc/config/confrest.py +++ b/pypy/doc/config/confrest.py @@ -7,7 +7,6 @@ all_optiondescrs = [pypyoption.pypy_optiondescription, translationoption.translation_optiondescription, ] - start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) class PyPyPage(PyPyPage): @@ -29,7 +28,7 @@ Page = PyPyPage def get_content(self, txtpath, encoding): - if txtpath.basename == "commandline.txt": + if txtpath.basename == "commandline.rst": result = [] for line in txtpath.read().splitlines(): if line.startswith('.. GENERATE:'): diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -12,12 +12,13 @@ 'get_ident': 'os_thread.get_ident', 'exit': 'os_thread.exit', 'exit_thread': 'os_thread.exit', # obsolete synonym + 'interrupt_main': 'os_thread.interrupt_main', 'stack_size': 'os_thread.stack_size', '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def 
_clone_if_mutable(self): res = ResumeGuardDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res diff --git a/pypy/jit/metainterp/test/test_optimizebasic.py b/pypy/jit/metainterp/test/test_optimizebasic.py --- a/pypy/jit/metainterp/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/test/test_optimizebasic.py @@ -256,7 +256,7 @@ loop.call_pure_results = args_dict() if call_pure_results is not None: for k, v in call_pure_results.items(): - loop.call_pure_results[list(k)] = v + loop.call_pure_results[list(k)] = v metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo @@ -2889,7 +2889,7 @@ # the result of the call, recorded as the first arg), or turned into # a regular CALL. 
arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)] - call_pure_results = {tuple(arg_consts): ConstInt(42)} + call_pure_results = {tuple(arg_consts): ConstInt(42)} ops = ''' [i0, i1, i2] escape(i1) @@ -2934,7 +2934,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -2967,7 +2966,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3008,7 +3006,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3065,7 +3062,7 @@ self.loop.inputargs[0].value = self.nodeobjvalue self.check_expanded_fail_descr('''p2, p1 p0.refdescr = p2 - where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 where p1 is a node_vtable, nextdescr=p1b where p1b is a node_vtable, valuedescr=i1 ''', rop.GUARD_NO_EXCEPTION) @@ -3087,7 +3084,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3114,7 +3110,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3363,7 
+3358,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) i3 = int_lt(i2, -5) guard_true(i3) [] @@ -3374,7 +3369,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) jump(i0) """ diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py --- a/pypy/rpython/lltypesystem/rdict.py +++ b/pypy/rpython/lltypesystem/rdict.py @@ -4,12 +4,15 @@ from pypy.rpython.rdict import AbstractDictRepr, AbstractDictIteratorRepr,\ rtype_newdict from pypy.rpython.lltypesystem import lltype -from pypy.rlib.rarithmetic import r_uint, intmask +from pypy.rlib.rarithmetic import r_uint, intmask, LONG_BIT from pypy.rlib.objectmodel import hlinvoke from pypy.rpython import robject -from pypy.rlib import objectmodel +from pypy.rlib import objectmodel, jit from pypy.rpython import rmodel +HIGHEST_BIT = intmask(1 << (LONG_BIT - 1)) +MASK = intmask(HIGHEST_BIT - 1) + # ____________________________________________________________ # # generic implementation of RPython dictionary, with parametric DICTKEY and @@ -405,6 +408,10 @@ ENTRIES = lltype.typeOf(entries).TO return ENTRIES.fasthashfn(entries[i].key) + at jit.dont_look_inside +def ll_get_value(d, i): + return d.entries[i].value + def ll_keyhash_custom(d, key): DICT = lltype.typeOf(d).TO return hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key) @@ 
-422,18 +429,21 @@ def ll_dict_getitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - entries = d.entries - if entries.valid(i): - return entries[i].value - else: - raise KeyError -ll_dict_getitem.oopspec = 'dict.getitem(d, key)' + if not i & HIGHEST_BIT: + return ll_get_value(d, i) + else: + raise KeyError def ll_dict_setitem(d, key, value): hash = d.keyhash(key) i = ll_dict_lookup(d, key, hash) + return _ll_dict_setitem_lookup_done(d, key, value, hash, i) + + at jit.dont_look_inside +def _ll_dict_setitem_lookup_done(d, key, value, hash, i): + valid = (i & HIGHEST_BIT) == 0 + i = i & MASK everused = d.entries.everused(i) - valid = d.entries.valid(i) # set up the new entry ENTRY = lltype.typeOf(d.entries).TO.OF entry = d.entries[i] @@ -449,7 +459,6 @@ d.num_pristine_entries -= 1 if d.num_pristine_entries <= len(d.entries) / 3: ll_dict_resize(d) -ll_dict_setitem.oopspec = 'dict.setitem(d, key, value)' def ll_dict_insertclean(d, key, value, hash): # Internal routine used by ll_dict_resize() to insert an item which is @@ -470,7 +479,7 @@ def ll_dict_delitem(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - if not d.entries.valid(i): + if i & HIGHEST_BIT: raise KeyError _ll_dict_del(d, i) ll_dict_delitem.oopspec = 'dict.delitem(d, key)' @@ -542,7 +551,7 @@ elif entries.everused(i): freeslot = i else: - return i # pristine entry -- lookup failed + return i | HIGHEST_BIT # pristine entry -- lookup failed # In the loop, a deleted entry (everused and not valid) is by far # (factor of 100s) the least likely outcome, so test for that last. 
@@ -557,7 +566,7 @@ if not entries.everused(i): if freeslot == -1: freeslot = i - return freeslot + return freeslot | HIGHEST_BIT elif entries.valid(i): checkingkey = entries[i].key if direct_compare and checkingkey == key: @@ -711,22 +720,19 @@ def ll_get(dict, key, default): i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value - else: + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) + else: return default -ll_get.oopspec = 'dict.get(dict, key, default)' def ll_setdefault(dict, key, default): - i = ll_dict_lookup(dict, key, dict.keyhash(key)) - entries = dict.entries - if entries.valid(i): - return entries[i].value + hash = dict.keyhash(key) + i = ll_dict_lookup(dict, key, hash) + if not i & HIGHEST_BIT: + return ll_get_value(dict, i) else: - ll_dict_setitem(dict, key, default) + _ll_dict_setitem_lookup_done(dict, key, default, hash, i) return default -ll_setdefault.oopspec = 'dict.setdefault(dict, key, default)' def ll_copy(dict): DICT = lltype.typeOf(dict).TO @@ -768,7 +774,10 @@ while i < d2len: if entries.valid(i): entry = entries[i] - ll_dict_setitem(dic1, entry.key, entry.value) + hash = entries.hash(i) + key = entry.key + j = ll_dict_lookup(dic1, key, hash) + _ll_dict_setitem_lookup_done(dic1, key, entry.value, hash, j) i += 1 ll_update.oopspec = 'dict.update(dic1, dic2)' @@ -818,8 +827,7 @@ def ll_contains(d, key): i = ll_dict_lookup(d, key, d.keyhash(key)) - return d.entries.valid(i) -ll_contains.oopspec = 'dict.contains(d, key)' + return not i & HIGHEST_BIT POPITEMINDEX = lltype.Struct('PopItemIndex', ('nextindex', lltype.Signed)) global_popitem_index = lltype.malloc(POPITEMINDEX, zero=True, immortal=True) diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf 
assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -15,7 +15,7 @@ ## The problem ## ----------- ## -## PyString_AsString() must returns a (non-movable) pointer to the underlying +## PyString_AsString() must return a (non-movable) pointer to the underlying ## buffer, whereas pypy strings are movable. C code may temporarily store ## this address and use it, as long as it owns a reference to the PyObject. ## There is no "release" function to specify that the pointer is not needed diff --git a/lib_pypy/pyrepl/copy_code.py b/lib_pypy/pyrepl/copy_code.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/copy_code.py @@ -0,0 +1,73 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import new + +def copy_code_with_changes(codeobject, + argcount=None, + nlocals=None, + stacksize=None, + flags=None, + code=None, + consts=None, + names=None, + varnames=None, + filename=None, + name=None, + firstlineno=None, + lnotab=None): + if argcount is None: argcount = codeobject.co_argcount + if nlocals is None: nlocals = codeobject.co_nlocals + if stacksize is None: stacksize = codeobject.co_stacksize + if flags is None: flags = codeobject.co_flags + if code is None: code = codeobject.co_code + if consts is None: consts = codeobject.co_consts + if names is None: names = codeobject.co_names + if varnames is None: varnames = codeobject.co_varnames + if filename is None: filename = codeobject.co_filename + if name is None: name = codeobject.co_name + if firstlineno is None: firstlineno = codeobject.co_firstlineno + if lnotab is None: lnotab = codeobject.co_lnotab + return new.code(argcount, + nlocals, + stacksize, + flags, + code, + consts, + names, + varnames, + filename, + name, + firstlineno, + lnotab) + +code_attrs=['argcount', + 'nlocals', + 'stacksize', + 'flags', + 'code', + 'consts', + 'names', + 'varnames', + 'filename', + 'name', + 'firstlineno', + 'lnotab'] + + diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } @@ -44,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True 
gcrootmap = MockGcRootMap() @@ -166,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -204,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -220,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -254,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + 
(WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -274,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -289,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. 
+ ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. 
diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() @@ -245,6 +262,11 @@ obj = foo.new() assert module.read_tp_dict(obj) == foo.fooType.copy + def test_custom_allocation(self): + foo = self.import_module("foo") + obj = foo.newCustom() + assert type(obj) is 
foo.Custom + assert type(foo.Custom) is foo.MetaType class TestTypes(BaseApiTest): def test_type_attributes(self, space, api): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,17 +29,22 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) + + def test_basic_threadstate_dance(self, space, api): + # Let extension modules call these functions, + # Not sure of the semantics in pypy though. + # (cpyext always acquires and releases the GIL around calls) + tstate = api.PyThreadState_Swap(None) + assert tstate is not None + assert not api.PyThreadState_Swap(tstate) + + api.PyEval_AcquireThread(tstate) + api.PyEval_ReleaseThread(tstate) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. 
Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." + raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py --- a/pypy/jit/codewriter/support.py +++ b/pypy/jit/codewriter/support.py @@ -399,12 +399,7 @@ return ll_rdict.ll_newdict(DICT) _ll_0_newdict.need_result_type = True - _ll_2_dict_getitem = ll_rdict.ll_dict_getitem - _ll_3_dict_setitem = ll_rdict.ll_dict_setitem _ll_2_dict_delitem = ll_rdict.ll_dict_delitem - _ll_3_dict_setdefault = ll_rdict.ll_setdefault - _ll_2_dict_contains = ll_rdict.ll_contains - _ll_3_dict_get = ll_rdict.ll_get _ll_1_dict_copy = ll_rdict.ll_copy _ll_1_dict_clear = ll_rdict.ll_clear _ll_2_dict_update = ll_rdict.ll_update diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def 
rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that needs to go via its slow path. 
+ import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): @@ -153,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - 
# string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. 
For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. + halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert 
buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each 
safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -314,6 +314,7 @@ 'Py_BuildValue', 'Py_VaBuildValue', 'PyTuple_Pack', 'PyErr_Format', 'PyErr_NewException', 'PyErr_NewExceptionWithDoc', + 'PySys_WriteStdout', 'PySys_WriteStderr', 'PyEval_CallFunction', 'PyEval_CallMethod', 'PyObject_CallFunction', 'PyObject_CallMethod', 'PyObject_CallFunctionObjArgs', 'PyObject_CallMethodObjArgs', @@ -399,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, 
rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -538,7 +527,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): @@ -883,6 +873,7 @@ source_dir / "stringobject.c", source_dir / "mysnprintf.c", source_dir / "pythonrun.c", + source_dir / "sysmodule.c", source_dir / "bufferobject.c", source_dir / "object.c", source_dir / "cobject.c", diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) 
!= 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import \ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ @@ -151,6 +152,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -259,7 +262,29 @@ d[33] = 99 assert d == dd assert x == 99 - + + def test_setdefault_fast(self): + class Key(object): + calls = 0 + def __hash__(self): + self.calls += 1 + return object.__hash__(self) + + k = Key() + d = {} + d.setdefault(k, []) + if self.on_pypy: + assert k.calls == 1 + + d.setdefault(k, 1) + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 + def test_update(self): d = {1:2, 3:4} dd = d.copy() @@ -704,13 +729,20 @@ class FakeString(str): + hash_count = 0 def unwrap(self, space): self.unwrapped = True return str(self) + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: + hash_count = 0 def hash_w(self, 
obj): + self.hash_count += 1 return hash(obj) def unwrap(self, x): return x @@ -726,6 +758,8 @@ return [] DictObjectCls = W_DictMultiObject def type(self, w_obj): + if isinstance(w_obj, FakeString): + return str return type(w_obj) w_str = str def str_w(self, string): @@ -890,6 +924,19 @@ impl.setitem(x, x) assert impl.r_dict_content is not None + def test_setdefault_fast(self): + on_pypy = "__pypy__" in sys.builtin_module_names + impl = self.impl + key = FakeString(self.string) + x = impl.setdefault(key, 1) + assert x == 1 + if on_pypy: + assert key.hash_count == 1 + x = impl.setdefault(key, 2) + assert x == 1 + if on_pypy: + assert key.hash_count == 2 + class TestStrDictImplementation(BaseTestRDictImplementation): ImplementionClass = StrDictImplementation diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -96,6 +96,10 @@ out, err = capfd.readouterr() assert "Exception ValueError: 'message' in 'location' ignored" == err.strip() + def test_ExceptionInstance_Class(self, space, api): + instance = space.call_function(space.w_ValueError) + assert api.PyExceptionInstance_Class(instance) is space.w_ValueError + class AppTestFetch(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -221,14 +221,33 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO except AttributeError: list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) 
hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr) + return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + + def rtype_method_rsplit(self, hop): + rstr = hop.args_r[0].repr + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) + try: + list_type = hop.r_result.lowleveltype.TO + except AttributeError: + list_type = hop.r_result.lowleveltype + cLIST = hop.inputconst(Void, list_type) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,20 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, - getattrfunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, - ssizeobjargproc, iternextfunc, initproc, richcmpfunc, hashfunc, - descrgetfunc, descrsetfunc, objobjproc) + getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, + ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from 
pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -65,6 +69,12 @@ finally: rffi.free_charp(name_ptr) +def wrap_getattro(space, w_self, w_args, func): + func_target = rffi.cast(getattrofunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + return generic_cpy_call(space, func_target, w_self, args_w[0]) + def wrap_setattr(space, w_self, w_args, func): func_target = rffi.cast(setattrofunc, func) check_num_args(space, w_args, 2) @@ -187,18 +197,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, 
func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -289,7 +340,12 @@ # irregular interface, because of tp_getattr/tp_getattro confusion if NAME == "__getattr__": - wrapper = wrap_getattr + if SLOT == "tp_getattro": + wrapper = wrap_getattro + elif SLOT == "tp_getattr": + wrapper = wrap_getattr + else: + assert False function = globals().get(FUNCTION, None) assert FLAGS == 0 or FLAGS == PyWrapperFlag_KEYWORDS @@ -455,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), @@ -560,12 +616,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff 
--git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -5,9 +5,16 @@ cpython_api, build_type_checkers, PyObject, CONST_STRING, CANNOT_FAIL, Py_ssize_t) from pypy.rlib.rarithmetic import r_uint +import sys PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") + at cpython_api([], lltype.Signed, error=CANNOT_FAIL) +def PyInt_GetMax(space): + """Return the system's idea of the largest integer it can handle (LONG_MAX, + as defined in the system header files).""" + return sys.maxint + @cpython_api([lltype.Signed], PyObject) def PyInt_FromLong(space, ival): """Create a new integer object with a value of ival. 
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/lib-python/modified-2.7.0/distutils/command/build_ext.py b/lib-python/modified-2.7.0/distutils/command/build_ext.py --- a/lib-python/modified-2.7.0/distutils/command/build_ext.py +++ b/lib-python/modified-2.7.0/distutils/command/build_ext.py @@ -184,7 +184,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. 
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -192,8 +192,13 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) - if MSVC_VERSION == 9: + if 0: + # pypy has no PC directory + self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) + if 1: + # pypy has no PCBuild directory + pass + elif MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -695,24 +700,14 @@ shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. + # The python library is always needed on Windows. 
if sys.platform == "win32": - from distutils.msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - return ext.libraries + template = "python%d%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + # don't extend ext.libraries, it may be shared with other + # extensions, it is a reference to the original list + return ext.libraries + [pythonlib] elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -816,6 +816,52 @@ """ self.optimize_loop(ops, expected, preamble) + def test_compare_with_itself(self): + ops = """ + [] + i0 = escape() + i1 = int_lt(i0, i0) + guard_false(i1) [] + i2 = int_le(i0, i0) + guard_true(i2) [] + i3 = int_eq(i0, i0) + guard_true(i3) [] + i4 = int_ne(i0, i0) + guard_false(i4) [] + i5 = int_gt(i0, i0) + guard_false(i5) [] + i6 = int_ge(i0, i0) + guard_true(i6) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) + + def test_compare_with_itself_uint(self): + py.test.skip("implement me") + ops = """ + [] + i0 = escape() + i7 = uint_lt(i0, i0) + guard_false(i7) [] + 
i8 = uint_le(i0, i0) + guard_true(i8) [] + i9 = uint_gt(i0, i0) + guard_false(i9) [] + i10 = uint_ge(i0, i0) + guard_true(i10) [] + jump() + """ + expected = """ + [] + i0 = escape() + jump() + """ + self.optimize_loop(ops, expected) @@ -1791,7 +1837,7 @@ """ self.optimize_loop(ops, ops) - def test_duplicate_setfield_1(self): + def test_duplicate_setfield_0(self): ops = """ [p1, i1, i2] setfield_gc(p1, i1, descr=valuedescr) @@ -1800,8 +1846,27 @@ """ expected = """ [p1, i1, i2] + jump(p1, i1, i2) + """ + # in this case, all setfields are removed, because we can prove + # that in the loop it will always have the same value + self.optimize_loop(ops, expected) + + def test_duplicate_setfield_1(self): + ops = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i1, descr=valuedescr) setfield_gc(p1, i2, descr=valuedescr) - jump(p1, i1, i2) + jump(p1) + """ + expected = """ + [p1] + i1 = escape() + i2 = escape() + setfield_gc(p1, i2, descr=valuedescr) + jump(p1) """ self.optimize_loop(ops, expected) @@ -1848,6 +1913,7 @@ setfield_gc(p1, i4, descr=nextdescr) # setfield_gc(p1, i2, descr=valuedescr) + escape() jump(p1, i1, i2, p3) """ preamble = """ @@ -1860,6 +1926,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ expected = """ @@ -1871,6 +1938,7 @@ # setfield_gc(p1, i2, descr=valuedescr) setfield_gc(p1, i4, descr=nextdescr) + escape() jump(p1, i1, i2, p3, i3) """ self.optimize_loop(ops, expected, preamble) @@ -1943,6 +2011,7 @@ guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1950,12 +2019,14 @@ guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected, preamble) @@ -1969,6 +2040,7 @@ guard_true(i3) [] i4 = int_neg(i2) 
setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ preamble = """ @@ -1976,12 +2048,14 @@ guard_true(i3) [i2, p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, i4) """ expected = """ [p1, i2, i4] guard_true(i4) [i2, p1] setfield_gc(p1, NULL, descr=nextdescr) + escape() jump(p1, i2, 1) """ self.optimize_loop(ops, expected) @@ -2027,15 +2101,34 @@ guard_value(p1, ConstPtr(myptr)) [] setfield_gc(p1, i1, descr=valuedescr) setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(p1, i1, i2) """ expected = """ [i1, i2] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) + escape() jump(i1, i2) """ self.optimize_loop(ops, expected) + def test_dont_force_setfield_around_copystrcontent(self): + ops = """ + [p0, i0, p1, i1, i2] + setfield_gc(p0, i1, descr=valuedescr) + copystrcontent(p0, i0, p1, i1, i2) + escape() + jump(p0, i0, p1, i1, i2) + """ + expected = """ + [p0, i0, p1, i1, i2] + copystrcontent(p0, i0, p1, i1, i2) + setfield_gc(p0, i1, descr=valuedescr) + escape() + jump(p0, i0, p1, i1, i2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getarrayitem_1(self): ops = """ [p1] @@ -2356,6 +2449,33 @@ """ self.optimize_loop(ops, expected, preamble) + def test_bug_5(self): + ops = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + i6 = getfield_gc(p0, descr=valuedescr) + i8 = int_sub(i6, 1) + setfield_gc(p0, i8, descr=valuedescr) + escape() + jump(p0) + """ + expected = """ + [p0] + i0 = escape() + i2 = getfield_gc(p0, descr=valuedescr) + i4 = int_add(i2, 1) + setfield_gc(p0, i4, descr=valuedescr) + guard_true(i0) [] + setfield_gc(p0, i2, descr=valuedescr) + escape() + jump(p0) + """ + self.optimize_loop(ops, expected) + def test_invalid_loop_1(self): ops = """ [p1] @@ -2637,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def 
test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2671,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2708,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): @@ -2992,7 +3127,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3025,7 +3159,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3065,7 +3198,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3103,6 +3235,7 @@ guard_no_exception(descr=fdescr) [p2, p1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ preamble = """ @@ -3111,6 +3244,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ expected = """ @@ -3119,6 +3253,7 @@ call(i1, descr=nonwritedescr) guard_no_exception(descr=fdescr2) [i3, i1, p0] setfield_gc(p0, NULL, descr=refdescr) + escape() jump(p0, i1) """ 
self.optimize_loop(ops, expected, preamble) @@ -3129,7 +3264,7 @@ #self.loop.inputargs[0].value = self.nodeobjvalue #self.check_expanded_fail_descr('''p2, p1 # p0.refdescr = p2 - # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 # where p1 is a node_vtable, nextdescr=p1b # where p1b is a node_vtable, valuedescr=i1 # ''', rop.GUARD_NO_EXCEPTION) @@ -3150,7 +3285,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3176,7 +3310,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3693,13 +3826,16 @@ guard_true(i1) [] jump(p0) """ - # The dead strlen will be eliminated be the backend. 
- expected = """ + preamble = """ [p0] i0 = strlen(p0) jump(p0) """ - self.optimize_strunicode_loop(ops, expected, expected) + expected = """ + [p0] + jump(p0) + """ + self.optimize_strunicode_loop(ops, expected, preamble) def test_addsub_const(self): ops = """ @@ -4839,6 +4975,58 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): @@ -5150,7 +5338,21 @@ """ expected = """ [p0] + jump(p0) + """ + self.optimize_loop(ops, expected) + + def test_strlen_repeated(self): + ops = """ + [p0] i0 = strlen(p0) + i1 = strlen(p0) + i2 = int_eq(i0, i1) + guard_true(i2) [] + jump(p0) + """ + expected = """ + [p0] jump(p0) """ self.optimize_loop(ops, expected) diff --git a/pypy/rpython/test/test_rfloat.py 
b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -191,7 +191,7 @@ def test_emittable(self, op): return self.is_emittable(op) - + def is_emittable(self, op): return self.next_optimization.test_emittable(op) @@ -247,7 +247,7 @@ def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): #return self.__class__() raise NotImplementedError - + class Optimizer(Optimization): @@ -283,20 +283,20 @@ else: optimizations = [] self.first_optimization = self - - self.optimizations = optimizations + + self.optimizations = optimizations def force_at_end_of_preamble(self): self.resumedata_memo = resume.ResumeDataLoopMemo(self.metainterp_sd) for o in self.optimizations: o.force_at_end_of_preamble() - + def reconstruct_for_next_iteration(self, optimizer=None, valuemap=None): assert optimizer is None assert valuemap is None valuemap = {} new = Optimizer(self.metainterp_sd, self.loop) - 
optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in + optimizations = [o.reconstruct_for_next_iteration(new, valuemap) for o in self.optimizations] new.set_optimizations(optimizations) @@ -313,7 +313,7 @@ for key, value in self.loop_invariant_results.items(): new.loop_invariant_results[key] = \ value.get_reconstructed(new, valuemap) - + new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None @@ -439,7 +439,7 @@ def test_emittable(self, op): return True - + def emit_operation(self, op): ###self.heap_op_optimizer.emitting_operation(op) self._emit_operation(op) @@ -517,19 +517,17 @@ canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW else: nextop = None - + if canfold: for i in range(op.numargs()): if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? - self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? 
@@ -548,6 +546,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -213,7 +213,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_ge(v2.intbound): + elif v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -223,7 +223,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 1) - elif v1.intbound.known_le(v2.intbound): + elif v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 0) else: self.emit_operation(op) @@ -231,7 +231,7 @@ def optimize_INT_LE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_le(v2.intbound): + if 
v1.intbound.known_le(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_gt(v2.intbound): self.make_constant_int(op.result, 0) @@ -241,7 +241,7 @@ def optimize_INT_GE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.intbound.known_ge(v2.intbound): + if v1.intbound.known_ge(v2.intbound) or v1 is v2: self.make_constant_int(op.result, 1) elif v1.intbound.known_lt(v2.intbound): self.make_constant_int(op.result, 0) diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = 
self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 
#__________________________________________________________ def test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 
@@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -6,7 +6,7 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.tool.sourcetools import func_with_new_name from pypy.tool.autopath import pypydir -from pypy.rlib import rposix +from pypy.rlib import jit, rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,8 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +88,13 @@ # # 
Custom implementations - def ll_math_isnan(y): - return bool(math_isnan(y)) - + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. + return y != y def ll_math_isinf(y): - return bool(math_isinf(y)) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign diff --git a/lib_pypy/pyrepl/test/test_functional.py b/lib_pypy/pyrepl/test/test_functional.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/test/test_functional.py @@ -0,0 +1,50 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +# some functional tests, to see if this is really working + +import py +import sys + +class TestTerminal(object): + def _spawn(self, *args, **kwds): + try: + import pexpect + except ImportError, e: + py.test.skip(str(e)) + kwds.setdefault('timeout', 10) + child = pexpect.spawn(*args, **kwds) + child.logfile = sys.stdout + return child + + def spawn(self, argv=[]): + # avoid running start.py, cause it might contain + # things like readline or rlcompleter(2) included + child = self._spawn(sys.executable, ['-S'] + argv) + child.sendline('from pyrepl.python_reader import main') + child.sendline('main()') + return child + + def test_basic(self): + child = self.spawn() + child.sendline('a = 3') + child.sendline('a') + child.expect('3') + diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = 
remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/rlib/_rweakvaldict.py b/pypy/rlib/_rweakvaldict.py --- a/pypy/rlib/_rweakvaldict.py +++ b/pypy/rlib/_rweakvaldict.py @@ -113,7 +113,7 @@ @jit.dont_look_inside def ll_get(self, d, llkey): hash = 
self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get') valueref = d.entries[i].value if valueref: @@ -132,7 +132,7 @@ def ll_set_nonnull(self, d, llkey, llvalue): hash = self.ll_keyhash(llkey) valueref = weakref_create(llvalue) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = llkey d.entries[i].value = valueref @@ -146,7 +146,7 @@ @jit.dont_look_inside def ll_set_null(self, d, llkey): hash = self.ll_keyhash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. # We don't store a NULL value, but a dead weakref, because diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -151,8 +152,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = 
choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -500,6 +506,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = self.gcdata.gc.gcheaderbuilder.HDR @@ -786,6 +796,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1327,6 +1346,14 @@ return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1336,10 +1363,7 @@ return top.address[0] def allocate_stack(self): - result = llmemory.raw_malloc(self.rootstacksize) - if result: - 
llmemory.raw_memclear(result, self.rootstacksize) - return result + return llmemory.raw_malloc(self.rootstacksize) def setup_root_walker(self): stackbase = self.allocate_stack() @@ -1351,12 +1375,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1463,12 +1486,11 @@ # collect all valid stacks from the dict (the entry # corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr = end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/curses.py @@ -0,0 +1,39 @@ + +# Copyright 2000-2010 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Some try-import logic for two purposes: avoiding to bring in the whole +# pure Python curses package if possible; and, in _curses is not actually +# present, falling back to _minimal_curses (which is either a ctypes-based +# pure Python module or a PyPy built-in module). +try: + import _curses +except ImportError: + try: + import _minimal_curses as _curses + except ImportError: + # Who knows, maybe some environment has "curses" but not "_curses". + # If not, at least the following import gives a clean ImportError. 
+ import _curses + +setupterm = _curses.setupterm +tigetstr = _curses.tigetstr +tparm = _curses.tparm +error = _curses.error diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,10 +46,12 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs +import pypy.module.cpyext.pyfile # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -102,6 +102,7 @@ #include "modsupport.h" #include "pythonrun.h" #include "pyerrors.h" 
+#include "sysmodule.h" #include "stringobject.h" #include "descrobject.h" #include "tupleobject.h" @@ -109,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] @@ -385,6 +390,19 @@ assert module.__doc__ == "docstring" assert module.return_cookie() == 3.14 + def test_load_dynamic(self): + import sys + init = """ + if (Py_IsInitialized()) + Py_InitModule("foo", NULL); + """ + foo = self.import_module(name='foo', init=init) + assert 'foo' in sys.modules + del sys.modules['foo'] + import imp + foo2 = imp.load_dynamic('foo', foo.__file__) + assert 'foo' in sys.modules + assert foo.__dict__ == foo2.__dict__ def test_InitModule4_dotted(self): """ diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.extmodules.rst @@ -0,0 +1,12 @@ +You can pass a comma-separated list of third-party builtin modules +which should be translated along with the standard modules within +``pypy.module``. + +The module names need to be fully qualified (i.e. have a ``.`` in them), +be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. +``mypkg.somemod``. 
+ +Once translated, the module will be accessible with a simple:: + + import somemod + diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -306,6 +306,15 @@ stackcounter = StackCounter() stackcounter._freeze_() +def llexternal_use_eci(compilation_info): + """Return a dummy function that, if called in a RPython program, + adds the given ExternalCompilationInfo to it.""" + eci = ExternalCompilationInfo(post_include_bits=['#define PYPY_NO_OP()']) + eci = eci.merge(compilation_info) + return llexternal('PYPY_NO_OP', [], lltype.Void, + compilation_info=eci, sandboxsafe=True, _nowrapper=True, + _callable=lambda: None) + # ____________________________________________________________ # Few helpers for keeping callback arguments alive # this makes passing opaque objects possible (they don't even pass @@ -738,6 +747,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: 
- TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. 
but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/lib_pypy/pyrepl/tests/__init__.py b/lib_pypy/pyrepl/tests/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and 
+# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# moo diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? 
## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/lib_pypy/pyrepl/cmdrepl.py b/lib_pypy/pyrepl/cmdrepl.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/cmdrepl.py @@ -0,0 +1,118 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Wedge pyrepl behaviour into cmd.Cmd-derived classes. + +replize, when given a subclass of cmd.Cmd, returns a class that +behaves almost identically to the supplied class, except that it uses +pyrepl instead if raw_input. 
+ +It was designed to let you do this: + +>>> import pdb +>>> from pyrepl import replize +>>> pdb.Pdb = replize(pdb.Pdb) + +which is in fact done by the `pythoni' script that comes with +pyrepl.""" + +from __future__ import nested_scopes + +from pyrepl import completing_reader as cr, reader, completer +from pyrepl.completing_reader import CompletingReader as CR +import cmd + +class CmdReader(CR): + def collect_keymap(self): + return super(CmdReader, self).collect_keymap() + ( + ("\\M-\\n", "invalid-key"), + ("\\n", "accept")) + + CR_init = CR.__init__ + def __init__(self, completions): + self.CR_init(self) + self.completions = completions + + def get_completions(self, stem): + if len(stem) != self.pos: + return [] + return cr.uniqify([s for s in self.completions + if s.startswith(stem)]) + +def replize(klass, history_across_invocations=1): + + """Return a subclass of the cmd.Cmd-derived klass that uses + pyrepl instead of readline. + + Raises a ValueError if klass does not derive from cmd.Cmd. 
+ + The optional history_across_invocations parameter (default 1) + controls whether instances of the returned class share + histories.""" + + completions = [s[3:] + for s in completer.get_class_members(klass) + if s.startswith("do_")] + + if not issubclass(klass, cmd.Cmd): + raise Exception +# if klass.cmdloop.im_class is not cmd.Cmd: +# print "this may not work" + + class CmdRepl(klass): + k_init = klass.__init__ + + if history_across_invocations: + _CmdRepl__history = [] + def __init__(self, *args, **kw): + self.k_init(*args, **kw) + self.__reader = CmdReader(completions) + self.__reader.history = CmdRepl._CmdRepl__history + self.__reader.historyi = len(CmdRepl._CmdRepl__history) + else: + def __init__(self, *args, **kw): + self.k_init(*args, **kw) + self.__reader = CmdReader(completions) + + def cmdloop(self, intro=None): + self.preloop() + if intro is not None: + self.intro = intro + if self.intro: + print self.intro + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue[0] + del self.cmdqueue[0] + else: + try: + self.__reader.ps1 = self.prompt + line = self.__reader.readline() + except EOFError: + line = "EOF" + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + + CmdRepl.__name__ = "replize(%s.%s)"%(klass.__module__, klass.__name__) + return CmdRepl + diff --git a/lib_pypy/pyrepl/historical_reader.py b/lib_pypy/pyrepl/historical_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/historical_reader.py @@ -0,0 +1,311 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl import reader, commands +from pyrepl.reader import Reader as R + +isearch_keymap = tuple( + [('\\%03o'%c, 'isearch-end') for c in range(256) if chr(c) != '\\'] + \ + [(c, 'isearch-add-character') + for c in map(chr, range(32, 127)) if c != '\\'] + \ + [('\\%03o'%c, 'isearch-add-character') + for c in range(256) if chr(c).isalpha() and chr(c) != '\\'] + \ + [('\\\\', 'self-insert'), + (r'\C-r', 'isearch-backwards'), + (r'\C-s', 'isearch-forwards'), + (r'\C-c', 'isearch-cancel'), + (r'\C-g', 'isearch-cancel'), + (r'\', 'isearch-backspace')]) + +del c + +ISEARCH_DIRECTION_NONE = '' +ISEARCH_DIRECTION_BACKWARDS = 'r' +ISEARCH_DIRECTION_FORWARDS = 'f' + +class next_history(commands.Command): + def do(self): + r = self.reader + if r.historyi == len(r.history): + r.error("end of history list") + return + r.select_item(r.historyi + 1) + +class previous_history(commands.Command): + def do(self): + r = self.reader + if r.historyi == 0: + r.error("start of history list") + return + r.select_item(r.historyi - 1) + +class restore_history(commands.Command): + def do(self): + r = self.reader + if r.historyi != len(r.history): + if r.get_unicode() != r.history[r.historyi]: + r.buffer = list(r.history[r.historyi]) + r.pos = len(r.buffer) + r.dirty = 1 + +class first_history(commands.Command): + def do(self): + self.reader.select_item(0) + +class last_history(commands.Command): + def do(self): + self.reader.select_item(len(self.reader.history)) + +class 
operate_and_get_next(commands.FinishCommand): + def do(self): + self.reader.next_history = self.reader.historyi + 1 + +class yank_arg(commands.Command): + def do(self): + r = self.reader + if r.last_command is self.__class__: + r.yank_arg_i += 1 + else: + r.yank_arg_i = 0 + if r.historyi < r.yank_arg_i: + r.error("beginning of history list") + return + a = r.get_arg(-1) + # XXX how to split? + words = r.get_item(r.historyi - r.yank_arg_i - 1).split() + if a < -len(words) or a >= len(words): + r.error("no such arg") + return + w = words[a] + b = r.buffer + if r.yank_arg_i > 0: + o = len(r.yank_arg_yanked) + else: + o = 0 + b[r.pos - o:r.pos] = list(w) + r.yank_arg_yanked = w + r.pos += len(w) - o + r.dirty = 1 + +class forward_history_isearch(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_FORWARDS + r.isearch_start = r.historyi, r.pos + r.isearch_term = '' + r.dirty = 1 + r.push_input_trans(r.isearch_trans) + + +class reverse_history_isearch(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS + r.dirty = 1 + r.isearch_term = '' + r.push_input_trans(r.isearch_trans) + r.isearch_start = r.historyi, r.pos + +class isearch_cancel(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_NONE + r.pop_input_trans() + r.select_item(r.isearch_start[0]) + r.pos = r.isearch_start[1] + r.dirty = 1 + +class isearch_add_character(commands.Command): + def do(self): + r = self.reader + b = r.buffer + r.isearch_term += self.event[-1] + r.dirty = 1 + p = r.pos + len(r.isearch_term) - 1 + if b[p:p+1] != [r.isearch_term[-1]]: + r.isearch_next() + +class isearch_backspace(commands.Command): + def do(self): + r = self.reader + if len(r.isearch_term) > 0: + r.isearch_term = r.isearch_term[:-1] + r.dirty = 1 + else: + r.error("nothing to rubout") + +class isearch_forwards(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = 
ISEARCH_DIRECTION_FORWARDS + r.isearch_next() + +class isearch_backwards(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS + r.isearch_next() + +class isearch_end(commands.Command): + def do(self): + r = self.reader + r.isearch_direction = ISEARCH_DIRECTION_NONE + r.console.forgetinput() + r.pop_input_trans() + r.dirty = 1 + +class HistoricalReader(R): + """Adds history support (with incremental history searching) to the + Reader class. + + Adds the following instance variables: + * history: + a list of strings + * historyi: + * transient_history: + * next_history: + * isearch_direction, isearch_term, isearch_start: + * yank_arg_i, yank_arg_yanked: + used by the yank-arg command; not actually manipulated by any + HistoricalReader instance methods. + """ + + def collect_keymap(self): + return super(HistoricalReader, self).collect_keymap() + ( + (r'\C-n', 'next-history'), + (r'\C-p', 'previous-history'), + (r'\C-o', 'operate-and-get-next'), + (r'\C-r', 'reverse-history-isearch'), + (r'\C-s', 'forward-history-isearch'), + (r'\M-r', 'restore-history'), + (r'\M-.', 'yank-arg'), + (r'\', 'last-history'), + (r'\', 'first-history')) + + + def __init__(self, console): + super(HistoricalReader, self).__init__(console) + self.history = [] + self.historyi = 0 + self.transient_history = {} + self.next_history = None + self.isearch_direction = ISEARCH_DIRECTION_NONE + for c in [next_history, previous_history, restore_history, + first_history, last_history, yank_arg, + forward_history_isearch, reverse_history_isearch, + isearch_end, isearch_add_character, isearch_cancel, + isearch_add_character, isearch_backspace, + isearch_forwards, isearch_backwards, operate_and_get_next]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + from pyrepl import input + self.isearch_trans = input.KeymapTranslator( + isearch_keymap, invalid_cls=isearch_end, + character_cls=isearch_add_character) + + def 
select_item(self, i): + self.transient_history[self.historyi] = self.get_unicode() + buf = self.transient_history.get(i) + if buf is None: + buf = self.history[i] + self.buffer = list(buf) + self.historyi = i + self.pos = len(self.buffer) + self.dirty = 1 + + def get_item(self, i): + if i <> len(self.history): + return self.transient_history.get(i, self.history[i]) + else: + return self.transient_history.get(i, self.get_unicode()) + + def prepare(self): + super(HistoricalReader, self).prepare() + try: + self.transient_history = {} + if self.next_history is not None \ + and self.next_history < len(self.history): + self.historyi = self.next_history + self.buffer[:] = list(self.history[self.next_history]) + self.pos = len(self.buffer) + self.transient_history[len(self.history)] = '' + else: + self.historyi = len(self.history) + self.next_history = None + except: + self.restore() + raise + + def get_prompt(self, lineno, cursor_on_line): + if cursor_on_line and self.isearch_direction <> ISEARCH_DIRECTION_NONE: + d = 'rf'[self.isearch_direction == ISEARCH_DIRECTION_FORWARDS] + return "(%s-search `%s') "%(d, self.isearch_term) + else: + return super(HistoricalReader, self).get_prompt(lineno, cursor_on_line) + + def isearch_next(self): + st = self.isearch_term + p = self.pos + i = self.historyi + s = self.get_unicode() + forwards = self.isearch_direction == ISEARCH_DIRECTION_FORWARDS + while 1: + if forwards: + p = s.find(st, p + 1) + else: + p = s.rfind(st, 0, p + len(st) - 1) + if p != -1: + self.select_item(i) + self.pos = p + return + elif ((forwards and i == len(self.history) - 1) + or (not forwards and i == 0)): + self.error("not found") + return + else: + if forwards: + i += 1 + s = self.get_item(i) + p = -1 + else: + i -= 1 + s = self.get_item(i) + p = len(s) + + def finish(self): + super(HistoricalReader, self).finish() + ret = self.get_unicode() + for i, t in self.transient_history.items(): + if i < len(self.history) and i != self.historyi: + self.history[i] = t 
+ if ret: + self.history.append(ret) + +def test(): + from pyrepl.unix_console import UnixConsole + reader = HistoricalReader(UnixConsole()) + reader.ps1 = "h**> " + reader.ps2 = "h/*> " + reader.ps3 = "h|*> " + reader.ps4 = "h\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/pypy/module/cpyext/src/modsupport.c b/pypy/module/cpyext/src/modsupport.c --- a/pypy/module/cpyext/src/modsupport.c +++ b/pypy/module/cpyext/src/modsupport.c @@ -241,13 +241,12 @@ case 'I': { - Py_FatalError("I unsupported so far"); - //unsigned int n; - //n = va_arg(*p_va, unsigned int); - //if (n > (unsigned long)PyInt_GetMax()) - // return PyLong_FromUnsignedLong((unsigned long)n); - //else - // return PyInt_FromLong(n); + unsigned int n; + n = va_arg(*p_va, unsigned int); + if (n > (unsigned long)PyInt_GetMax()) + return PyLong_FromUnsignedLong((unsigned long)n); + else + return PyInt_FromLong(n); } case 'n': @@ -260,23 +259,20 @@ case 'k': { - Py_FatalError("Py_BuildValue k unsupported so far\n"); - /* unsigned long n; */ - /* n = va_arg(*p_va, unsigned long); */ - /* if (n > (unsigned long)PyInt_GetMax()) */ - /* return PyLong_FromUnsignedLong(n); */ - /* else */ - /* return PyInt_FromLong(n); */ + unsigned long n; + n = va_arg(*p_va, unsigned long); + if (n > (unsigned long)PyInt_GetMax()) + return PyLong_FromUnsignedLong(n); + else + return PyInt_FromLong(n); } #ifdef HAVE_LONG_LONG case 'L': - Py_FatalError("Py_BuildValue L unsupported for now\n"); - //return PyLong_FromLongLong((PY_LONG_LONG)va_arg(*p_va, PY_LONG_LONG)); + return PyLong_FromLongLong((PY_LONG_LONG)va_arg(*p_va, PY_LONG_LONG)); case 'K': - Py_FatalError("Py_BuildValue K unsupported for now\n"); - //return PyLong_FromUnsignedLongLong((PY_LONG_LONG)va_arg(*p_va, unsigned PY_LONG_LONG)); + return PyLong_FromUnsignedLongLong((PY_LONG_LONG)va_arg(*p_va, unsigned PY_LONG_LONG)); #endif #ifdef Py_USING_UNICODE case 'u': diff --git a/pypy/interpreter/test/test_interpreter.py 
b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -1,3 +1,4 @@ +from __future__ import with_statement MARKER = 42 class AppTestImpModule: @@ -34,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): @@ -138,3 +140,58 @@ ) # Doesn't end up in there when run with -A assert sys.path_importer_cache.get(lib_pypy) is None + + def test_rewrite_pyc_check_code_name(self): + # This one is adapted from cpython's Lib/test/test_import.py + from os import chmod + from os.path import join + from sys import modules, path + from shutil import rmtree + from tempfile import mkdtemp + code = """if 1: + import sys + code_filename = sys._getframe().f_code.co_filename + module_filename = __file__ + constant = 1 + def func(): + pass + func_filename = func.func_code.co_filename + """ + + module_name = "unlikely_module_name" + dir_name = mkdtemp(prefix='pypy_test') + file_name = join(dir_name, module_name + '.py') + with open(file_name, "wb") as f: + f.write(code) + compiled_name = file_name + ("c" if __debug__ else "o") + chmod(file_name, 0777) + + # Setup + 
sys_path = path[:] + orig_module = modules.pop(module_name, None) + assert modules.get(module_name) == None + path.insert(0, dir_name) + + # Test + import py_compile + py_compile.compile(file_name, dfile="another_module.py") + __import__(module_name, globals(), locals()) + mod = modules.get(module_name) + + try: + # Ensure proper results + assert mod != orig_module + assert mod.module_filename == compiled_name + assert mod.code_filename == file_name + assert mod.func_filename == file_name + finally: + # TearDown + path[:] = sys_path + if orig_module is not None: + modules[module_name] = orig_module + else: + try: + del modules[module_name] + except KeyError: + pass + rmtree(dir_name, True) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -46,4 +46,5 @@ return PyBuffer_New(150); """), ]) - module.buffer_new() + b = module.buffer_new() + raises(AttributeError, getattr, b, 'x') diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, 
rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -10,6 +10,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import Wrappable from pypy.interpreter.eval import Code +from pypy.interpreter.pycode import PyCode from pypy.rlib import streamio, jit, rposix from pypy.rlib.streamio import StreamErrors from pypy.rlib.rarithmetic import intmask @@ -31,6 +32,7 @@ else: SO = ".so" DEFAULT_SOABI = 'pypy-14' +CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() def get_so_extension(space): @@ -57,6 +59,12 @@ if os.path.exists(pyfile) and case_ok(pyfile): return PY_SOURCE, ".py", "U" + # on Windows, also check for a .pyw file + if CHECK_FOR_PYW: + pyfile = filepart + ".pyw" + if os.path.exists(pyfile) and case_ok(pyfile): + return PY_SOURCE, ".pyw", "U" + # The .py file does not exist. By default on PyPy, lonepycfiles # is False: if a .py file does not exist, we don't even try to # look for a lone .pyc file. 
@@ -84,6 +92,9 @@ # XXX that's slow def case_ok(filename): index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: @@ -774,10 +785,24 @@ if space.config.objspace.usepycfiles and write_pyc: write_compiled_module(space, code_w, cpathname, mode, mtime) + update_code_filenames(space, code_w, pathname) exec_code_module(space, w_mod, code_w) return w_mod +def update_code_filenames(space, code_w, pathname, oldname=None): + assert isinstance(code_w, PyCode) + if oldname is None: + oldname = code_w.co_filename + elif code_w.co_filename != oldname: + return + + code_w.co_filename = pathname + constants = code_w.co_consts_w + for const in constants: + if const is not None and isinstance(const, PyCode): + update_code_filenames(space, const, pathname, oldname) + def _get_long(s): a = ord(s[0]) b = ord(s[1]) diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. 
- def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -745,13 +737,6 @@ described there.""" raise NotImplementedError - at cpython_api([], lltype.Void) -def PyErr_SetInterrupt(space): - """This function simulates the effect of a SIGINT signal arriving --- the - next time PyErr_CheckSignals() is called, KeyboardInterrupt will be raised. 
- It may be called without holding the interpreter lock.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) def PySignal_SetWakeupFd(space, fd): """This utility function specifies a file descriptor to which a '\0' byte will @@ -850,13 +835,6 @@ successful invocation of Py_EnterRecursiveCall().""" raise NotImplementedError - at cpython_api([FILE, rffi.CCHARP, rffi.CCHARP, rffi.INT_real], PyObject) -def PyFile_FromFile(space, fp, name, mode, close): - """Create a new PyFileObject from the already-open standard C file - pointer, fp. The function close will be called when the file should be - closed. Return NULL on failure.""" - raise NotImplementedError - @cpython_api([PyFileObject], lltype.Void) def PyFile_IncUseCount(space, p): """Increments the PyFileObject's internal use count to indicate @@ -899,12 +877,6 @@ borrow_from() raise NotImplementedError - at cpython_api([PyFileObject, rffi.INT_real], lltype.Void) -def PyFile_SetBufSize(space, p, n): - """Available on systems with setvbuf() only. This should only be called - immediately after file object creation.""" - raise NotImplementedError - @cpython_api([PyFileObject, rffi.CCHARP], rffi.INT_real, error=0) def PyFile_SetEncoding(space, p, enc): """Set the file's encoding for Unicode output to enc. Return 1 on success and 0 @@ -941,12 +913,6 @@ appropriate exception will be set.""" raise NotImplementedError - at cpython_api([rffi.CCHARP, PyObject], rffi.INT_real, error=-1) -def PyFile_WriteString(space, s, p): - """Write string s to file object p. 
Return 0 on success or -1 on - failure; the appropriate exception will be set.""" - raise NotImplementedError - @cpython_api([], PyObject) def PyFloat_GetInfo(space): """Return a structseq instance which contains information about the @@ -1142,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1770,12 +1722,6 @@ """ raise NotImplementedError - at cpython_api([], lltype.Signed, error=CANNOT_FAIL) -def PyInt_GetMax(space): - """Return the system's idea of the largest integer it can handle (LONG_MAX, - as defined in the system header files).""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyInt_ClearFreeList(space): """Clear the integer free list. Return the number of items that could not @@ -1997,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. 
The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. In 2.x, this is just a alias @@ -2336,28 +2274,6 @@ (: on Unix, ; on Windows).""" raise NotImplementedError - at cpython_api([rffi.CCHARP, ], lltype.Void) -def PySys_WriteStdout(space, format): - """Write the output string described by format to sys.stdout. No - exceptions are raised, even if truncation occurs (see below). - - format should limit the total size of the formatted output string to - 1000 bytes or less -- after 1000 bytes, the output string is truncated. - In particular, this means that no unrestricted "%s" formats should occur; - these should be limited using "%.s" where is a decimal number - calculated so that plus the maximum size of other formatted text does not - exceed 1000 bytes. Also watch out for "%f", which can print hundreds of - digits for very large numbers. - - If a problem occurs, or sys.stdout is unset, the formatted message - is written to the real (C level) stdout.""" - raise NotImplementedError - - at cpython_api([rffi.CCHARP, ], lltype.Void) -def PySys_WriteStderr(space, format): - """As above, but write to sys.stderr or stderr instead.""" - raise NotImplementedError - @cpython_api([rffi.INT_real], lltype.Void) def Py_Exit(space, status): """Exit the current process. 
This calls Py_Finalize() and then calls the diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -722,31 +722,75 @@ newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) - def ll_split_chr(LIST, s, c): + def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) count = 1 i = 0 + if max == 0: + i = strlen while i < strlen: if chars[i] == c: count += 1 + if max >= 0 and count > max: + break i += 1 res = LIST.ll_newlist(count) items = res.ll_items() i = 0 j = 0 resindex = 0 + if max == 0: + j = strlen while j < strlen: if chars[j] == c: item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) resindex += 1 i = j + 1 + if max >= 0 and resindex >= max: + j = strlen + break j += 1 item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) return res + def ll_rsplit_chr(LIST, s, c, max): + chars = s.chars + strlen = len(chars) + count = 1 + i = 0 + if max == 0: + i = strlen + while i < strlen: + if chars[i] == c: + count += 1 + if max >= 0 and count > max: + break + i += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + i = strlen + j = strlen + resindex = count - 1 + assert resindex >= 0 + if max == 0: + j = 0 + while j > 0: + j -= 1 + if chars[j] == c: + item = items[resindex] = s.malloc(i - j - 1) + item.copy_contents(s, item, j + 1, 0, i - j - 1) + resindex -= 1 + i = j + if resindex == 0: + j = 0 + break + item = items[resindex] = s.malloc(i - j) + item.copy_contents(s, item, j, 0, i - j) + return res + @purefunction def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = 
self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not 
rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. # Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... 
assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): 
import signal diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -260,13 +259,13 @@ @classmethod def is_const(cls, v1): return isinstance(v1, str) and v1.startswith('ConstClass(') - + def match_var(self, v1, exp_v2): assert v1 != '_' if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 @@ -285,9 +284,9 @@ self.match_var(op.res, exp_res) self._assert(len(op.args) == len(exp_args), "wrong number of arguments") for arg, exp_arg in zip(op.args, exp_args): - self._assert(self.match_var(arg, exp_arg), "variable mismatch") + self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) self.match_descr(op.descr, exp_descr) - + def _next_op(self, iter_ops, assert_raises=False): try: diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from 
pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... + at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) @@ -113,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -37,6 +37,11 @@ self.frame_depth += size return newloc + def reserve_location_in_frame(self, size): + frame_depth = self.frame_depth + self.frame_depth += size + return frame_depth + # abstract methods that need to be overwritten for specific assemblers @staticmethod def frame_pos(loc, type): @@ -313,11 +318,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 
'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). """ for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -325,9 +331,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -118,13 +119,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) @@ -1454,6 +1454,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. 
self.run_source(''' def main(a, b): i = sa = 0 @@ -1461,11 +1463,10 @@ %s i += 1 return sa - ''' % code, 179, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_mod(self): avalues = ('a', 'b', 7, -42, 8) bvalues = ['b'] + range(-10, 0) + range(1,10) @@ -1486,6 +1487,8 @@ res1 += dd(a, b, a1, b1) res2 += dd(a, b, a2, b2) res3 += dd(a, b, a3, b3) + # The purpose of this test is to check that we get + # the correct results, not really to count operations. self.run_source(''' def main(a, b): i = sa = 0 @@ -1495,11 +1498,10 @@ %s i += 1 return sa - ''' % code, 450, ([a1, b1], 2000 * res1), - ([a2, b2], 2000 * res2), - ([a3, b3], 2000 * res3), - count_debug_merge_point=False) - + ''' % code, sys.maxint, ([a1, b1], 2000 * res1), + ([a2, b2], 2000 * res2), + ([a3, b3], 2000 * res3)) + def test_dont_trace_every_iteration(self): self.run_source(''' def main(a, b): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + 
hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -266,6 +274,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(d, [x, y]) assert res == d(x, y) @@ -276,6 +286,8 @@ x = inttype(random.randint(-100000, 100000)) y = inttype(random.randint(-100000, 100000)) if not y: continue + if (i & 31) == 0: + x = (x//y) * y # case where x is exactly divisible by y res = self.interpret(m, [x, y]) assert res == m(x, y) @@ -384,6 +396,18 @@ else: assert res == 123456789012345678 + def 
test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -746,11 +757,13 @@ return str.substring(start, end); } - public static Object[] ll_split_chr(String str, char c) { + public static Object[] ll_split_chr(String str, char c, int max) { ArrayList list = new ArrayList(); int lastidx = 0, idx = 0; while ((idx = str.indexOf(c, lastidx)) != -1) { + if (max >= 0 && list.size() >= max) + break; String sub = str.substring(lastidx, idx); list.add(sub); lastidx = idx+1; @@ -759,6 +772,21 @@ return list.toArray(new String[list.size()]); } + public static Object[] ll_rsplit_chr(String str, char c, int max) { + ArrayList list = new ArrayList(); + int lastidx = str.length(), idx = 0; + while ((idx = str.lastIndexOf(c, lastidx - 1)) != -1) + { + if (max >= 0 && list.size() >= max) + break; + String sub = str.substring(idx + 1, lastidx); + list.add(0, 
sub); + lastidx = idx; + } + list.add(0, str.substring(0, lastidx)); + return list.toArray(new String[list.size()]); + } + public static String ll_substring(String str, int start, int cnt) { return str.substring(start,start+cnt); } @@ -1158,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1170,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py --- a/pypy/jit/backend/x86/support.py +++ b/pypy/jit/backend/x86/support.py @@ -38,11 +38,7 @@ if sys.platform == 'win32': ensure_sse2_floats = lambda : None else: - _sse2_eci = ExternalCompilationInfo( + ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = ['-msse2', '-mfpmath=sse', '-DPYPY_CPU_HAS_STANDARD_PRECISION'], - separate_module_sources = ['void PYPY_NO_OP(void) {}'], - ) - ensure_sse2_floats = rffi.llexternal('PYPY_NO_OP', [], lltype.Void, - 
compilation_info=_sse2_eci, - sandboxsafe=True) + )) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/objspace/flow/model.py 
b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,7 +17,6 @@ ^pypy/doc/.+\.html$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ -^pypy/translator/c/src/dtoa.o$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ ^pypy/translator/c/src/libffi_msvc/.+\.dll$ ^pypy/translator/c/src/libffi_msvc/.+\.lib$ diff --git 
a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -0,0 +1,72 @@ +from pypy.module.cpyext.api import fopen, fclose, fwrite +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.tool.udir import udir +import pytest + +class TestFile(BaseApiTest): + + def test_file_fromstring(self, space, api): + filename = rffi.str2charp(str(udir / "_test_file")) + mode = rffi.str2charp("wb") + w_file = api.PyFile_FromString(filename, mode) + rffi.free_charp(filename) + rffi.free_charp(mode) + + assert api.PyFile_Check(w_file) + assert api.PyFile_CheckExact(w_file) + assert not api.PyFile_Check(space.wrap("text")) + + space.call_method(w_file, "write", space.wrap("text")) + space.call_method(w_file, "close") + assert (udir / "_test_file").read() == "text" + + def test_file_getline(self, space, api): + filename = rffi.str2charp(str(udir / "_test_file")) + + mode = rffi.str2charp("w") + w_file = api.PyFile_FromString(filename, mode) + space.call_method(w_file, "write", + space.wrap("line1\nline2\nline3\nline4")) + space.call_method(w_file, "close") + + rffi.free_charp(mode) + mode = rffi.str2charp("r") + w_file = api.PyFile_FromString(filename, mode) + rffi.free_charp(filename) + rffi.free_charp(mode) + + w_line = api.PyFile_GetLine(w_file, 0) + assert space.str_w(w_line) == "line1\n" + + w_line = api.PyFile_GetLine(w_file, 4) + assert space.str_w(w_line) == "line" + + w_line = api.PyFile_GetLine(w_file, 0) + assert space.str_w(w_line) == "2\n" + + # XXX We ought to raise an EOFError here, but don't + w_line = api.PyFile_GetLine(w_file, -1) + # assert api.PyErr_Occurred() is space.w_EOFError + assert space.str_w(w_line) == "line3\n" + + space.call_method(w_file, "close") + + @pytest.mark.xfail + def test_file_fromfile(self, space, api): + api.PyFile_Fromfile() + + @pytest.mark.xfail + def 
test_file_setbufsize(self, space, api): + api.PyFile_SetBufSize() + + def test_file_writestring(self, space, api, capfd): + s = rffi.str2charp("test\n") + try: + api.PyFile_WriteString(s, space.sys.get("stdout")) + finally: + rffi.free_charp(s) + out, err = capfd.readouterr() + out = out.replace('\r\n', '\n') + assert out == "test\n" + diff --git a/lib_pypy/pyrepl/completer.py b/lib_pypy/pyrepl/completer.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/completer.py @@ -0,0 +1,87 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import __builtin__ + +class Completer: + def __init__(self, ns): + self.ns = ns + + def complete(self, text): + if "." in text: + return self.attr_matches(text) + else: + return self.global_matches(text) + + def global_matches(self, text): + """Compute matches when text is a simple name. + + Return a list of all keywords, built-in functions and names + currently defines in __main__ that match. 
+ + """ + import keyword + matches = [] + n = len(text) + for list in [keyword.kwlist, + __builtin__.__dict__.keys(), + self.ns.keys()]: + for word in list: + if word[:n] == text and word != "__builtins__": + matches.append(word) + return matches + + def attr_matches(self, text): + """Compute matches when text contains a dot. + + Assuming the text is of the form NAME.NAME....[NAME], and is + evaluatable in the globals of __main__, it will be evaluated + and its attributes (as revealed by dir()) are used as possible + completions. (For class instances, class members are are also + considered.) + + WARNING: this can still invoke arbitrary C code, if an object + with a __getattr__ hook is evaluated. + + """ + import re + m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text) + if not m: + return [] + expr, attr = m.group(1, 3) + object = eval(expr, self.ns) + words = dir(object) + if hasattr(object, '__class__'): + words.append('__class__') + words = words + get_class_members(object.__class__) + matches = [] + n = len(attr) + for word in words: + if word[:n] == attr and word != "__builtins__": + matches.append("%s.%s" % (expr, word)) + return matches + +def get_class_members(klass): + ret = dir(klass) + if hasattr(klass, '__bases__'): + for base in klass.__bases__: + ret = ret + get_class_members(base) + return ret + + diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() @@ -40,8 +40,9 @@ translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", "struct", "md5", "cStringIO", "array", "_ffi", - # the following are needed for pyrepl (and hence for the interactive prompt/pdb) - "termios", "_minimal_curses", 
"fcntl", "signal", + # the following are needed for pyrepl (and hence for the + # interactive prompt/pdb) + "termios", "_minimal_curses", ])) working_oo_modules = default_modules.copy() @@ -162,6 +163,11 @@ cmdline="--allworkingmodules", negation=True), + StrOption("extmodules", + "Comma-separated list of third-party builtin modules", + cmdline="--ext", + default=None), + BoolOption("translationmodules", "use only those modules that are needed to run translate.py on pypy", default=False, @@ -355,8 +361,8 @@ config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) config.objspace.std.suggest(newshortcut=True) - if not IS_64_BITS: - config.objspace.std.suggest(withsmalllong=True) + #if not IS_64_BITS: + # config.objspace.std.suggest(withsmalllong=True) # extra costly optimizations only go in level 3 if level == '3': diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/module/posix/__init__.py 
b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -31,6 +50,10 @@ if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' + if hasattr(os, 'wait3'): + appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { 'open' : 'interp_posix.open', @@ -156,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' 
+ name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/lib_pypy/pyrepl/input.py b/lib_pypy/pyrepl/input.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/input.py @@ -0,0 +1,97 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# (naming modules after builtin functions is not such a hot idea...) + +# an KeyTrans instance translates Event objects into Command objects + +# hmm, at what level do we want [C-i] and [tab] to be equivalent? +# [meta-a] and [esc a]? obviously, these are going to be equivalent +# for the UnixConsole, but should they be for PygameConsole? 
+ +# it would in any situation seem to be a bad idea to bind, say, [tab] +# and [C-i] to *different* things... but should binding one bind the +# other? + +# executive, temporary decision: [tab] and [C-i] are distinct, but +# [meta-key] is identified with [esc key]. We demand that any console +# class does quite a lot towards emulating a unix terminal. + +from pyrepl import unicodedata_ + +class InputTranslator(object): + def push(self, evt): + pass + def get(self): + pass + def empty(self): + pass + +class KeymapTranslator(InputTranslator): + def __init__(self, keymap, verbose=0, + invalid_cls=None, character_cls=None): + self.verbose = verbose + from pyrepl.keymap import compile_keymap, parse_keys + self.keymap = keymap + self.invalid_cls = invalid_cls + self.character_cls = character_cls + d = {} + for keyspec, command in keymap: + keyseq = tuple(parse_keys(keyspec)) + d[keyseq] = command + if self.verbose: + print d + self.k = self.ck = compile_keymap(d, ()) + self.results = [] + self.stack = [] + def push(self, evt): + if self.verbose: + print "pushed", evt.data, + key = evt.data + d = self.k.get(key) + if isinstance(d, dict): + if self.verbose: + print "transition" + self.stack.append(key) + self.k = d + else: + if d is None: + if self.verbose: + print "invalid" + if self.stack or len(key) > 1 or unicodedata_.category(key) == 'C': + self.results.append( + (self.invalid_cls, self.stack + [key])) + else: + # small optimization: + self.k[key] = self.character_cls + self.results.append( + (self.character_cls, [key])) + else: + if self.verbose: + print "matched", d + self.results.append((d, self.stack + [key])) + self.stack = [] + self.k = self.ck + def get(self): + if self.results: + return self.results.pop(0) + else: + return None + def empty(self): + return not self.results diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -604,6 +604,18 @@ else: 
self._as_rdict().impl_fallback_setitem(w_key, w_value) + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + key = space.str_w(w_key) + w_result = self.impl_getitem_str(key) + if w_result is not None: + return w_result + self.impl_setitem_str(key, w_default) + return w_default + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/rlib/test/test_rcoroutine.py b/pypy/rlib/test/test_rcoroutine.py --- a/pypy/rlib/test/test_rcoroutine.py +++ b/pypy/rlib/test/test_rcoroutine.py @@ -1,13 +1,16 @@ """ testing coroutines at interprepter level """ - +import py import os from pypy import conftest; conftest.translation_test_so_skip_if_appdirect() from pypy.rlib.rcoroutine import make_coroutine_classes from pypy.translator.c.test.test_stackless import StacklessTest from pypy.translator.c import gc +def setup_module(mod): + py.test.importorskip('greenlet') + d = make_coroutine_classes(object) syncstate = d['syncstate'] Coroutine = d['Coroutine'] diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git 
a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/module/cpyext/test/test_sysmodule.py b/pypy/module/cpyext/test/test_sysmodule.py --- a/pypy/module/cpyext/test/test_sysmodule.py +++ b/pypy/module/cpyext/test/test_sysmodule.py @@ -14,6 +14,21 @@ assert module.get("excepthook") assert not module.get("spam_spam_spam") + def test_writestdout(self): + module = self.import_extension('foo', [ + ("writestdout", "METH_NOARGS", + """ + PySys_WriteStdout("format: %d\\n", 42); + Py_RETURN_NONE; + """)]) + import sys, StringIO + sys.stdout = StringIO.StringIO() + try: + module.writestdout() + assert sys.stdout.getvalue() == "format: 42\n" + finally: + sys.stdout = sys.__stdout__ + class TestSysModule(BaseApiTest): def test_sysmodule(self, space, api): buf = rffi.str2charp("last_tb") diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -81,6 +81,9 @@ 
oldcolors = GetConsoleInfo(handle).wAttributes attr |= (oldcolors & 0x0f0) SetConsoleTextAttribute(handle, attr) + while len(text) > 32768: + file.write(text[:32768]) + text = text[32768:] file.write(text) SetConsoleTextAttribute(handle, oldcolors) else: diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/module/cpyext/include/pyerrors.h b/pypy/module/cpyext/include/pyerrors.h --- a/pypy/module/cpyext/include/pyerrors.h +++ b/pypy/module/cpyext/include/pyerrors.h @@ -15,6 +15,20 @@ PyObject *PyErr_NewExceptionWithDoc(char *name, char *doc, PyObject *base, PyObject *dict); PyObject *PyErr_Format(PyObject *exception, const char *format, ...); +/* These APIs aren't really part of the error implementation, but + often needed to format error messages; the native C lib APIs are + not available on all platforms, which is why we provide emulations + for those platforms in Python/mysnprintf.c, + WARNING: The return value of snprintf varies across platforms; do + not rely on any particular behavior; eventually the C99 defn may + be reliable. 
+*/ +#if defined(MS_WIN32) && !defined(HAVE_SNPRINTF) +# define HAVE_SNPRINTF +# define snprintf _snprintf +# define vsnprintf _vsnprintf +#endif + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/module/cpyext/include/sysmodule.h b/pypy/module/cpyext/include/sysmodule.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/sysmodule.h @@ -0,0 +1,13 @@ +#ifndef Py_SYSMODULE_H +#define Py_SYSMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...); +PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_SYSMODULE_H */ diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass 
= JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) 
PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -27,7 +27,7 @@ def optimize_loop_1(metainterp_sd, loop, enable_opts, inline_short_preamble=True, retraced=False): - """Optimize loop.operations to remove internal overheadish operations. + """Optimize loop.operations to remove internal overheadish operations. 
""" optimizations = [] unroll = 'unroll' in enable_opts @@ -43,7 +43,7 @@ if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: optimizations.append(OptSimplify()) - + if inline_short_preamble: optimizations = [OptInlineShortPreamble(retraced)] + optimizations diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git 
a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,8 +8,8 @@ for descr in all_optiondescrs: prefix = descr._name c = 
config.Config(descr) - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." + p + ".rst" f = thisdir.join(basename) f.ensure() diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -1421,9 +1423,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- 
a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/lib_pypy/pyrepl/unix_eventqueue.py 
b/lib_pypy/pyrepl/unix_eventqueue.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/unix_eventqueue.py @@ -0,0 +1,86 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# Bah, this would be easier to test if curses/terminfo didn't have so +# much non-introspectable global state. 
+ +from pyrepl import keymap +from pyrepl.console import Event +from pyrepl import curses +from termios import tcgetattr, VERASE +import os + +_keynames = { + "delete" : "kdch1", + "down" : "kcud1", + "end" : "kend", + "enter" : "kent", + "f1" : "kf1", "f2" : "kf2", "f3" : "kf3", "f4" : "kf4", + "f5" : "kf5", "f6" : "kf6", "f7" : "kf7", "f8" : "kf8", + "f9" : "kf9", "f10" : "kf10", "f11" : "kf11", "f12" : "kf12", + "f13" : "kf13", "f14" : "kf14", "f15" : "kf15", "f16" : "kf16", + "f17" : "kf17", "f18" : "kf18", "f19" : "kf19", "f20" : "kf20", + "home" : "khome", + "insert" : "kich1", + "left" : "kcub1", + "page down" : "knp", + "page up" : "kpp", + "right" : "kcuf1", + "up" : "kcuu1", + } + +class EventQueue(object): + def __init__(self, fd): + our_keycodes = {} + for key, tiname in _keynames.items(): + keycode = curses.tigetstr(tiname) + if keycode: + our_keycodes[keycode] = unicode(key) + if os.isatty(fd): + our_keycodes[tcgetattr(fd)[6][VERASE]] = u'backspace' + self.k = self.ck = keymap.compile_keymap(our_keycodes) + self.events = [] + self.buf = [] + def get(self): + if self.events: + return self.events.pop(0) + else: + return None + def empty(self): + return not self.events + def insert(self, event): + self.events.append(event) + def push(self, char): + if char in self.k: + k = self.k[char] + if isinstance(k, dict): + self.buf.append(char) + self.k = k + else: + self.events.append(Event('key', k, ''.join(self.buf) + char)) + self.buf = [] + self.k = self.ck + elif self.buf: + self.events.extend([Event('key', c, c) for c in self.buf]) + self.buf = [] + self.k = self.ck + self.push(char) + else: + self.events.append(Event('key', char, char)) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/simple_interact.py @@ -0,0 +1,64 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this 
software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""This is an alternative to python_reader which tries to emulate +the CPython prompt as closely as possible, with the exception of +allowing multiline input and multiline history entries. +""" + +import sys +from pyrepl.readline import multiline_input, _error, _get_reader + +def check(): # returns False if there is a problem initializing the state + try: + _get_reader() + except _error: + return False + return True + +def run_multiline_interactive_console(mainmodule=None): + import code + if mainmodule is None: + import __main__ as mainmodule + console = code.InteractiveConsole(mainmodule.__dict__) + + def more_lines(unicodetext): + # ooh, look at the hack: + src = "#coding:utf-8\n"+unicodetext.encode('utf-8') + try: + code = console.compile(src, '', 'single') + except (OverflowError, SyntaxError, ValueError): + return False + else: + return code is None + + while 1: + try: + ps1 = getattr(sys, 'ps1', '>>> ') + ps2 = getattr(sys, 'ps2', '... 
') + try: + statement = multiline_input(more_lines, ps1, ps2) + except EOFError: + break + more = console.push(statement) + assert not more + except KeyboardInterrupt: + console.write("\nKeyboardInterrupt\n") + console.resetbuffer() diff --git a/lib_pypy/pyrepl/__init__.py b/lib_pypy/pyrepl/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -26,7 +26,10 @@ else { string res = ""; foreach(char ch in x) - res+= string.Format("\\x{0:X2}", (int)ch); + if (ch >= 32 && ch < 128) + res+= ch; + else + res+= string.Format("\\x{0:X2}", (int)ch); return string.Format("'{0}'", res); } } @@ -498,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -717,9 +725,31 @@ return s.Substring(start, count); } - public static string[] ll_split_chr(string s, char ch) + public static string[] ll_split_chr(string s, char ch, int max) { - return s.Split(ch); + if (max < 0) + return s.Split(ch); + else + return s.Split(new Char[] {ch}, max + 1); + } + + public static string[] ll_rsplit_chr(string s, char ch, int max) + { + string[] splits = s.Split(ch); + if (max < 0 || splits.Length <= max + 1) + return splits; + else { + /* XXX not very efficient */ + string first = splits[0]; + // join the first (length - max - 1) items + int i; + for (i = 1; i < splits.Length - max; i++) + first += ch + splits[i]; + splits[0] = first; + Array.Copy(splits, i, splits, 1, max); + Array.Resize(ref splits, max + 1); + return splits; + } } public static bool ll_contains(string s, char ch) @@ -1123,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return 
Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. 
Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). 
""" - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/jvm/src/pypy/StatResult.java 
b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + 
super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -38,7 +38,9 @@ PyObject * Py_BuildValue(const char *, ...); +PyObject * Py_VaBuildValue(const char *, va_list); PyObject * _Py_BuildValue_SizeT(const char *, ...); +PyObject * _Py_VaBuildValue_SizeT(const char *, va_list); int _PyArg_NoKeywords(const char *funcname, PyObject *kw); int PyArg_UnpackTuple(PyObject *args, const char 
*name, Py_ssize_t min, Py_ssize_t max, ...); diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -499,10 +499,14 @@ def getanyitem(str): return str.basecharclass() - def method_split(str, patt): # XXX + def method_split(str, patt, max=-1): getbookkeeper().count("str_split", str, patt) return getbookkeeper().newlist(str.basestringclass()) + def method_rsplit(str, patt, max=-1): + getbookkeeper().count("str_rsplit", str, patt) + return getbookkeeper().newlist(str.basestringclass()) + def method_replace(str, s1, s2): return str.basestringclass() diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py --- a/pypy/module/_stackless/test/conftest.py +++ 
b/pypy/module/_stackless/test/conftest.py @@ -2,6 +2,7 @@ import py.test def pytest_runtest_setup(item): + py.test.importorskip('greenlet') if sys.platform == 'win32': py.test.skip("stackless tests segfault on Windows") diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,7 +4,6 @@ .. contents:: - .. _`try out the translator`: Trying out the translator @@ -18,9 +17,7 @@ * Download and install Pygame_. - * Download and install `Dot Graphviz`_ (optional if you have an internet - connection: the flowgraph viewer then connects to - codespeak.net and lets it convert the flowgraph by a graphviz server). + * Download and install `Dot Graphviz`_ To start the interactive translator shell do:: diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken 
intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. 
- # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -857,6 +857,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -576,20 +576,56 @@ res = self.interpret(f, [i, newlines]) assert res == f(i, newlines) - def test_split(self): + def _make_split_test(self, split_fn): const = self.const def fn(i): s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = s.split(const('.')) + l = getattr(s, split_fn)(const('.')) sum = 0 for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) return sum + len(l) * 100 + return fn + + def test_split(self): + fn = self._make_split_test('split') for i in range(5): res = self.interpret(fn, [i]) assert res == fn(i) + def test_rsplit(self): + fn = self._make_split_test('rsplit') + for i in range(5): + res = self.interpret(fn, [i]) + assert res == fn(i) + + def _make_split_limit_test(self, split_fn): + const = self.const + def fn(i, j): + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.'), j) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - 
ord(const('0')[0]) + return sum + len(l) * 100 + return fn + + def test_split_limit(self): + fn = self._make_split_limit_test('split') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + + def test_rsplit_limit(self): + fn = self._make_split_limit_test('rsplit') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + def test_contains(self): const = self.const constchar = self.constchar diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x 
= interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/dotviewer/conftest.py b/dotviewer/conftest.py --- a/dotviewer/conftest.py +++ b/dotviewer/conftest.py @@ -6,4 +6,6 @@ dest="pygame", default=False, help="allow interactive tests using Pygame") -option = py.test.config.option +def pytest_configure(config): + global option + option = config.option diff --git a/.hgsub b/.hgsub deleted file mode 100644 --- a/.hgsub +++ /dev/null @@ -1,2 +0,0 @@ -greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c -lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,11 +1645,11 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc - movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ - movq\t%rsi, %rcx\t/* 2nd argument, which is gcrootanchor */ + /* %rdi is the 1st argument, which is the 
callback */ + /* %rsi is the 2nd argument, which is gcrootanchor */ movq\t%rsp, %rax\t/* my frame top address */ pushq\t%rax\t\t/* ASM_FRAMEDATA[8] */ pushq\t%rbp\t\t/* ASM_FRAMEDATA[7] */ @@ -1662,15 +1662,15 @@ /* Add this ASM_FRAMEDATA to the front of the circular linked */ /* list. Let's call it 'self'. */ - movq\t8(%rcx), %rax\t/* next = gcrootanchor->next */ + movq\t8(%rsi), %rax\t/* next = gcrootanchor->next */ pushq\t%rax\t\t\t\t/* self->next = next */ - pushq\t%rcx\t\t\t/* self->prev = gcrootanchor */ - movq\t%rsp, 8(%rcx)\t/* gcrootanchor->next = self */ + pushq\t%rsi\t\t\t/* self->prev = gcrootanchor */ + movq\t%rsp, 8(%rsi)\t/* gcrootanchor->next = self */ movq\t%rsp, 0(%rax)\t\t\t/* next->prev = self */ .cfi_def_cfa_offset 80\t/* 9 pushes + the retaddr = 80 bytes */ /* note: the Mac OS X 16 bytes aligment must be respected. */ - call\t*%rdx\t\t/* invoke the callback */ + call\t*%rdi\t\t/* invoke the callback */ /* Detach this ASM_FRAMEDATA from the circular linked list */ popq\t%rsi\t\t/* prev = self->prev */ @@ -1687,10 +1687,16 @@ popq\t%rcx\t\t/* ignored ASM_FRAMEDATA[8] */ /* the return value is the one of the 'call' above, */ - /* because %rax (and possibly %rdx) are unmodified */ + /* because %rax is unmodified */ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... + s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. 
better than default of 131k for most cases # in case it didn't work diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. - self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # 
direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. 
- elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 @@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1126,7 +1126,7 @@ """ if not isinstance(source, str): source = py.std.inspect.getsource(source).lstrip() - while source.startswith('@py.test.mark.'): + while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function # object, we may ignore them assert '\n' in source diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ 
b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,8 +5,8 @@ soon as possible (at least in a simple case). """ -import weakref -import py +import weakref, random +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -67,6 +67,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, enable_opts='', **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -82,8 +96,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts=enable_opts) + patch = get_functions_to_patch() + old_value = {} + try: + for (obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts=enable_opts) + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES) cbuilder.compile() @@ -122,7 +149,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark GC. 
def setup_class(cls): funcs = [] @@ -173,15 +200,21 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True) + gcrootfinder=cls.gcrootfinder, jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -635,3 +668,10 @@ gcrootfinder="asmgcc", jit=True, enable_opts=ALL_OPTS_NAMES) assert int(res) == 20 + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -214,3 +214,21 @@ assert res == 1024*1024 res = thread.stack_size(0) assert res == 2*1024*1024 + + def test_interrupt_main(self): + import thread, time + import signal + + def f(): + time.sleep(0.5) + thread.interrupt_main() + + def busy_wait(): + for x in range(1000): + time.sleep(0.01) + + # This is normally called by app_main.py + signal.signal(signal.SIGINT, signal.default_int_handler) + + thread.start_new_thread(f, ()) + raises(KeyboardInterrupt, busy_wait) diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,13 +22,21 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def 
__init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - return jit.hint(self, promote=True).items + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items + return self.items def getitem(self, idx): return self.getitems()[idx] @@ -44,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -620,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/lib_pypy/pyrepl/module_lister.py b/lib_pypy/pyrepl/module_lister.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/module_lister.py @@ -0,0 +1,70 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby 
granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl.completing_reader import uniqify +import os, sys + +# for the completion support. +# this is all quite nastily written. +_packages = {} + +def _make_module_list_dir(dir, suffs, prefix=''): + l = [] + for fname in os.listdir(dir): + file = os.path.join(dir, fname) + if os.path.isfile(file): + for suff in suffs: + if fname.endswith(suff): + l.append( prefix + fname[:-len(suff)] ) + break + elif os.path.isdir(file) \ + and os.path.exists(os.path.join(file, "__init__.py")): + l.append( prefix + fname ) + _packages[prefix + fname] = _make_module_list_dir( + file, suffs, prefix + fname + '.' ) + l = uniqify(l) + l.sort() + return l + +def _make_module_list(): + import imp + suffs = [x[0] for x in imp.get_suffixes() if x[0] != '.pyc'] + def compare(x, y): + c = -cmp(len(x), len(y)) + if c: + return c + else: + return -cmp(x, y) + suffs.sort(compare) + _packages[''] = list(sys.builtin_module_names) + for dir in sys.path: + if dir == '': + dir = '.' 
+ if os.path.isdir(dir): + _packages[''] += _make_module_list_dir(dir, suffs) + _packages[''].sort() + +def find_modules(stem): + l = stem.split('.') + pack = '.'.join(l[:-1]) + try: + mods = _packages[pack] + except KeyError: + raise ImportError, "can't find \"%s\" package"%pack + return [mod for mod in mods if mod.startswith(stem)] diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -7,15 +7,16 @@ bootstrap_function, PyObjectFields, cpython_struct, CONST_STRING, CONST_WSTRING) from pypy.module.cpyext.pyerrors import PyErr_BadArgument -from pypy.module.cpyext.pyobject import PyObject, from_ref, make_typedescr +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.objspace.std import unicodeobject, unicodetype from pypy.rlib import runicode import sys -## See comment in stringobject.py. PyUnicode_FromUnicode(NULL, size) is not -## yet supported. +## See comment in stringobject.py. PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) @@ -28,7 +29,8 @@ make_typedescr(space.w_unicode.instancetypedef, basestruct=PyUnicodeObject.TO, attach=unicode_attach, - dealloc=unicode_dealloc) + dealloc=unicode_dealloc, + realize=unicode_realize) # Buffer for the default encoding (used by PyUnicde_GetDefaultEncoding) DEFAULT_ENCODING_SIZE = 100 @@ -39,12 +41,39 @@ Py_UNICODE = lltype.UniChar +def new_empty_unicode(space, length): + """ + Allocatse a PyUnicodeObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until unicode_realize() is + called. 
+ """ + typedescr = get_typedescr(space.w_unicode.instancetypedef) + py_obj = typedescr.allocate(space, space.w_unicode) + py_uni = rffi.cast(PyUnicodeObject, py_obj) + + buflen = length + 1 + py_uni.c_size = length + py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True) + return py_uni + def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) +def unicode_realize(space, py_obj): + """ + Creates the unicode in the interpreter. The PyUnicodeObject buffer must not + be modified after this call. + """ + py_uni = rffi.cast(PyUnicodeObject, py_obj) + s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + @cpython_api([PyObject], lltype.Void, external=False) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) @@ -128,7 +157,9 @@ def PyUnicode_AsUnicode(space, ref): """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" - if not PyUnicode_Check(space, ref): + # Don't use PyUnicode_Check, it will realize the object :-( + w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) + if not space.is_true(space.issubtype(w_type, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @@ -237,10 +268,11 @@ object. If the buffer is not NULL, the return value might be a shared object. 
Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" - if not wchar_p: - raise NotImplementedError - s = rffi.wcharpsize2unicode(wchar_p, length) - return space.wrap(s) + if wchar_p: + s = rffi.wcharpsize2unicode(wchar_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_unicode(space, length)) @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromWideChar(space, wchar_p, length): @@ -330,6 +362,29 @@ w_str = space.wrap(rffi.charpsize2str(s, size)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) + at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def PyUnicode_Resize(space, ref, newsize): + # XXX always create a new string so far + py_uni = rffi.cast(PyUnicodeObject, ref[0]) + if not py_uni.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "PyUnicode_Resize called on already created string")) + try: + py_newuni = new_empty_unicode(space, newsize) + except MemoryError: + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_uni.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newuni.c_buffer[i] = py_uni.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newuni) + return 0 + @cpython_api([PyObject], PyObject) def PyUnicode_AsUTF8String(space, w_unicode): """Encode a Unicode object using UTF-8 and return the result as Python string diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -11,6 +11,8 @@ /* the -3 option will probably not be implemented */ #define Py_Py3kWarningFlag 0 +#define Py_FrozenFlag 0 + #ifdef __cplusplus } #endif diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -487,6 +487,7 @@ """) def test_range_iter(self): + py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,10 +1010,11 @@ """) def test_func_defaults(self): + py.test.skip("skipped until we fix defaults") def main(n): i = 1 while i < n: - i += len(xrange(i)) / i + i += len(xrange(i+1)) - i return i log = self.run(main, [10000]) @@ -1023,19 +1025,18 @@ guard_true(i10, descr=) # This can be improved if the JIT realized the lookup of i5 produces # a constant and thus can be removed entirely - i12 = int_sub(i5, 1) - i13 = uint_floordiv(i12, i7) + i120 = int_add(i5, 1) + i140 = int_lt(0, i120) + guard_true(i140, descr=) + i13 = uint_floordiv(i5, i7) i15 = int_add(i13, 1) i17 = int_lt(i15, 0) - guard_false(i17, descr=) - i18 = int_floordiv(i15, i5) - i19 = int_xor(i15, i5) - i20 = int_mod(i15, i5) - i21 = int_is_true(i20) - i22 = int_add_ovf(i5, i18) - 
guard_no_overflow(descr=) + guard_false(i17, descr=) + i20 = int_sub(i15, i5) + i21 = int_add_ovf(i5, i20) + guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, p3, p4, i22, i6, i7, p8, p9, descr=) + jump(p0, p1, p2, p3, p4, i21, i6, i7, p8, p9, descr=) """) def test__ffi_call_releases_gil(self): @@ -1044,16 +1045,16 @@ import time from threading import Thread from _ffi import CDLL, types - ### + # libc = CDLL(libc_name) sleep = libc.getfunc('sleep', [types.uint], types.uint) delays = [0]*n + [1] - ### + # def loop_of_sleeps(i, delays): import time for delay in delays: sleep(delay) # ID: sleep - ### + # threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)] start = time.time() for i, thread in enumerate(threads): @@ -1061,10 +1062,42 @@ for thread in threads: thread.join() end = time.time() - ### return end - start - ### + # log = self.run(main, [get_libc_name(), 200], threshold=150) assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead loops = log.loops_by_id('sleep') assert len(loops) == 1 # make sure that we actually JITted the loop + + def test_unpack_iterable_non_list_tuple(self): + def main(n): + import array + + items = [array.array("i", [1])] * n + total = 0 + for a, in items: + total += a + return total + + log = self.run(main, [1000000]) + assert log.result == 1000000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i16 = int_ge(i12, i13) + guard_false(i16, descr=) + p17 = getarrayitem_gc(p15, i12, descr=) + i19 = int_add(i12, 1) + setfield_gc(p4, i19, descr=) + guard_nonnull_class(p17, 146982464, descr=) + i21 = getfield_gc(p17, descr=) + i23 = int_lt(0, i21) + guard_true(i23, descr=) + i24 = getfield_gc(p17, descr=) + i25 = getarrayitem_raw(i24, 0, descr=) + i27 = int_lt(1, i21) + guard_false(i27, descr=) + i28 = int_add_ovf(i10, i25) + guard_no_overflow(descr=) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) + """) diff --git 
a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) 
See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/lib_pypy/pyrepl/fancy_termios.py b/lib_pypy/pyrepl/fancy_termios.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/fancy_termios.py @@ -0,0 +1,52 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +import termios + +class TermState: + def __init__(self, tuples): + self.iflag, self.oflag, self.cflag, self.lflag, \ + self.ispeed, self.ospeed, self.cc = tuples + def as_list(self): + return [self.iflag, self.oflag, self.cflag, self.lflag, + self.ispeed, self.ospeed, self.cc] + + def copy(self): + return self.__class__(self.as_list()) + +def tcgetattr(fd): + return TermState(termios.tcgetattr(fd)) + +def tcsetattr(fd, when, attrs): + termios.tcsetattr(fd, when, attrs.as_list()) + +class Term(TermState): + TS__init__ = TermState.__init__ + def __init__(self, fd=0): + self.TS__init__(termios.tcgetattr(fd)) + self.fd = fd + self.stack = [] + def save(self): + self.stack.append( self.as_list() ) + def set(self, when=termios.TCSANOW): + termios.tcsetattr(self.fd, when, self.as_list()) + def restore(self): + self.TS__init__(self.stack.pop()) + self.set() + diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git 
a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -45,3 +47,29 @@ space.warn('PyImport_ImportModuleNoBlock() is not non-blocking', space.w_RuntimeWarning) return PyImport_Import(space, space.wrap(rffi.charp2str(name))) + + at cpython_api([PyObject], PyObject) +def PyImport_ReloadModule(space, w_mod): + from pypy.module.imp.importing import reload + return reload(space, w_mod) + + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. 
First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. + Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -1,5 +1,5 @@ -from pypy.module.cpyext.api import cpython_api, generic_cpy_call, CANNOT_FAIL,\ - cpython_struct +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct) from pypy.rpython.lltypesystem import rffi, lltype PyInterpreterState = lltype.Ptr(cpython_struct("PyInterpreterState", ())) @@ -77,6 +77,52 @@ state = space.fromcache(InterpreterState) return state.get_thread_state(space) + at cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) +def PyThreadState_Swap(space, tstate): + """Swap the current thread state with the thread state given by the argument + tstate, 
which may be NULL. The global interpreter lock must be held.""" + # All cpyext calls release and acquire the GIL, so this function has no + # side-effects + if tstate: + return lltype.nullptr(PyThreadState.TO) + else: + state = space.fromcache(InterpreterState) + return state.get_thread_state(space) + + at cpython_api([PyThreadState], lltype.Void) +def PyEval_AcquireThread(space, tstate): + """Acquire the global interpreter lock and set the current thread state to + tstate, which should not be NULL. The lock must have been created earlier. + If this thread already has the lock, deadlock ensues. This function is not + available when thread support is disabled at compile time.""" + # All cpyext calls release and acquire the GIL, so this is not necessary. + pass + + at cpython_api([PyThreadState], lltype.Void) +def PyEval_ReleaseThread(space, tstate): + """Reset the current thread state to NULL and release the global interpreter + lock. The lock must have been created earlier and must be held by the current + thread. The tstate argument, which must not be NULL, is only used to check + that it represents the current thread state --- if it isn't, a fatal error is + reported. This function is not available when thread support is disabled at + compile time.""" + # All cpyext calls release and acquire the GIL, so this is not necessary. + pass + +PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', + typedef='PyGILState_STATE', + compilation_info=CConfig._compilation_info_) + + at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) +def PyGILState_Ensure(space): + # All cpyext calls release and acquire the GIL, so this is not necessary. + return 0 + + at cpython_api([PyGILState_STATE], lltype.Void) +def PyGILState_Release(space, state): + # All cpyext calls release and acquire the GIL, so this is not necessary. 
+ return + @cpython_api([], PyInterpreterState, error=CANNOT_FAIL) def PyInterpreterState_Head(space): """Return the interpreter state object at the head of the list of all such objects. diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). 
# if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return 
modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): @@ -286,7 +319,6 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class descr_virtual_token = vrefinfo.descr_virtual_token - descr_virtualref_index = vrefinfo.descr_virtualref_index # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, @@ -296,7 +328,6 @@ tokenbox = BoxInt() self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) - vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) def optimize_VIRTUAL_REF_FINISH(self, op): # Set the 'forced' field of the virtual_ref. diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -34,43 +34,7 @@ PyThreadState_Clear().""" raise NotImplementedError - at cpython_api([PyThreadState], PyThreadState, error=CANNOT_FAIL) -def PyThreadState_Swap(space, tstate): - """Swap the current thread state with the thread state given by the argument - tstate, which may be NULL. 
The global interpreter lock must be held.""" - raise NotImplementedError - - at cpython_api([PyThreadState], lltype.Void) -def PyEval_AcquireThread(space, tstate): - """Acquire the global interpreter lock and set the current thread state to - tstate, which should not be NULL. The lock must have been created earlier. - If this thread already has the lock, deadlock ensues. This function is not - available when thread support is disabled at compile time.""" - raise NotImplementedError - - at cpython_api([PyThreadState], lltype.Void) -def PyEval_ReleaseThread(space, tstate): - """Reset the current thread state to NULL and release the global interpreter - lock. The lock must have been created earlier and must be held by the current - thread. The tstate argument, which must not be NULL, is only used to check - that it represents the current thread state --- if it isn't, a fatal error is - reported. This function is not available when thread support is disabled at - compile time.""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def Py_MakePendingCalls(space): return 0 -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) - - at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) -def PyGILState_Ensure(space): - return 0 - - at cpython_api([PyGILState_STATE], lltype.Void) -def PyGILState_Release(space, state): - return - diff --git a/pypy/rlib/_rweakkeydict.py b/pypy/rlib/_rweakkeydict.py --- a/pypy/rlib/_rweakkeydict.py +++ b/pypy/rlib/_rweakkeydict.py @@ -123,7 +123,7 @@ @jit.dont_look_inside def ll_get(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK #llop.debug_print(lltype.Void, i, 'get', hex(hash), # ll_debugrepr(d.entries[i].key), # ll_debugrepr(d.entries[i].value)) @@ -143,7 +143,7 @@ def ll_set_nonnull(d, llkey, llvalue): hash = 
compute_identity_hash(llkey) keyref = weakref_create(llkey) # GC effects here, before the rest - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK everused = d.entries.everused(i) d.entries[i].key = keyref d.entries[i].value = llvalue @@ -160,7 +160,7 @@ @jit.dont_look_inside def ll_set_null(d, llkey): hash = compute_identity_hash(llkey) - i = rdict.ll_dict_lookup(d, llkey, hash) + i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK if d.entries.everused(i): # If the entry was ever used, clean up its key and value. # We don't store a NULL value, but a dead weakref, because diff --git a/pypy/module/cpyext/src/sysmodule.c b/pypy/module/cpyext/src/sysmodule.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/sysmodule.c @@ -0,0 +1,103 @@ +#include + +/* Reimplementation of PyFile_WriteString() no calling indirectly + PyErr_CheckSignals(): avoid the call to PyObject_Str(). */ + +static int +sys_pyfile_write_unicode(PyObject *unicode, PyObject *file) +{ + PyObject *writer = NULL, *args = NULL, *result = NULL; + int err; + + if (file == NULL) + return -1; + + writer = PyObject_GetAttrString(file, "write"); + if (writer == NULL) + goto error; + + args = PyTuple_Pack(1, unicode); + if (args == NULL) + goto error; + + result = PyEval_CallObject(writer, args); + if (result == NULL) { + goto error; + } else { + err = 0; + goto finally; + } + +error: + err = -1; +finally: + Py_XDECREF(writer); + Py_XDECREF(args); + Py_XDECREF(result); + return err; +} + +static int +sys_pyfile_write(const char *text, PyObject *file) +{ + PyObject *unicode = NULL; + int err; + + if (file == NULL) + return -1; + + unicode = PyUnicode_FromString(text); + if (unicode == NULL) + return -1; + + err = sys_pyfile_write_unicode(unicode, file); + Py_DECREF(unicode); + return err; +} + +/* APIs to write to sys.stdout or sys.stderr using a printf-like interface. 
+ */ + +static void +sys_write(char *name, FILE *fp, const char *format, va_list va) +{ + PyObject *file; + PyObject *error_type, *error_value, *error_traceback; + char buffer[1001]; + int written; + + PyErr_Fetch(&error_type, &error_value, &error_traceback); + file = PySys_GetObject(name); + written = vsnprintf(buffer, sizeof(buffer), format, va); + if (sys_pyfile_write(buffer, file) != 0) { + PyErr_Clear(); + fputs(buffer, fp); + } + if (written < 0 || (size_t)written >= sizeof(buffer)) { + const char *truncated = "... truncated"; + if (sys_pyfile_write(truncated, file) != 0) + fputs(truncated, fp); + } + PyErr_Restore(error_type, error_value, error_traceback); +} + +void +PySys_WriteStdout(const char *format, ...) +{ + va_list va; + + va_start(va, format); + sys_write("stdout", stdout, format, va); + va_end(va); +} + +void +PySys_WriteStderr(const char *format, ...) +{ + va_list va; + + va_start(va, format); + sys_write("stderr", stderr, format, va); + va_end(va); +} + diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/commands.py @@ -0,0 +1,385 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that 
copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import sys, os + +# Catgories of actions: +# killing +# yanking +# motion +# editing +# history +# finishing +# [completion] + +class Command(object): + finish = 0 + kills_digit_arg = 1 + def __init__(self, reader, (event_name, event)): + self.reader = reader + self.event = event + self.event_name = event_name + def do(self): + pass + +class KillCommand(Command): + def kill_range(self, start, end): + if start == end: + return + r = self.reader + b = r.buffer + text = b[start:end] + del b[start:end] + if is_kill(r.last_command): + if start < r.pos: + r.kill_ring[-1] = text + r.kill_ring[-1] + else: + r.kill_ring[-1] = r.kill_ring[-1] + text + else: + r.kill_ring.append(text) + r.pos = start + r.dirty = 1 + +class YankCommand(Command): + pass + +class MotionCommand(Command): + pass + +class EditCommand(Command): + pass + +class FinishCommand(Command): + finish = 1 + pass + +def is_kill(command): + return command and issubclass(command, KillCommand) + +def is_yank(command): + return command and issubclass(command, YankCommand) + +# etc + +class digit_arg(Command): + kills_digit_arg = 0 + def do(self): + r = self.reader + c = self.event[-1] + if c == "-": + if r.arg is not None: + r.arg = -r.arg + else: + r.arg = -1 + else: + d = int(c) + if r.arg is None: + r.arg = d + else: + if r.arg < 0: + r.arg = 10*r.arg - d + else: + r.arg = 10*r.arg + d + r.dirty = 1 + +class clear_screen(Command): + def do(self): + r = 
self.reader + r.console.clear() + r.dirty = 1 + +class refresh(Command): + def do(self): + self.reader.dirty = 1 + +class repaint(Command): + def do(self): + self.reader.dirty = 1 + self.reader.console.repaint_prep() + +class kill_line(KillCommand): + def do(self): + r = self.reader + b = r.buffer + eol = r.eol() + for c in b[r.pos:eol]: + if not c.isspace(): + self.kill_range(r.pos, eol) + return + else: + self.kill_range(r.pos, eol+1) + +class unix_line_discard(KillCommand): + def do(self): + r = self.reader + self.kill_range(r.bol(), r.pos) + +# XXX unix_word_rubout and backward_kill_word should actually +# do different things... + +class unix_word_rubout(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.bow(), r.pos) + +class kill_word(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.pos, r.eow()) + +class backward_kill_word(KillCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + self.kill_range(r.bow(), r.pos) + +class yank(YankCommand): + def do(self): + r = self.reader + if not r.kill_ring: + r.error("nothing to yank") + return + r.insert(r.kill_ring[-1]) + +class yank_pop(YankCommand): + def do(self): + r = self.reader + b = r.buffer + if not r.kill_ring: + r.error("nothing to yank") + return + if not is_yank(r.last_command): + r.error("previous command was not a yank") + return + repl = len(r.kill_ring[-1]) + r.kill_ring.insert(0, r.kill_ring.pop()) + t = r.kill_ring[-1] + b[r.pos - repl:r.pos] = t + r.pos = r.pos - repl + len(t) + r.dirty = 1 + +class interrupt(FinishCommand): + def do(self): + import signal + self.reader.console.finish() + os.kill(os.getpid(), signal.SIGINT) + +class suspend(Command): + def do(self): + import signal + r = self.reader + p = r.pos + r.console.finish() + os.kill(os.getpid(), signal.SIGSTOP) + ## this should probably be done + ## in a handler for SIGCONT? 
+ r.console.prepare() + r.pos = p + r.posxy = 0, 0 + r.dirty = 1 + r.console.screen = [] + +class up(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + bol1 = r.bol() + if bol1 == 0: + if r.historyi > 0: + r.select_item(r.historyi - 1) + return + r.pos = 0 + r.error("start of buffer") + return + bol2 = r.bol(bol1-1) + line_pos = r.pos - bol1 + if line_pos > bol1 - bol2 - 1: + r.sticky_y = line_pos + r.pos = bol1 - 1 + else: + r.pos = bol2 + line_pos + +class down(MotionCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + bol1 = r.bol() + eol1 = r.eol() + if eol1 == len(b): + if r.historyi < len(r.history): + r.select_item(r.historyi + 1) + r.pos = r.eol(0) + return + r.pos = len(b) + r.error("end of buffer") + return + eol2 = r.eol(eol1+1) + if r.pos - bol1 > eol2 - eol1 - 1: + r.pos = eol2 + else: + r.pos = eol1 + (r.pos - bol1) + 1 + +class left(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + p = r.pos - 1 + if p >= 0: + r.pos = p + else: + self.reader.error("start of buffer") + +class right(MotionCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + p = r.pos + 1 + if p <= len(b): + r.pos = p + else: + self.reader.error("end of buffer") + +class beginning_of_line(MotionCommand): + def do(self): + self.reader.pos = self.reader.bol() + +class end_of_line(MotionCommand): + def do(self): + r = self.reader + self.reader.pos = self.reader.eol() + +class home(MotionCommand): + def do(self): + self.reader.pos = 0 + +class end(MotionCommand): + def do(self): + self.reader.pos = len(self.reader.buffer) + +class forward_word(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + r.pos = r.eow() + +class backward_word(MotionCommand): + def do(self): + r = self.reader + for i in range(r.get_arg()): + r.pos = r.bow() + +class self_insert(EditCommand): + def do(self): + r = self.reader + r.insert(self.event 
* r.get_arg()) + +class insert_nl(EditCommand): + def do(self): + r = self.reader + r.insert("\n" * r.get_arg()) + +class transpose_characters(EditCommand): + def do(self): + r = self.reader + b = r.buffer + s = r.pos - 1 + if s < 0: + r.error("cannot transpose at start of buffer") + else: + if s == len(b): + s -= 1 + t = min(s + r.get_arg(), len(b) - 1) + c = b[s] + del b[s] + b.insert(t, c) + r.pos = t + r.dirty = 1 + +class backspace(EditCommand): + def do(self): + r = self.reader + b = r.buffer + for i in range(r.get_arg()): + if r.pos > 0: + r.pos -= 1 + del b[r.pos] + r.dirty = 1 + else: + self.reader.error("can't backspace at start") + +class delete(EditCommand): + def do(self): + r = self.reader + b = r.buffer + if ( r.pos == 0 and len(b) == 0 # this is something of a hack + and self.event[-1] == "\004"): + r.update_screen() + r.console.finish() + raise EOFError + for i in range(r.get_arg()): + if r.pos != len(b): + del b[r.pos] + r.dirty = 1 + else: + self.reader.error("end of buffer") + +class accept(FinishCommand): + def do(self): + pass + +class help(Command): + def do(self): + self.reader.msg = self.reader.help_text + self.reader.dirty = 1 + +class invalid_key(Command): + def do(self): + pending = self.reader.console.getpending() + s = ''.join(self.event) + pending.data + self.reader.error("`%r' not bound"%s) + +class invalid_command(Command): + def do(self): + s = self.event_name + self.reader.error("command `%s' not known"%s) + +class qIHelp(Command): + def do(self): + r = self.reader + r.insert((self.event + r.console.getpending().data) * r.get_arg()) + r.pop_input_trans() + +from pyrepl import input + +class QITrans(object): + def push(self, evt): + self.evt = evt + def get(self): + return ('qIHelp', self.evt.raw) + +class quoted_insert(Command): + kills_digit_arg = 0 + def do(self): + self.reader.push_input_trans(QITrans()) diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ 
b/pypy/module/cpyext/test/foo.c @@ -435,14 +435,6 @@ return (PyObject *)foop; } -/* List of functions exported by this module */ - -static PyMethodDef foo_functions[] = { - {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, - {NULL, NULL} /* Sentinel */ -}; - - static int initerrtype_init(PyObject *self, PyObject *args, PyObject *kwargs) { PyErr_SetString(PyExc_ValueError, "init raised an error!"); return -1; @@ -592,6 +584,41 @@ 0 /*tp_weaklist*/ }; +/* A type with a custom allocator */ +static void custom_dealloc(PyObject *ob) +{ + free(ob); +} + +static PyTypeObject CustomType; + +static PyObject *newCustom(PyObject *self, PyObject *args) +{ + PyObject *obj = calloc(1, sizeof(PyObject)); + obj->ob_type = &CustomType; + _Py_NewReference(obj); + return obj; +} + +static PyTypeObject CustomType = { + PyObject_HEAD_INIT(NULL) + 0, + "foo.Custom", /*tp_name*/ + sizeof(PyObject), /*tp_size*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)custom_dealloc, /*tp_dealloc*/ +}; + + +/* List of functions exported by this module */ + +static PyMethodDef foo_functions[] = { + {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, + {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, + {NULL, NULL} /* Sentinel */ +}; + /* Initialize this module. 
*/ @@ -616,7 +643,10 @@ if (PyType_Ready(&InitErrType) < 0) return; if (PyType_Ready(&SimplePropertyType) < 0) - return; + return; + CustomType.ob_type = &MetaType; + if (PyType_Ready(&CustomType) < 0) + return; m = Py_InitModule("foo", foo_functions); if (m == NULL) return; @@ -635,4 +665,6 @@ return; if (PyDict_SetItemString(d, "Property", (PyObject *) &SimplePropertyType) < 0) return; + if (PyDict_SetItemString(d, "Custom", (PyObject *) &CustomType) < 0) + return; } diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,14 +3,14 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.annlowlevel import llhelper -from pypy.interpreter.baseobjspace import DescrMismatch +from pypy.interpreter.baseobjspace import W_Root, DescrMismatch from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError @@ -287,11 +287,17 @@ 
W_TypeObject.__init__(self, space, extension_name, bases_w or [space.w_object], dict_w) - self.flag_cpytype = True + if not space.is_true(space.issubtype(self, space.w_type)): + self.flag_cpytype = True self.flag_heaptype = False @bootstrap_function def init_typeobject(space): + # Probably a hack + space.model.typeorder[W_PyCTypeObject] = [(W_PyCTypeObject, None), + (W_TypeObject, None), + (W_Root, None)] + make_typedescr(space.w_type.instancetypedef, basestruct=PyTypeObject, attach=type_attach, @@ -355,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -375,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -472,14 +478,19 @@ def PyType_Ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: return 0 + type_realize(space, rffi.cast(PyObject, pto)) + return 0 + +def type_realize(space, py_obj): + pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= Py_TPFLAGS_READYING try: - type_realize(space, rffi.cast(PyObject, pto)) - pto.c_tp_flags |= Py_TPFLAGS_READY + w_obj = _type_realize(space, py_obj) finally: 
pto.c_tp_flags &= ~Py_TPFLAGS_READYING - return 0 + pto.c_tp_flags |= Py_TPFLAGS_READY + return w_obj def solid_base(space, w_type): typedef = w_type.instancetypedef @@ -535,7 +546,7 @@ finally: Py_DecRef(space, base_pyo) -def type_realize(space, py_obj): +def _type_realize(space, py_obj): """ Creates an interpreter type from a PyTypeObject structure. """ @@ -554,7 +565,9 @@ finish_type_1(space, py_type) - w_obj = space.allocate_instance(W_PyCTypeObject, space.w_type) + w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) + + w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype) track_reference(space, py_obj, w_obj) w_obj.__init__(space, py_type) w_obj.ready() diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless.py --- a/pypy/module/test_lib_pypy/test_stackless.py +++ b/pypy/module/test_lib_pypy/test_stackless.py @@ -3,6 +3,8 @@ class AppTest_Stackless: def setup_class(cls): + import py.test + py.test.importorskip('greenlet') space = gettestobjspace(usemodules=('_stackless', '_socket')) cls.space = space # cannot test the unpickle part on top of py.py diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py --- a/pypy/translator/goal/app_main.py +++ b/pypy/translator/goal/app_main.py @@ -205,7 +205,8 @@ if dirname == search: # not found! 
let's hope that the compiled-in path is ok print >> sys.stderr, ('debug: WARNING: library path not found, ' - 'using compiled-in sys.path') + 'using compiled-in sys.path ' + 'and sys.prefix will be unset') newpath = sys.path[:] break newpath = sys.pypy_initial_path(dirname) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -2,14 +2,12 @@ from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyVarObject, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, - Py_GE, CONST_STRING, FILEP, fwrite, build_type_checkers) + Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, - track_reference, get_typedescr, RefcountState) + track_reference, get_typedescr, _Py_NewReference, RefcountState) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall -from pypy.module._file.interp_file import W_File -from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.error import OperationError import pypy.module.__builtin__.operation as operation @@ -185,26 +183,17 @@ return 0 @cpython_api([PyObject, PyTypeObjectPtr], PyObject) -def PyObject_Init(space, py_obj, type): +def PyObject_Init(space, obj, type): """Initialize a newly-allocated object op with its type and initial reference. Returns the initialized object. If type indicates that the object participates in the cyclic garbage detector, it is added to the detector's set of observed objects. 
Other fields of the object are not affected.""" - if not py_obj: + if not obj: PyErr_NoMemory(space) - py_obj.c_ob_type = type - py_obj.c_ob_refcnt = 1 - w_type = from_ref(space, rffi.cast(PyObject, type)) - assert isinstance(w_type, W_TypeObject) - if w_type.is_cpytype(): - w_obj = space.allocate_instance(W_ObjectObject, w_type) - track_reference(space, py_obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, py_obj) - else: - assert False, "Please add more cases in PyObject_Init" - return py_obj + obj.c_ob_type = type + _Py_NewReference(space, obj) + return obj @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) def PyObject_InitVar(space, py_obj, type, size): @@ -256,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. 
This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -396,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, @@ -429,40 +428,3 @@ rffi.free_nonmovingbuffer(data, buf) return 0 -PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File) - - at cpython_api([PyObject, rffi.INT_real], PyObject) -def PyFile_GetLine(space, w_obj, n): - """ - Equivalent to p.readline([n]), this function reads one line from the - object p. p may be a file object or any object with a readline() - method. If n is 0, exactly one line is read, regardless of the length of - the line. If n is greater than 0, no more than n bytes will be read - from the file; a partial line can be returned. In both cases, an empty string - is returned if the end of the file is reached immediately. 
If n is less than - 0, however, one line is read regardless of length, but EOFError is - raised if the end of the file is reached immediately.""" - try: - w_readline = space.getattr(w_obj, space.wrap('readline')) - except OperationError: - raise OperationError( - space.w_TypeError, space.wrap( - "argument must be a file, or have a readline() method.")) - - n = rffi.cast(lltype.Signed, n) - if space.is_true(space.gt(space.wrap(n), space.wrap(0))): - return space.call_function(w_readline, space.wrap(n)) - elif space.is_true(space.lt(space.wrap(n), space.wrap(0))): - return space.call_function(w_readline) - else: - # XXX Raise EOFError as specified - return space.call_function(w_readline) - at cpython_api([CONST_STRING, CONST_STRING], PyObject) -def PyFile_FromString(space, filename, mode): - """ - On success, return a new file object that is opened on the file given by - filename, with a file mode given by mode, where mode has the same - semantics as the standard C routine fopen(). On failure, return NULL.""" - w_filename = space.wrap(rffi.charp2str(filename)) - w_mode = space.wrap(rffi.charp2str(mode)) - return space.call_method(space.builtin, 'file', w_filename, w_mode) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -108,6 +108,11 @@ #return w_value or None return None + def impl_setdefault(self, w_key, w_default): + # here the dict is always empty + self._as_rdict().impl_fallback_setitem(w_key, w_default) + return w_default + def impl_setitem(self, w_key, w_value): self._as_rdict().impl_fallback_setitem(w_key, w_value) @@ -181,6 +186,9 @@ # _________________________________________________________________ # fallback implementation methods + def impl_fallback_setdefault(self, w_key, w_default): + return self.r_dict_content.setdefault(w_key, w_default) + def impl_fallback_setitem(self, w_key, w_value): self.r_dict_content[w_key] = 
w_value @@ -227,6 +235,7 @@ ("length", 0), ("setitem_str", 2), ("setitem", 2), + ("setdefault", 2), ("delitem", 1), ("iter", 0), ("items", 0), @@ -317,6 +326,14 @@ def impl_setitem_str(self, key, w_value): self.content[key] = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + return self.content.setdefault(space.str_w(w_key), w_default) + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) @@ -787,13 +804,7 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup - w_value = w_dict.getitem(w_key) - if w_value is not None: - return w_value - else: - w_dict.setitem(w_key, w_default) - return w_default + return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): len_defaults = len(defaults_w) diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 
'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/lib_pypy/pyrepl/tests/bugs.py b/lib_pypy/pyrepl/tests/bugs.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/bugs.py @@ -0,0 +1,36 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +# this test case should contain as-verbatim-as-possible versions of +# (applicable) bug reports + +class BugsTestCase(ReaderTestCase): + + def test_transpose_at_start(self): + self.run_test([( 'transpose', [EA, '']), + ( 'accept', [''])]) + +def test(): + run_testcase(BugsTestCase) + +if __name__ == '__main__': + test() diff --git a/pypy/rpython/rint.py b/pypy/rpython/rint.py --- a/pypy/rpython/rint.py +++ b/pypy/rpython/rint.py @@ -212,52 +212,48 @@ # cpython, and rpython, assumed that integer division truncates # towards -infinity. however, in C99 and most (all?) other # backends, integer division truncates towards 0. so assuming - # that, we can generate scary code that applies the necessary + # that, we call a helper function that applies the necessary # correction in the right cases. 
- # paper and pencil are encouraged for this :) - - from pypy.rpython.rbool import bool_repr - assert isinstance(repr.lowleveltype, Number) - c_zero = inputconst(repr.lowleveltype, repr.lowleveltype._default) op = func.split('_', 1)[0] if op == 'floordiv': - # return (x/y) - (((x^y)<0)&((x%y)!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod = hop.genop(prefix + 'mod', vlist, - resulttype=repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_mod, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_res = hop.genop(prefix + 'sub', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'floordiv'] + v_res = hop.gendirectcall(llfunc, vlist[0], vlist[1], v_res) elif op == 'mod': - # return r + y*(((x^y)<0)&(r!=0)); - v_xor = hop.genop(prefix + 'xor', vlist, - resulttype=repr) - v_xor_le = hop.genop(prefix + 'lt', [v_xor, c_zero], - resulttype=Bool) - v_xor_le = hop.llops.convertvar(v_xor_le, bool_repr, repr) - v_mod_ne = hop.genop(prefix + 'ne', [v_res, c_zero], - resulttype=Bool) - v_mod_ne = hop.llops.convertvar(v_mod_ne, bool_repr, repr) - v_corr1 = hop.genop(prefix + 'and', [v_xor_le, v_mod_ne], - resulttype=repr) - v_corr = hop.genop(prefix + 'mul', [v_corr1, vlist[1]], - resulttype=repr) - v_res = hop.genop(prefix + 'add', [v_res, v_corr], - resulttype=repr) + llfunc = globals()['ll_correct_' + prefix + 'mod'] + v_res = hop.gendirectcall(llfunc, vlist[1], v_res) + v_res = hop.llops.convertvar(v_res, repr, r_result) return v_res +INT_BITS_1 = r_int.BITS - 1 +LLONG_BITS_1 = r_longlong.BITS - 1 + +def ll_correct_int_floordiv(x, y, r): + p = r * y + if y < 0: u = p - x + else: u = x - p + return r + (u >> INT_BITS_1) + +def ll_correct_llong_floordiv(x, y, r): + p = r * y + 
if y < 0: u = p - x + else: u = x - p + return r + (u >> LLONG_BITS_1) + +def ll_correct_int_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> INT_BITS_1)) + +def ll_correct_llong_mod(y, r): + if y < 0: u = -r + else: u = r + return r + (y & (u >> LLONG_BITS_1)) + + #Helper functions for comparisons def _rtype_compare_template(hop, func): diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -19,6 +19,7 @@ 'load_module': 'interp_imp.load_module', 'load_source': 'interp_imp.load_source', 'load_compiled': 'interp_imp.load_compiled', + 'load_dynamic': 'interp_imp.load_dynamic', '_run_compiled_module': 'interp_imp._run_compiled_module', # pypy '_getimporter': 'importing._getimporter', # pypy #'run_module': 'interp_imp.run_module', @@ -36,7 +37,6 @@ } appleveldefs = { - 'load_dynamic': 'app_imp.load_dynamic', } def __init__(self, space, *args): diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def 
test_time_clock(self): diff --git a/pypy/translator/goal/targetrpystonedalone.py b/pypy/translator/goal/targetrpystonedalone.py --- a/pypy/translator/goal/targetrpystonedalone.py +++ b/pypy/translator/goal/targetrpystonedalone.py @@ -2,11 +2,11 @@ from pypy.translator.test import rpystone from pypy.translator.goal import richards import pypy.interpreter.gateway # needed before sys, order of imports !!! -from pypy.module.sys.version import svn_revision +from pypy.tool.version import get_repo_version_info # __________ Entry point __________ -VERSION = svn_revision() +VERSION = get_repo_version_info()[2] # note that we have %f but no length specifiers in RPython diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -146,6 +146,15 @@ self.pending_signals[n] = None self.reissue_signal_action.fire_after_thread_switch() + def set_interrupt(self): + "Simulates the effect of a SIGINT signal arriving" + n = cpy_signal.SIGINT + if self.reissue_signal_action is None: + self.report_signal(n) + else: + self.pending_signals[n] = None + self.reissue_signal_action.fire_after_thread_switch() + def report_signal(self, n): try: w_handler = self.handlers_w[n] diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 
'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ 
# regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? 
@@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 
@@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/lib_pypy/pyrepl/keymap.py b/lib_pypy/pyrepl/keymap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/keymap.py @@ -0,0 +1,186 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright 
notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +functions for parsing keyspecs + +Support for turning keyspecs into appropriate sequences. + +pyrepl uses it's own bastardized keyspec format, which is meant to be +a strict superset of readline's \"KEYSEQ\" format (which is to say +that if you can come up with a spec readline accepts that this +doesn't, you've found a bug and should tell me about it). + +Note that this is the `\\C-o' style of readline keyspec, not the +`Control-o' sort. + +A keyspec is a string representing a sequence of keypresses that can +be bound to a command. + +All characters other than the backslash represent themselves. In the +traditional manner, a backslash introduces a escape sequence. + +The extension to readline is that the sequence \\ denotes the +sequence of charaters produced by hitting KEY. + +Examples: + +`a' - what you get when you hit the `a' key +`\\EOA' - Escape - O - A (up, on my terminal) +`\\' - the up arrow key +`\\' - ditto (keynames are case insensitive) +`\\C-o', `\\c-o' - control-o +`\\M-.' - meta-period +`\\E.' - ditto (that's how meta works for pyrepl) +`\\', `\\', `\\t', `\\011', '\\x09', '\\X09', '\\C-i', '\\C-I' + - all of these are the tab character. Can you think of any more? 
+""" + +_escapes = { + '\\':'\\', + "'":"'", + '"':'"', + 'a':'\a', + 'b':'\h', + 'e':'\033', + 'f':'\f', + 'n':'\n', + 'r':'\r', + 't':'\t', + 'v':'\v' + } + +_keynames = { + 'backspace': 'backspace', + 'delete': 'delete', + 'down': 'down', + 'end': 'end', + 'enter': '\r', + 'escape': '\033', + 'f1' : 'f1', 'f2' : 'f2', 'f3' : 'f3', 'f4' : 'f4', + 'f5' : 'f5', 'f6' : 'f6', 'f7' : 'f7', 'f8' : 'f8', + 'f9' : 'f9', 'f10': 'f10', 'f11': 'f11', 'f12': 'f12', + 'f13': 'f13', 'f14': 'f14', 'f15': 'f15', 'f16': 'f16', + 'f17': 'f17', 'f18': 'f18', 'f19': 'f19', 'f20': 'f20', + 'home': 'home', + 'insert': 'insert', + 'left': 'left', + 'page down': 'page down', + 'page up': 'page up', + 'return': '\r', + 'right': 'right', + 'space': ' ', + 'tab': '\t', + 'up': 'up', + } + +class KeySpecError(Exception): + pass + +def _parse_key1(key, s): + ctrl = 0 + meta = 0 + ret = '' + while not ret and s < len(key): + if key[s] == '\\': + c = key[s+1].lower() + if _escapes.has_key(c): + ret = _escapes[c] + s += 2 + elif c == "c": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\C must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if ctrl: + raise KeySpecError, "doubled \\C- (char %d of %s)"%( + s + 1, repr(key)) + ctrl = 1 + s += 3 + elif c == "m": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\M must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if meta: + raise KeySpecError, "doubled \\M- (char %d of %s)"%( + s + 1, repr(key)) + meta = 1 + s += 3 + elif c.isdigit(): + n = key[s+1:s+4] + ret = chr(int(n, 8)) + s += 4 + elif c == 'x': + n = key[s+2:s+4] + ret = chr(int(n, 16)) + s += 4 + elif c == '<': + t = key.find('>', s) + if t == -1: + raise KeySpecError, \ + "unterminated \\< starting at char %d of %s"%( + s + 1, repr(key)) + ret = key[s+2:t].lower() + if ret not in _keynames: + raise KeySpecError, \ + "unrecognised keyname `%s' at char %d of %s"%( + ret, s + 2, repr(key)) + ret = _keynames[ret] + s = t + 1 + else: + raise KeySpecError, \ + 
"unknown backslash escape %s at char %d of %s"%( + `c`, s + 2, repr(key)) + else: + ret = key[s] + s += 1 + if ctrl: + if len(ret) > 1: + raise KeySpecError, "\\C- must be followed by a character" + ret = chr(ord(ret) & 0x1f) # curses.ascii.ctrl() + if meta: + ret = ['\033', ret] + else: + ret = [ret] + return ret, s + +def parse_keys(key): + s = 0 + r = [] + while s < len(key): + k, s = _parse_key1(key, s) + r.extend(k) + return r + +def compile_keymap(keymap, empty=''): + r = {} + for key, value in keymap.items(): + r.setdefault(key[0], {})[key[1:]] = value + for key, value in r.items(): + if empty in value: + if len(value) <> 1: + raise KeySpecError, \ + "key definitions for %s clash"%(value.values(),) + else: + r[key] = value[empty] + else: + r[key] = compile_keymap(value, empty) + return r diff --git a/pypy/translator/platform/posix.py b/pypy/translator/platform/posix.py --- a/pypy/translator/platform/posix.py +++ b/pypy/translator/platform/posix.py @@ -113,11 +113,16 @@ m.eci = eci def pypyrel(fpath): - rel = py.path.local(fpath).relto(pypypath) + lpath = py.path.local(fpath) + rel = lpath.relto(pypypath) if rel: return os.path.join('$(PYPYDIR)', rel) - else: - return fpath + m_dir = m.makefile_dir + if m_dir == lpath: + return '.' + if m_dir.dirpath() == lpath: + return '..' 
+ return fpath rel_cfiles = [m.pathrel(cfile) for cfile in cfiles] rel_ofiles = [rel_cfile[:-2]+'.o' for rel_cfile in rel_cfiles] diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/module/cpyext/include/longintrepr.h b/pypy/module/cpyext/include/longintrepr.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/longintrepr.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -3,8 +3,102 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.jitexc import JitException +from pypy.jit.metainterp.optimizeopt.optimizer import Optimization -from pypy.jit.metainterp.optimizeopt.optimizer import Optimization + +class CachedField(object): + def __init__(self): + # Cache information for a field descr. 
It can be in one + # of two states: + # + # 1. 'cached_fields' is a dict mapping OptValues of structs + # to OptValues of fields. All fields on-heap are + # synchronized with the values stored in the cache. + # + # 2. we just did one setfield, which is delayed (and thus + # not synchronized). 'lazy_setfield' is the delayed + # ResOperation. In this state, 'cached_fields' contains + # out-of-date information. More precisely, the field + # value pending in the ResOperation is *not* visible in + # 'cached_fields'. + # + self._cached_fields = {} + self._lazy_setfield = None + self._lazy_setfield_registered = False + + def do_setfield(self, optheap, op): + # Update the state with the SETFIELD_GC operation 'op'. + structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + assert not self.possible_aliasing(optheap, structvalue) + cached_fieldvalue = self._cached_fields.get(structvalue, None) + if cached_fieldvalue is not fieldvalue: + # common case: store the 'op' as lazy_setfield, and register + # myself in the optheap's _lazy_setfields list + self._lazy_setfield = op + if not self._lazy_setfield_registered: + optheap._lazy_setfields.append(self) + self._lazy_setfield_registered = True + else: + # this is the case where the pending setfield ends up + # storing precisely the value that is already there, + # as proved by 'cached_fields'. In this case, we don't + # need any _lazy_setfield: the heap value is already right. + # Note that this may reset to None a non-None lazy_setfield, + # cancelling its previous effects with no side effect. + self._lazy_setfield = None + + def possible_aliasing(self, optheap, structvalue): + # If lazy_setfield is set and contains a setfield on a different + # structvalue, then we are annoyed, because it may point to either + # the same or a different structure at runtime. 
+ return (self._lazy_setfield is not None + and (optheap.getvalue(self._lazy_setfield.getarg(0)) + is not structvalue)) + + def getfield_from_cache(self, optheap, structvalue): + # Returns the up-to-date field's value, or None if not cached. + if self.possible_aliasing(optheap, structvalue): + self.force_lazy_setfield(optheap) + if self._lazy_setfield is not None: + op = self._lazy_setfield + assert optheap.getvalue(op.getarg(0)) is structvalue + return optheap.getvalue(op.getarg(1)) + else: + return self._cached_fields.get(structvalue, None) + + def remember_field_value(self, structvalue, fieldvalue): + assert self._lazy_setfield is None + self._cached_fields[structvalue] = fieldvalue + + def force_lazy_setfield(self, optheap): + op = self._lazy_setfield + if op is not None: + # This is the way _lazy_setfield is usually reset to None. + # Now we clear _cached_fields, because actually doing the + # setfield might impact any of the stored result (because of + # possible aliasing). + self._cached_fields.clear() + self._lazy_setfield = None + optheap.next_optimization.propagate_forward(op) + # Once it is done, we can put at least one piece of information + # back in the cache: the value of this particular structure's + # field. 
+ structvalue = optheap.getvalue(op.getarg(0)) + fieldvalue = optheap.getvalue(op.getarg(1)) + self.remember_field_value(structvalue, fieldvalue) + + def get_reconstructed(self, optimizer, valuemap): + assert self._lazy_setfield is None + cf = CachedField() + for structvalue, fieldvalue in self._cached_fields.iteritems(): + structvalue2 = structvalue.get_reconstructed(optimizer, valuemap) + fieldvalue2 = fieldvalue .get_reconstructed(optimizer, valuemap) + cf._cached_fields[structvalue2] = fieldvalue2 + return cf + class CachedArrayItems(object): def __init__(self): @@ -20,40 +114,23 @@ """Cache repeated heap accesses""" def __init__(self): - # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}} + # cached fields: {descr: CachedField} self.cached_fields = {} - self.known_heap_fields = {} + self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - # lazily written setfields (at most one per descr): {descr: op} - self.lazy_setfields = {} - self.lazy_setfields_descrs = [] # keys (at least) of previous dict def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() if True: self.force_all_lazy_setfields() - assert not self.lazy_setfields_descrs - assert not self.lazy_setfields else: - new.lazy_setfields_descrs = self.lazy_setfields_descrs - new.lazy_setfields = self.lazy_setfields + assert 0 # was: new.lazy_setfields = self.lazy_setfields for descr, d in self.cached_fields.items(): - newd = {} - new.cached_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - - for descr, d in self.known_heap_fields.items(): - newd = {} - new.known_heap_fields[descr] = newd - for value, fieldvalue in d.items(): - newd[value.get_reconstructed(optimizer, valuemap)] = \ - fieldvalue.get_reconstructed(optimizer, valuemap) - + new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap) + 
new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): newd = {} @@ -74,30 +151,16 @@ return new def clean_caches(self): + del self._lazy_setfields[:] self.cached_fields.clear() - self.known_heap_fields.clear() self.cached_arrayitems.clear() - def cache_field_value(self, descr, value, fieldvalue, write=False): - if write: - # when seeing a setfield, we have to clear the cache for the same - # field on any other structure, just in case they are aliasing - # each other - d = self.cached_fields[descr] = {} - else: - d = self.cached_fields.setdefault(descr, {}) - d[value] = fieldvalue - - def read_cached_field(self, descr, value): - # XXX self.cached_fields and self.lazy_setfields should probably - # be merged somehow - d = self.cached_fields.get(descr, None) - if d is None: - op = self.lazy_setfields.get(descr, None) - if op is None: - return None - return self.getvalue(op.getarg(1)) - return d.get(value, None) + def field_cache(self, descr): + try: + cf = self.cached_fields[descr] + except KeyError: + cf = self.cached_fields[descr] = CachedField() + return cf def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): d = self.cached_arrayitems.get(descr, None) @@ -157,11 +220,15 @@ self.optimizer.pendingfields = self.force_lazy_setfields_for_guard() return opnum = op.getopnum() - if (opnum == rop.SETFIELD_GC or - opnum == rop.SETFIELD_RAW or - opnum == rop.SETARRAYITEM_GC or - opnum == rop.SETARRAYITEM_RAW or - opnum == rop.DEBUG_MERGE_POINT): + if (opnum == rop.SETFIELD_GC or # handled specially + opnum == rop.SETFIELD_RAW or # no effect on GC struct/array + opnum == rop.SETARRAYITEM_GC or # handled specially + opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.STRSETITEM or # no effect on GC struct/array + opnum == rop.UNICODESETITEM or # no effect on GC struct/array + opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever + opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array + opnum == 
rop.COPYUNICODECONTENT): # no effect on GC struct/array return assert opnum != rop.CALL_PURE if (opnum == rop.CALL or @@ -180,8 +247,8 @@ for fielddescr in effectinfo.write_descrs_fields: self.force_lazy_setfield(fielddescr) try: - del self.cached_fields[fielddescr] - del self.known_heap_fields[fielddescr] + cf = self.cached_fields[fielddescr] + cf._cached_fields.clear() except KeyError: pass for arraydescr in effectinfo.write_descrs_arrays: @@ -195,10 +262,7 @@ # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. return - self.force_all_lazy_setfields() - elif op.is_final() or (not we_are_translated() and - op.getopnum() < 0): # escape() operations - self.force_all_lazy_setfields() + self.force_all_lazy_setfields() self.clean_caches() @@ -206,58 +270,54 @@ assert value.is_constant() newvalue = self.getvalue(value.box) if value is not newvalue: - for d in self.cached_fields.values(): - if value in d: - d[newvalue] = d[value] - # FIXME: Update the other caches too? - - - def force_lazy_setfield(self, descr, before_guard=False): + for cf in self.cached_fields.itervalues(): + if value in cf._cached_fields: + cf._cached_fields[newvalue] = cf._cached_fields[value] + + def force_lazy_setfield(self, descr): try: - op = self.lazy_setfields[descr] + cf = self.cached_fields[descr] except KeyError: return - del self.lazy_setfields[descr] - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - try: - heapvalue = self.known_heap_fields[op.getdescr()][value] - if fieldvalue is heapvalue: - return - except KeyError: - pass - self.next_optimization.propagate_forward(op) + cf.force_lazy_setfield(self) + def fixup_guard_situation(self): # hackish: reverse the order of the last two operations if it makes # sense to avoid a situation like "int_eq/setfield_gc/guard_true", # which the backend (at least the x86 backend) does not handle well. 
newoperations = self.optimizer.newoperations - if before_guard and len(newoperations) >= 2: - lastop = newoperations[-1] - prevop = newoperations[-2] - # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" - # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" - # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" - opnum = prevop.getopnum() - lastop_args = lastop.getarglist() - if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE - or opnum == rop.CALL_RELEASE_GIL or prevop.is_ovf()) - and prevop.result not in lastop_args): - newoperations[-2] = lastop - newoperations[-1] = prevop + if len(newoperations) < 2: + return + lastop = newoperations[-1] + if (lastop.getopnum() != rop.SETFIELD_GC and + lastop.getopnum() != rop.SETARRAYITEM_GC): + return + # - is_comparison() for cases like "int_eq/setfield_gc/guard_true" + # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced" + # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow" + prevop = newoperations[-2] + opnum = prevop.getopnum() + if not (prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE + or prevop.is_ovf()): + return + if prevop.result in lastop.getarglist(): + return + newoperations[-2] = lastop + newoperations[-1] = prevop def force_all_lazy_setfields(self): - if len(self.lazy_setfields_descrs) > 0: - for descr in self.lazy_setfields_descrs: - self.force_lazy_setfield(descr) - del self.lazy_setfields_descrs[:] + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + cf.force_lazy_setfield(self) def force_lazy_setfields_for_guard(self): pendingfields = [] - for descr in self.lazy_setfields_descrs: - try: - op = self.lazy_setfields[descr] - except KeyError: + for cf in self._lazy_setfields: + if not we_are_translated(): + assert cf in self.cached_fields.values() + op = cf._lazy_setfield + if op is None: continue # the only really interesting case that we need to handle in the # guards' resume data is that 
of a virtual object that is stored @@ -267,41 +327,27 @@ fieldvalue = self.getvalue(op.getarg(1)) if fieldvalue.is_virtual(): # this is the case that we leave to resume.py - pendingfields.append((descr, value.box, + pendingfields.append((op.getdescr(), value.box, fieldvalue.get_key_box())) else: - self.force_lazy_setfield(descr, before_guard=True) + cf.force_lazy_setfield(self) + self.fixup_guard_situation() return pendingfields - def force_lazy_setfield_if_necessary(self, op, value, write=False): - try: - op1 = self.lazy_setfields[op.getdescr()] - except KeyError: - if write: - self.lazy_setfields_descrs.append(op.getdescr()) - else: - if self.getvalue(op1.getarg(0)) is not value: - self.force_lazy_setfield(op.getdescr()) - def optimize_GETFIELD_GC(self, op): - value = self.getvalue(op.getarg(0)) - self.force_lazy_setfield_if_necessary(op, value) - # check if the field was read from another getfield_gc just before - # or has been written to recently - fieldvalue = self.read_cached_field(op.getdescr(), value) + structvalue = self.getvalue(op.getarg(0)) + cf = self.field_cache(op.getdescr()) + fieldvalue = cf.getfield_from_cache(self, structvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) return # default case: produce the operation - value.ensure_nonnull() + structvalue.ensure_nonnull() ###self.optimizer.optimize_default(op) self.emit_operation(op) # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - self.cache_field_value(op.getdescr(), value, fieldvalue) - # keep track of what's on the heap - d = self.known_heap_fields.setdefault(op.getdescr(), {}) - d[value] = fieldvalue + cf.remember_field_value(structvalue, fieldvalue) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], @@ -310,14 +356,8 @@ (op.getdescr().repr_of_descr())) raise BogusPureField # - value = self.getvalue(op.getarg(0)) - fieldvalue = self.getvalue(op.getarg(1)) - cached_fieldvalue = 
self.read_cached_field(op.getdescr(), value) - if fieldvalue is not cached_fieldvalue: - self.force_lazy_setfield_if_necessary(op, value, write=True) - self.lazy_setfields[op.getdescr()] = op - # remember the result of future reads of the field - self.cache_field_value(op.getdescr(), value, fieldvalue, write=True) + cf = self.field_cache(op.getdescr()) + cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC(self, op): value = self.getvalue(op.getarg(0)) diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. 
Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c --- a/pypy/translator/c/src/dtoa.c +++ b/pypy/translator/c/src/dtoa.c @@ -116,7 +116,6 @@ /* Begin PYPY hacks */ /* #include "Python.h" */ -#define DOUBLE_IS_LITTLE_ENDIAN_IEEE754 #define HAVE_UINT32_T #define HAVE_INT32_T #define HAVE_UINT64_T diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -126,8 +126,16 @@ _run_compiled_module(space, w_modulename, filename, w_file, w_mod) return w_mod + at unwrap_spec(filename=str) +def load_dynamic(space, w_modulename, filename, w_file=None): + if not space.config.objspace.usemodules.cpyext: + raise OperationError(space.w_ImportError, space.wrap( + "Not implemented")) + importing.load_c_extension(space, filename, space.str_w(w_modulename)) + return importing.check_sys_modules(space, w_modulename) + def new_module(space, w_name): - return 
space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/readline.py @@ -0,0 +1,408 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Alex Gaynor +# Antonio Cuni +# Armin Rigo +# Holger Krekel +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright 
notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""A compatibility wrapper reimplementing the 'readline' standard module +on top of pyrepl. Not all functionalities are supported. Contains +extensions for multiline input. +""" + +import sys, os +from pyrepl import commands +from pyrepl.historical_reader import HistoricalReader +from pyrepl.completing_reader import CompletingReader +from pyrepl.unix_console import UnixConsole, _error + + +ENCODING = 'latin1' # XXX hard-coded + +__all__ = ['add_history', + 'clear_history', + 'get_begidx', + 'get_completer', + 'get_completer_delims', + 'get_current_history_length', + 'get_endidx', + 'get_history_item', + 'get_history_length', + 'get_line_buffer', + 'insert_text', + 'parse_and_bind', + 'read_history_file', + 'read_init_file', + 'redisplay', + 'remove_history_item', + 'replace_history_item', + 'set_completer', + 'set_completer_delims', + 'set_history_length', + 'set_pre_input_hook', + 'set_startup_hook', + 'write_history_file', + # ---- multiline extensions ---- + 'multiline_input', + ] + +# ____________________________________________________________ + +class ReadlineConfig(object): + readline_completer = None + completer_delims = dict.fromkeys(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?') + +class ReadlineAlikeReader(HistoricalReader, CompletingReader): + + assume_immutable_completions = False + use_brackets = False + sort_in_column = True + + def error(self, msg="none"): + pass # don't show error messages by default + + def 
get_stem(self): + b = self.buffer + p = self.pos - 1 + completer_delims = self.config.completer_delims + while p >= 0 and b[p] not in completer_delims: + p -= 1 + return ''.join(b[p+1:self.pos]) + + def get_completions(self, stem): + result = [] + function = self.config.readline_completer + if function is not None: + try: + stem = str(stem) # rlcompleter.py seems to not like unicode + except UnicodeEncodeError: + pass # but feed unicode anyway if we have no choice + state = 0 + while True: + try: + next = function(stem, state) + except: + break + if not isinstance(next, str): + break + result.append(next) + state += 1 + # emulate the behavior of the standard readline that sorts + # the completions before displaying them. + result.sort() + return result + + def get_trimmed_history(self, maxlength): + if maxlength >= 0: + cut = len(self.history) - maxlength + if cut < 0: + cut = 0 + else: + cut = 0 + return self.history[cut:] + + # --- simplified support for reading multiline Python statements --- + + # This duplicates small parts of pyrepl.python_reader. I'm not + # reusing the PythonicReader class directly for two reasons. One is + # to try to keep as close as possible to CPython's prompt. The + # other is that it is the readline module that we are ultimately + # implementing here, and I don't want the built-in raw_input() to + # start trying to read multiline inputs just because what the user + # typed look like valid but incomplete Python code. So we get the + # multiline feature only when using the multiline_input() function + # directly (see _pypy_interact.py). 
+ + more_lines = None + + def collect_keymap(self): + return super(ReadlineAlikeReader, self).collect_keymap() + ( + (r'\n', 'maybe-accept'),) + + def __init__(self, console): + super(ReadlineAlikeReader, self).__init__(console) + self.commands['maybe_accept'] = maybe_accept + self.commands['maybe-accept'] = maybe_accept + + def after_command(self, cmd): + super(ReadlineAlikeReader, self).after_command(cmd) + if self.more_lines is None: + # Force single-line input if we are in raw_input() mode. + # Although there is no direct way to add a \n in this mode, + # multiline buffers can still show up using various + # commands, e.g. navigating the history. + try: + index = self.buffer.index("\n") + except ValueError: + pass + else: + self.buffer = self.buffer[:index] + if self.pos > len(self.buffer): + self.pos = len(self.buffer) + +class maybe_accept(commands.Command): + def do(self): + r = self.reader + r.dirty = 1 # this is needed to hide the completion menu, if visible + # + # if there are already several lines and the cursor + # is not on the last one, always insert a new \n. 
+ text = r.get_unicode() + if "\n" in r.buffer[r.pos:]: + r.insert("\n") + elif r.more_lines is not None and r.more_lines(text): + r.insert("\n") + else: + self.finish = 1 + +# ____________________________________________________________ + +class _ReadlineWrapper(object): + f_in = 0 + f_out = 1 + reader = None + saved_history_length = -1 + startup_hook = None + config = ReadlineConfig() + + def get_reader(self): + if self.reader is None: + console = UnixConsole(self.f_in, self.f_out, encoding=ENCODING) + self.reader = ReadlineAlikeReader(console) + self.reader.config = self.config + return self.reader + + def raw_input(self, prompt=''): + try: + reader = self.get_reader() + except _error: + return _old_raw_input(prompt) + if self.startup_hook is not None: + self.startup_hook() + reader.ps1 = prompt + return reader.readline() + + def multiline_input(self, more_lines, ps1, ps2): + """Read an input on possibly multiple lines, asking for more + lines as long as 'more_lines(unicodetext)' returns an object whose + boolean value is true. 
+ """ + reader = self.get_reader() + saved = reader.more_lines + try: + reader.more_lines = more_lines + reader.ps1 = reader.ps2 = ps1 + reader.ps3 = reader.ps4 = ps2 + return reader.readline() + finally: + reader.more_lines = saved + + def parse_and_bind(self, string): + pass # XXX we don't support parsing GNU-readline-style init files + + def set_completer(self, function=None): + self.config.readline_completer = function + + def get_completer(self): + return self.config.readline_completer + + def set_completer_delims(self, string): + self.config.completer_delims = dict.fromkeys(string) + + def get_completer_delims(self): + chars = self.config.completer_delims.keys() + chars.sort() + return ''.join(chars) + + def _histline(self, line): + return unicode(line.rstrip('\n'), ENCODING) + + def get_history_length(self): + return self.saved_history_length + + def set_history_length(self, length): + self.saved_history_length = length + + def get_current_history_length(self): + return len(self.get_reader().history) + + def read_history_file(self, filename='~/.history'): + # multiline extension (really a hack) for the end of lines that + # are actually continuations inside a single multiline_input() + # history item: we use \r\n instead of just \n. If the history + # file is passed to GNU readline, the extra \r are just ignored. 
+ history = self.get_reader().history + f = open(os.path.expanduser(filename), 'r') + buffer = [] + for line in f: + if line.endswith('\r\n'): + buffer.append(line) + else: + line = self._histline(line) + if buffer: + line = ''.join(buffer).replace('\r', '') + line + del buffer[:] + if line: + history.append(line) + f.close() + + def write_history_file(self, filename='~/.history'): + maxlength = self.saved_history_length + history = self.get_reader().get_trimmed_history(maxlength) + f = open(os.path.expanduser(filename), 'w') + for entry in history: + if isinstance(entry, unicode): + entry = entry.encode(ENCODING) + entry = entry.replace('\n', '\r\n') # multiline history support + f.write(entry + '\n') + f.close() + + def clear_history(self): + del self.get_reader().history[:] + + def get_history_item(self, index): + history = self.get_reader().history + if 1 <= index <= len(history): + return history[index-1] + else: + return None # blame readline.c for not raising + + def remove_history_item(self, index): + history = self.get_reader().history + if 0 <= index < len(history): + del history[index] + else: + raise ValueError("No history item at position %d" % index) + # blame readline.c for raising ValueError + + def replace_history_item(self, index, line): + history = self.get_reader().history + if 0 <= index < len(history): + history[index] = self._histline(line) + else: + raise ValueError("No history item at position %d" % index) + # blame readline.c for raising ValueError + + def add_history(self, line): + self.get_reader().history.append(self._histline(line)) + + def set_startup_hook(self, function=None): + self.startup_hook = function + + def get_line_buffer(self): + return self.get_reader().get_buffer() + + def _get_idxs(self): + start = cursor = self.get_reader().pos + buf = self.get_line_buffer() + for i in xrange(cursor - 1, -1, -1): + if buf[i] in self.get_completer_delims(): + break + start = i + return start, cursor + + def get_begidx(self): + return 
self._get_idxs()[0] + + def get_endidx(self): + return self._get_idxs()[1] + + def insert_text(self, text): + return self.get_reader().insert(text) + + +_wrapper = _ReadlineWrapper() + +# ____________________________________________________________ +# Public API + +parse_and_bind = _wrapper.parse_and_bind +set_completer = _wrapper.set_completer +get_completer = _wrapper.get_completer +set_completer_delims = _wrapper.set_completer_delims +get_completer_delims = _wrapper.get_completer_delims +get_history_length = _wrapper.get_history_length +set_history_length = _wrapper.set_history_length +get_current_history_length = _wrapper.get_current_history_length +read_history_file = _wrapper.read_history_file +write_history_file = _wrapper.write_history_file +clear_history = _wrapper.clear_history +get_history_item = _wrapper.get_history_item +remove_history_item = _wrapper.remove_history_item +replace_history_item = _wrapper.replace_history_item +add_history = _wrapper.add_history +set_startup_hook = _wrapper.set_startup_hook +get_line_buffer = _wrapper.get_line_buffer +get_begidx = _wrapper.get_begidx +get_endidx = _wrapper.get_endidx +insert_text = _wrapper.insert_text + +# Extension +multiline_input = _wrapper.multiline_input + +# Internal hook +_get_reader = _wrapper.get_reader + +# ____________________________________________________________ +# Stubs + +def _make_stub(_name, _ret): + def stub(*args, **kwds): + import warnings + warnings.warn("readline.%s() not implemented" % _name, stacklevel=2) + stub.func_name = _name + globals()[_name] = stub + +for _name, _ret in [ + ('read_init_file', None), + ('redisplay', None), + ('set_pre_input_hook', None), + ]: + assert _name not in globals(), _name + _make_stub(_name, _ret) + +# ____________________________________________________________ + +def _setup(): + global _old_raw_input + if _old_raw_input is not None: + return # don't run _setup twice + + try: + f_in = sys.stdin.fileno() + f_out = sys.stdout.fileno() + except 
(AttributeError, ValueError): + return + if not os.isatty(f_in) or not os.isatty(f_out): + return + + _wrapper.f_in = f_in + _wrapper.f_out = f_out + + if hasattr(sys, '__raw_input__'): # PyPy + _old_raw_input = sys.__raw_input__ + sys.__raw_input__ = _wrapper.raw_input + else: + # this is not really what readline.c does. Better than nothing I guess + import __builtin__ + _old_raw_input = __builtin__.raw_input + __builtin__.raw_input = _wrapper.raw_input + +_old_raw_input = None +_setup() diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -248,3 +248,8 @@ """This is synonymous to ``raise SystemExit''. It will cause the current thread to exit silently unless the exception is caught.""" raise OperationError(space.w_SystemExit, space.w_None) + +def interrupt_main(space): + """Raise a KeyboardInterrupt in the main thread. +A subthread can use this function to interrupt the main thread.""" + space.check_signal_action.set_interrupt() diff --git a/pypy/module/imp/app_imp.py b/pypy/module/imp/app_imp.py deleted file mode 100644 --- a/pypy/module/imp/app_imp.py +++ /dev/null @@ -1,5 +0,0 @@ - - -def load_dynamic(name, pathname, file=None): - """Always raises ah ImportError on pypy""" - raise ImportError('Not implemented') diff --git a/lib-python/modified-2.7.0/distutils/msvc9compiler.py b/lib-python/modified-2.7.0/distutils/msvc9compiler.py --- a/lib-python/modified-2.7.0/distutils/msvc9compiler.py +++ b/lib-python/modified-2.7.0/distutils/msvc9compiler.py @@ -644,6 +644,7 @@ temp_manifest = os.path.join( build_temp, os.path.basename(output_filename) + ".manifest") + ld_args.append('/MANIFEST') ld_args.append('/MANIFESTFILE:' + temp_manifest) if extra_preargs: diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -4,6 +4,8 @@ from pypy.objspace.std.longobject 
import W_LongObject from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask +from pypy.rlib.rbigint import rbigint +from pypy.rlib.rarithmetic import intmask PyLong_Check, PyLong_CheckExact = build_type_checkers("Long") @@ -177,4 +179,31 @@ assert isinstance(w_long, W_LongObject) return w_long.num.sign +UCHARP = rffi.CArrayPtr(rffi.UCHAR) + at cpython_api([UCHARP, rffi.SIZE_T, rffi.INT_real, rffi.INT_real], PyObject) +def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): + little_endian = rffi.cast(lltype.Signed, little_endian) + signed = rffi.cast(lltype.Signed, signed) + result = rbigint() + negative = False + + for i in range(0, n): + if little_endian: + c = intmask(bytes[i]) + else: + c = intmask(bytes[n - i - 1]) + if i == 0 and signed and c & 0x80: + negative = True + if negative: + c = c ^ 0xFF + digit = rbigint.fromint(c) + + result = result.lshift(8) + result = result.add(digit) + + if negative: + result = result.neg() + + return space.newlong_from_rbigint(result) + diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/lib_pypy/pyrepl/pygame_keymap.py b/lib_pypy/pyrepl/pygame_keymap.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/pygame_keymap.py @@ -0,0 +1,250 @@ +# Copyright 2000-2008 Michael Hudson-Doyle +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# keyspec parsing for a pygame console. currently this is simply copy +# n' change from the unix (ie. trad terminal) variant; probably some +# refactoring will happen when I work out how it will work best. + +# A key is represented as *either* + +# a) a (keycode, meta, ctrl) sequence (used for special keys such as +# f1, the up arrow key, etc) +# b) a (unichar, meta, ctrl) sequence (used for printable chars) + +# Because we allow keystokes like '\\C-xu', I'll use the same trick as +# the unix keymap module uses. + +# '\\C-a' --> (K_a, 0, 1) + +# XXX it's actually possible to test this module, so it should have a +# XXX test suite. 
+ +from pygame.locals import * + +_escapes = { + '\\': K_BACKSLASH, + "'" : K_QUOTE, + '"' : K_QUOTEDBL, +# 'a' : '\a', + 'b' : K_BACKSLASH, + 'e' : K_ESCAPE, +# 'f' : '\f', + 'n' : K_RETURN, + 'r' : K_RETURN, + 't' : K_TAB, +# 'v' : '\v' + } + +_keynames = { + 'backspace' : K_BACKSPACE, + 'delete' : K_DELETE, + 'down' : K_DOWN, + 'end' : K_END, + 'enter' : K_KP_ENTER, + 'escape' : K_ESCAPE, + 'f1' : K_F1, 'f2' : K_F2, 'f3' : K_F3, 'f4' : K_F4, + 'f5' : K_F5, 'f6' : K_F6, 'f7' : K_F7, 'f8' : K_F8, + 'f9' : K_F9, 'f10': K_F10,'f11': K_F11,'f12': K_F12, + 'f13': K_F13,'f14': K_F14,'f15': K_F15, + 'home' : K_HOME, + 'insert' : K_INSERT, + 'left' : K_LEFT, + 'pgdown' : K_PAGEDOWN, 'page down' : K_PAGEDOWN, + 'pgup' : K_PAGEUP, 'page up' : K_PAGEUP, + 'return' : K_RETURN, + 'right' : K_RIGHT, + 'space' : K_SPACE, + 'tab' : K_TAB, + 'up' : K_UP, + } + +class KeySpecError(Exception): + pass + +def _parse_key1(key, s): + ctrl = 0 + meta = 0 + ret = '' + while not ret and s < len(key): + if key[s] == '\\': + c = key[s+1].lower() + if _escapes.has_key(c): + ret = _escapes[c] + s += 2 + elif c == "c": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\C must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if ctrl: + raise KeySpecError, "doubled \\C- (char %d of %s)"%( + s + 1, repr(key)) + ctrl = 1 + s += 3 + elif c == "m": + if key[s + 2] != '-': + raise KeySpecError, \ + "\\M must be followed by `-' (char %d of %s)"%( + s + 2, repr(key)) + if meta: + raise KeySpecError, "doubled \\M- (char %d of %s)"%( + s + 1, repr(key)) + meta = 1 + s += 3 + elif c.isdigit(): + n = key[s+1:s+4] + ret = chr(int(n, 8)) + s += 4 + elif c == 'x': + n = key[s+2:s+4] + ret = chr(int(n, 16)) + s += 4 + elif c == '<': + t = key.find('>', s) + if t == -1: + raise KeySpecError, \ + "unterminated \\< starting at char %d of %s"%( + s + 1, repr(key)) + try: + ret = _keynames[key[s+2:t].lower()] + s = t + 1 + except KeyError: + raise KeySpecError, \ + "unrecognised keyname `%s' at char %d 
of %s"%( + key[s+2:t], s + 2, repr(key)) + if ret is None: + return None, s + else: + raise KeySpecError, \ + "unknown backslash escape %s at char %d of %s"%( + `c`, s + 2, repr(key)) + else: + if ctrl: + ret = chr(ord(key[s]) & 0x1f) # curses.ascii.ctrl() + ret = unicode(ret) + else: + ret = unicode(key[s]) + s += 1 + return (ret, meta, ctrl), s + +def parse_keys(key): + s = 0 + r = [] + while s < len(key): + k, s = _parse_key1(key, s) + if k is None: + return None + r.append(k) + return tuple(r) + +def _compile_keymap(keymap): + r = {} + for key, value in keymap.items(): + r.setdefault(key[0], {})[key[1:]] = value + for key, value in r.items(): + if value.has_key(()): + if len(value) <> 1: + raise KeySpecError, \ + "key definitions for %s clash"%(value.values(),) + else: + r[key] = value[()] + else: + r[key] = _compile_keymap(value) + return r + +def compile_keymap(keymap): + r = {} + for key, value in keymap: + k = parse_keys(key) + if value is None and r.has_key(k): + del r[k] + if k is not None: + r[k] = value + return _compile_keymap(r) + +def keyname(key): + longest_match = '' + longest_match_name = '' + for name, keyseq in keyset.items(): + if keyseq and key.startswith(keyseq) and \ + len(keyseq) > len(longest_match): + longest_match = keyseq + longest_match_name = name + if len(longest_match) > 0: + return longest_match_name, len(longest_match) + else: + return None, 0 + +_unescapes = {'\r':'\\r', '\n':'\\n', '\177':'^?'} + +#for k,v in _escapes.items(): +# _unescapes[v] = k + +def unparse_key(keyseq): + if not keyseq: + return '' + name, s = keyname(keyseq) + if name: + if name <> 'escape' or s == len(keyseq): + return '\\<' + name + '>' + unparse_key(keyseq[s:]) + else: + return '\\M-' + unparse_key(keyseq[1:]) + else: + c = keyseq[0] + r = keyseq[1:] + if c == '\\': + p = '\\\\' + elif _unescapes.has_key(c): + p = _unescapes[c] + elif ord(c) < ord(' '): + p = '\\C-%s'%(chr(ord(c)+96),) + elif ord(' ') <= ord(c) <= ord('~'): + p = c + else: + p = 
'\\%03o'%(ord(c),) + return p + unparse_key(r) + +def _unparse_keyf(keyseq): + if not keyseq: + return [] + name, s = keyname(keyseq) + if name: + if name <> 'escape' or s == len(keyseq): + return [name] + _unparse_keyf(keyseq[s:]) + else: + rest = _unparse_keyf(keyseq[1:]) + return ['M-'+rest[0]] + rest[1:] + else: + c = keyseq[0] + r = keyseq[1:] + if c == '\\': + p = '\\' + elif _unescapes.has_key(c): + p = _unescapes[c] + elif ord(c) < ord(' '): + p = 'C-%s'%(chr(ord(c)+96),) + elif ord(' ') <= ord(c) <= ord('~'): + p = c + else: + p = '\\%03o'%(ord(c),) + return [p] + _unparse_keyf(r) + +def unparse_keyf(keyseq): + return " ".join(_unparse_keyf(keyseq)) diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_extmodules.py @@ -0,0 +1,68 @@ +import sys +import pytest + +from pypy.config.pypyoption import get_pypy_config +from pypy.objspace.std import StdObjSpace +from pypy.tool.udir import udir + +mod_init = """ +from pypy.interpreter.mixedmodule import MixedModule + +import time + +class Module(MixedModule): + + appleveldefs = {} + + interpleveldefs = { + 'clock' : 'interp_time.clock', + 'time' : 'interp_time.time_', + 'sleep' : 'interp_time.sleep', + } +""" + +mod_interp = """ +import time + +from pypy.interpreter.gateway import unwrap_spec + +def clock(space): + return space.wrap(time.clock()) + +def time_(space): + return space.wrap(time.time()) + + at unwrap_spec(seconds=float) +def sleep(space, seconds): + time.sleep(seconds) +""" + +old_sys_path = [] + +def init_extmodule_code(): + pkg = udir.join("testext") + pkg.ensure(dir=True) + pkg.join("__init__.py").write("# package") + mod = pkg.join("extmod") + mod.ensure(dir=True) + mod.join("__init__.py").write(mod_init) + mod.join("interp_time.py").write(mod_interp) + +class AppTestExtModules(object): + def setup_class(cls): + init_extmodule_code() + conf = get_pypy_config() + conf.objspace.extmodules = 
'testext.extmod' + old_sys_path[:] = sys.path[:] + sys.path.insert(0, str(udir)) + space = StdObjSpace(conf) + cls.space = space + + def teardown_class(cls): + sys.path[:] = old_sys_path + + @pytest.mark.skipif("config.option.runappdirect") + def test_import(self): + import extmod + assert extmod.__file__.endswith('extmod') + assert type(extmod.time()) is float diff --git a/lib_pypy/pyrepl/tests/infrastructure.py b/lib_pypy/pyrepl/tests/infrastructure.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/infrastructure.py @@ -0,0 +1,82 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.reader import Reader +from pyrepl.console import Console, Event +import unittest +import sys + +class EqualsAnything(object): + def __eq__(self, other): + return True +EA = EqualsAnything() + +class TestConsole(Console): + height = 24 + width = 80 + encoding = 'utf-8' + + def __init__(self, events, testcase, verbose=False): + self.events = events + self.next_screen = None + self.verbose = verbose + self.testcase = testcase + + def refresh(self, screen, xy): + if self.next_screen is not None: + self.testcase.assertEqual( + screen, self.next_screen, + "[ %s != %s after %r ]"%(screen, self.next_screen, + self.last_event_name)) + + def get_event(self, block=1): + ev, sc = self.events.pop(0) + self.next_screen = sc + if not isinstance(ev, tuple): + ev = (ev,) + self.last_event_name = ev[0] + if self.verbose: + print "event", ev + return Event(*ev) + +class TestReader(Reader): + def get_prompt(self, lineno, cursor_on_line): + return '' + def refresh(self): + Reader.refresh(self) + self.dirty = True + +class ReaderTestCase(unittest.TestCase): + def run_test(self, test_spec, reader_class=TestReader): + # remember to finish your test_spec with 'accept' or similar! 
+ con = TestConsole(test_spec, self) + reader = reader_class(con) + reader.readline() + +class BasicTestRunner: + def run(self, test): + result = unittest.TestResult() + test(result) + return result + +def run_testcase(testclass): + suite = unittest.makeSuite(testclass) + runner = unittest.TextTestRunner(sys.stdout, verbosity=1) + result = runner.run(suite) + diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -144,3 +144,20 @@ """), ]) assert module.from_string() == 0x1234 + + def test_frombytearray(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC", 2, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x9ABC + assert module.from_bytearray(True, True) == -0x6543 + assert module.from_bytearray(False, False) == 0xBC9A + assert module.from_bytearray(False, True) == -0x4365 + diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) 
+PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -113,10 +115,11 @@ LLInterpreter.current_interpreter = prev_interpreter return res - @staticmethod def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) return CPU386.cast_adr_to_int(adr) + cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)' + cast_ptr_to_int = staticmethod(cast_ptr_to_int) 
all_null_registers = lltype.malloc(rffi.LONGP.TO, 24, flavor='raw', zero=True, @@ -127,7 +130,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" block @@ -147,7 +150,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -163,7 +165,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -89,6 +89,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -487,7 +487,9 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime - + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) + # experimental operations in support of thread cloning, only # 
implemented by the Mark&Sweep GC 'gc_x_swap_pool': LLOp(canraise=(MemoryError,), canunwindgc=True), diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -365,7 +365,11 @@ def setbuiltinmodule(self, importname): """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" - fullname = "pypy.module.%s" % importname + if '.' in importname: + fullname = importname + importname = fullname.rsplit('.', 1)[1] + else: + fullname = "pypy.module.%s" % importname Module = __import__(fullname, None, None, ["Module"]).Module @@ -428,6 +432,11 @@ if value and name not in modules: modules.append(name) + if self.config.objspace.extmodules: + for name in self.config.objspace.extmodules.split(','): + if name not in modules: + modules.append(name) + # a bit of custom logic: time2 or rctime take precedence over time # XXX this could probably be done as a "requires" in the config if ('time2' in modules or 'rctime' in modules) and 'time' in modules: @@ -745,7 +754,12 @@ """Unpack an iterable object into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" w_iterator = self.iter(w_iterable) - items = [] + # If we know the expected length we can preallocate. 
+ if expected_length == -1: + items = [] + else: + items = [None] * expected_length + idx = 0 while True: try: w_item = self.next(w_iterator) @@ -753,19 +767,22 @@ if not e.match(self, self.w_StopIteration): raise break # done - if expected_length != -1 and len(items) == expected_length: + if expected_length != -1 and idx == expected_length: raise OperationError(self.w_ValueError, self.wrap("too many values to unpack")) - items.append(w_item) - if expected_length != -1 and len(items) < expected_length: - i = len(items) - if i == 1: + if expected_length == -1: + items.append(w_item) + else: + items[idx] = w_item + idx += 1 + if expected_length != -1 and idx < expected_length: + if idx == 1: plural = "" else: plural = "s" raise OperationError(self.w_ValueError, self.wrap("need more than %d value%s to unpack" % - (i, plural))) + (idx, plural))) return items unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable, @@ -1333,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus 
+extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -211,8 +211,11 @@ def ll_stringslice_minusone(s): return s.ll_substring(0, s.ll_strlen()-1) - def ll_split_chr(RESULT, s, c): - return RESULT.ll_convert_from_array(s.ll_split_chr(c)) + def ll_split_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_split_chr(c, max)) + + def ll_rsplit_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_rsplit_chr(c, max)) def ll_int(s, base): if not 2 <= base <= 36: diff --git a/lib_pypy/pyrepl/console.py b/lib_pypy/pyrepl/console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/console.py @@ -0,0 +1,93 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission 
notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +class Event: + """An Event. `evt' is 'key' or somesuch.""" + + def __init__(self, evt, data, raw=''): + self.evt = evt + self.data = data + self.raw = raw + + def __repr__(self): + return 'Event(%r, %r)'%(self.evt, self.data) + +class Console: + """Attributes: + + screen, + height, + width, + """ + + def refresh(self, screen, xy): + pass + + def prepare(self): + pass + + def restore(self): + pass + + def move_cursor(self, x, y): + pass + + def set_cursor_vis(self, vis): + pass + + def getheightwidth(self): + """Return (height, width) where height and width are the height + and width of the terminal window in characters.""" + pass + + def get_event(self, block=1): + """Return an Event instance. Returns None if |block| is false + and there is no event pending, otherwise waits for the + completion of an event.""" + pass + + def beep(self): + pass + + def clear(self): + """Wipe the screen""" + pass + + def finish(self): + """Move the cursor to the end of the display and otherwise get + ready for end. XXX could be merged with restore? 
Hmm.""" + pass + + def flushoutput(self): + """Flush all output to the screen (assuming there's some + buffering going on somewhere).""" + pass + + def forgetinput(self): + """Forget all pending, but not yet processed input.""" + pass + + def getpending(self): + """Return the characters that have been typed but not yet + processed.""" + pass + + def wait(self): + """Wait for an event.""" + pass diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/reader.py @@ -0,0 +1,614 @@ +# Copyright 2000-2010 Michael Hudson-Doyle +# Antonio Cuni +# Armin Rigo +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import types +from pyrepl import unicodedata_ +from pyrepl import commands +from pyrepl import input + +def _make_unctrl_map(): + uc_map = {} + for c in map(unichr, range(256)): + if unicodedata_.category(c)[0] <> 'C': + uc_map[c] = c + for i in range(32): + c = unichr(i) + uc_map[c] = u'^' + unichr(ord('A') + i - 1) + uc_map['\t'] = ' ' # display TABs as 4 characters + uc_map['\177'] = u'^?' 
+ for i in range(256): + c = unichr(i) + if not uc_map.has_key(c): + uc_map[c] = u'\\%03o'%i + return uc_map + +# disp_str proved to be a bottleneck for large inputs, so it's been +# rewritten in C; it's not required though. +try: + raise ImportError # currently it's borked by the unicode support + + from _pyrepl_utils import disp_str, init_unctrl_map + + init_unctrl_map(_make_unctrl_map()) + + del init_unctrl_map +except ImportError: + def _my_unctrl(c, u=_make_unctrl_map()): + if c in u: + return u[c] + else: + if unicodedata_.category(c).startswith('C'): + return '\u%04x'%(ord(c),) + else: + return c + + def disp_str(buffer, join=''.join, uc=_my_unctrl): + """ disp_str(buffer:string) -> (string, [int]) + + Return the string that should be the printed represenation of + |buffer| and a list detailing where the characters of |buffer| + get used up. E.g.: + + >>> disp_str(chr(3)) + ('^C', [1, 0]) + + the list always contains 0s or 1s at present; it could conceivably + go higher as and when unicode support happens.""" + s = map(uc, buffer) + return (join(s), + map(ord, join(map(lambda x:'\001'+(len(x)-1)*'\000', s)))) + + del _my_unctrl + +del _make_unctrl_map + +# syntax classes: + +[SYNTAX_WHITESPACE, + SYNTAX_WORD, + SYNTAX_SYMBOL] = range(3) + +def make_default_syntax_table(): + # XXX perhaps should use some unicodedata here? 
+ st = {} + for c in map(unichr, range(256)): + st[c] = SYNTAX_SYMBOL + for c in [a for a in map(unichr, range(256)) if a.isalpha()]: + st[c] = SYNTAX_WORD + st[u'\n'] = st[u' '] = SYNTAX_WHITESPACE + return st + +default_keymap = tuple( + [(r'\C-a', 'beginning-of-line'), + (r'\C-b', 'left'), + (r'\C-c', 'interrupt'), + (r'\C-d', 'delete'), + (r'\C-e', 'end-of-line'), + (r'\C-f', 'right'), + (r'\C-g', 'cancel'), + (r'\C-h', 'backspace'), + (r'\C-j', 'accept'), + (r'\', 'accept'), + (r'\C-k', 'kill-line'), + (r'\C-l', 'clear-screen'), + (r'\C-m', 'accept'), + (r'\C-q', 'quoted-insert'), + (r'\C-t', 'transpose-characters'), + (r'\C-u', 'unix-line-discard'), + (r'\C-v', 'quoted-insert'), + (r'\C-w', 'unix-word-rubout'), + (r'\C-x\C-u', 'upcase-region'), + (r'\C-y', 'yank'), + (r'\C-z', 'suspend'), + + (r'\M-b', 'backward-word'), + (r'\M-c', 'capitalize-word'), + (r'\M-d', 'kill-word'), + (r'\M-f', 'forward-word'), + (r'\M-l', 'downcase-word'), + (r'\M-t', 'transpose-words'), + (r'\M-u', 'upcase-word'), + (r'\M-y', 'yank-pop'), + (r'\M--', 'digit-arg'), + (r'\M-0', 'digit-arg'), + (r'\M-1', 'digit-arg'), + (r'\M-2', 'digit-arg'), + (r'\M-3', 'digit-arg'), + (r'\M-4', 'digit-arg'), + (r'\M-5', 'digit-arg'), + (r'\M-6', 'digit-arg'), + (r'\M-7', 'digit-arg'), + (r'\M-8', 'digit-arg'), + (r'\M-9', 'digit-arg'), + #(r'\M-\n', 'insert-nl'), + ('\\\\', 'self-insert')] + \ + [(c, 'self-insert') + for c in map(chr, range(32, 127)) if c <> '\\'] + \ + [(c, 'self-insert') + for c in map(chr, range(128, 256)) if c.isalpha()] + \ + [(r'\', 'up'), + (r'\', 'down'), + (r'\', 'left'), + (r'\', 'right'), + (r'\', 'quoted-insert'), + (r'\', 'delete'), + (r'\', 'backspace'), + (r'\M-\', 'backward-kill-word'), + (r'\', 'end'), + (r'\', 'home'), + (r'\', 'help'), + (r'\EOF', 'end'), # the entries in the terminfo database for xterms + (r'\EOH', 'home'), # seem to be wrong. 
this is a less than ideal + # workaround + ]) + +del c # from the listcomps + +class Reader(object): + """The Reader class implements the bare bones of a command reader, + handling such details as editing and cursor motion. What it does + not support are such things as completion or history support - + these are implemented elsewhere. + + Instance variables of note include: + + * buffer: + A *list* (*not* a string at the moment :-) containing all the + characters that have been entered. + * console: + Hopefully encapsulates the OS dependent stuff. + * pos: + A 0-based index into `buffer' for where the insertion point + is. + * screeninfo: + Ahem. This list contains some info needed to move the + insertion point around reasonably efficiently. I'd like to + get rid of it, because its contents are obtuse (to put it + mildly) but I haven't worked out if that is possible yet. + * cxy, lxy: + the position of the insertion point in screen ... XXX + * syntax_table: + Dictionary mapping characters to `syntax class'; read the + emacs docs to see what this means :-) + * commands: + Dictionary mapping command names to command classes. + * arg: + The emacs-style prefix argument. It will be None if no such + argument has been provided. + * dirty: + True if we need to refresh the display. + * kill_ring: + The emacs-style kill-ring; manipulated with yank & yank-pop + * ps1, ps2, ps3, ps4: + prompts. ps1 is the prompt for a one-line input; for a + multiline input it looks like: + ps2> first line of input goes here + ps3> second and further + ps3> lines get ps3 + ... + ps4> and the last one gets ps4 + As with the usual top-level, you can set these to instances if + you like; str() will be called on them (once) at the beginning + of each command. Don't put really long or newline containing + strings here, please! + This is just the default policy; you can change it freely by + overriding get_prompt() (and indeed some standard subclasses + do). 
+ * finished: + handle1 will set this to a true value if a command signals + that we're done. + """ + + help_text = """\ +This is pyrepl. Hear my roar. + +Helpful text may appear here at some point in the future when I'm +feeling more loquacious than I am now.""" + + msg_at_bottom = True + + def __init__(self, console): + self.buffer = [] + self.ps1 = "->> " + self.ps2 = "/>> " + self.ps3 = "|.. " + self.ps4 = "\__ " + self.kill_ring = [] + self.arg = None + self.finished = 0 + self.console = console + self.commands = {} + self.msg = '' + for v in vars(commands).values(): + if ( isinstance(v, type) + and issubclass(v, commands.Command) + and v.__name__[0].islower() ): + self.commands[v.__name__] = v + self.commands[v.__name__.replace('_', '-')] = v + self.syntax_table = make_default_syntax_table() + self.input_trans_stack = [] + self.keymap = self.collect_keymap() + self.input_trans = input.KeymapTranslator( + self.keymap, + invalid_cls='invalid-key', + character_cls='self-insert') + + def collect_keymap(self): + return default_keymap + + def calc_screen(self): + """The purpose of this method is to translate changes in + self.buffer into changes in self.screen. Currently it rips + everything down and starts from scratch, which whilst not + especially efficient is certainly simple(r). 
+ """ + lines = self.get_unicode().split("\n") + screen = [] + screeninfo = [] + w = self.console.width - 1 + p = self.pos + for ln, line in zip(range(len(lines)), lines): + ll = len(line) + if 0 <= p <= ll: + if self.msg and not self.msg_at_bottom: + for mline in self.msg.split("\n"): + screen.append(mline) + screeninfo.append((0, [])) + self.lxy = p, ln + prompt = self.get_prompt(ln, ll >= p >= 0) + while '\n' in prompt: + pre_prompt, _, prompt = prompt.partition('\n') + screen.append(pre_prompt) + screeninfo.append((0, [])) + p -= ll + 1 + prompt, lp = self.process_prompt(prompt) + l, l2 = disp_str(line) + wrapcount = (len(l) + lp) / w + if wrapcount == 0: + screen.append(prompt + l) + screeninfo.append((lp, l2+[1])) + else: + screen.append(prompt + l[:w-lp] + "\\") + screeninfo.append((lp, l2[:w-lp])) + for i in range(-lp + w, -lp + wrapcount*w, w): + screen.append(l[i:i+w] + "\\") + screeninfo.append((0, l2[i:i + w])) + screen.append(l[wrapcount*w - lp:]) + screeninfo.append((0, l2[wrapcount*w - lp:]+[1])) + self.screeninfo = screeninfo + self.cxy = self.pos2xy(self.pos) + if self.msg and self.msg_at_bottom: + for mline in self.msg.split("\n"): + screen.append(mline) + screeninfo.append((0, [])) + return screen + + def process_prompt(self, prompt): + """ Process the prompt. + + This means calculate the length of the prompt. The character \x01 + and \x02 are used to bracket ANSI control sequences and need to be + excluded from the length calculation. So also a copy of the prompt + is returned with these control characters removed. 
""" + + out_prompt = '' + l = len(prompt) + pos = 0 + while True: + s = prompt.find('\x01', pos) + if s == -1: + break + e = prompt.find('\x02', s) + if e == -1: + break + # Found start and end brackets, subtract from string length + l = l - (e-s+1) + out_prompt += prompt[pos:s] + prompt[s+1:e] + pos = e+1 + out_prompt += prompt[pos:] + return out_prompt, l + + def bow(self, p=None): + """Return the 0-based index of the word break preceding p most + immediately. + + p defaults to self.pos; word boundaries are determined using + self.syntax_table.""" + if p is None: + p = self.pos + st = self.syntax_table + b = self.buffer + p -= 1 + while p >= 0 and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD: + p -= 1 + while p >= 0 and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD: + p -= 1 + return p + 1 + + def eow(self, p=None): + """Return the 0-based index of the word break following p most + immediately. + + p defaults to self.pos; word boundaries are determined using + self.syntax_table.""" + if p is None: + p = self.pos + st = self.syntax_table + b = self.buffer + while p < len(b) and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD: + p += 1 + while p < len(b) and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD: + p += 1 + return p + + def bol(self, p=None): + """Return the 0-based index of the line break preceding p most + immediately. + + p defaults to self.pos.""" + # XXX there are problems here. + if p is None: + p = self.pos + b = self.buffer + p -= 1 + while p >= 0 and b[p] <> '\n': + p -= 1 + return p + 1 + + def eol(self, p=None): + """Return the 0-based index of the line break following p most + immediately. + + p defaults to self.pos.""" + if p is None: + p = self.pos + b = self.buffer + while p < len(b) and b[p] <> '\n': + p += 1 + return p + + def get_arg(self, default=1): + """Return any prefix argument that the user has supplied, + returning `default' if there is None. 
`default' defaults + (groan) to 1.""" + if self.arg is None: + return default + else: + return self.arg + + def get_prompt(self, lineno, cursor_on_line): + """Return what should be in the left-hand margin for line + `lineno'.""" + if self.arg is not None and cursor_on_line: + return "(arg: %s) "%self.arg + if "\n" in self.buffer: + if lineno == 0: + return self._ps2 + elif lineno == self.buffer.count("\n"): + return self._ps4 + else: + return self._ps3 + else: + return self._ps1 + + def push_input_trans(self, itrans): + self.input_trans_stack.append(self.input_trans) + self.input_trans = itrans + + def pop_input_trans(self): + self.input_trans = self.input_trans_stack.pop() + + def pos2xy(self, pos): + """Return the x, y coordinates of position 'pos'.""" + # this *is* incomprehensible, yes. + y = 0 + assert 0 <= pos <= len(self.buffer) + if pos == len(self.buffer): + y = len(self.screeninfo) - 1 + p, l2 = self.screeninfo[y] + return p + len(l2) - 1, y + else: + for p, l2 in self.screeninfo: + l = l2.count(1) + if l > pos: + break + else: + pos -= l + y += 1 + c = 0 + i = 0 + while c < pos: + c += l2[i] + i += 1 + while l2[i] == 0: + i += 1 + return p + i, y + + def insert(self, text): + """Insert 'text' at the insertion point.""" + self.buffer[self.pos:self.pos] = list(text) + self.pos += len(text) + self.dirty = 1 + + def update_cursor(self): + """Move the cursor to reflect changes in self.pos""" + self.cxy = self.pos2xy(self.pos) + self.console.move_cursor(*self.cxy) + + def after_command(self, cmd): + """This function is called to allow post command cleanup.""" + if getattr(cmd, "kills_digit_arg", 1): + if self.arg is not None: + self.dirty = 1 + self.arg = None + + def prepare(self): + """Get ready to run. Call restore when finished. 
You must not + write to the console in between the calls to prepare and + restore.""" + try: + self.console.prepare() + self.arg = None + self.screeninfo = [] + self.finished = 0 + del self.buffer[:] + self.pos = 0 + self.dirty = 1 + self.last_command = None + self._ps1, self._ps2, self._ps3, self._ps4 = \ + map(str, [self.ps1, self.ps2, self.ps3, self.ps4]) + except: + self.restore() + raise + + def last_command_is(self, klass): + if not self.last_command: + return 0 + return issubclass(klass, self.last_command) + + def restore(self): + """Clean up after a run.""" + self.console.restore() + + def finish(self): + """Called when a command signals that we're finished.""" + pass + + def error(self, msg="none"): + self.msg = "! " + msg + " " + self.dirty = 1 + self.console.beep() + + def update_screen(self): + if self.dirty: + self.refresh() + + def refresh(self): + """Recalculate and refresh the screen.""" + # this call sets up self.cxy, so call it first. + screen = self.calc_screen() + self.console.refresh(screen, self.cxy) + self.dirty = 0 # forgot this for a while (blush) + + def do_cmd(self, cmd): + #print cmd + if isinstance(cmd[0], str): + cmd = self.commands.get(cmd[0], + commands.invalid_command)(self, cmd) + elif isinstance(cmd[0], type): + cmd = cmd[0](self, cmd) + + cmd.do() + + self.after_command(cmd) + + if self.dirty: + self.refresh() + else: + self.update_cursor() + + if not isinstance(cmd, commands.digit_arg): + self.last_command = cmd.__class__ + + self.finished = cmd.finish + if self.finished: + self.console.finish() + self.finish() + + def handle1(self, block=1): + """Handle a single event. 
Wait as long as it takes if block + is true (the default), otherwise return None if no event is + pending.""" + + if self.msg: + self.msg = '' + self.dirty = 1 + + while 1: + event = self.console.get_event(block) + if not event: # can only happen if we're not blocking + return None + + if event.evt == 'key': + self.input_trans.push(event) + elif event.evt == 'scroll': + self.refresh() + elif event.evt == 'resize': + self.refresh() + else: + pass + + cmd = self.input_trans.get() + + if cmd is None: + if block: + continue + else: + return None + + self.do_cmd(cmd) + return 1 + + def push_char(self, char): + self.console.push_char(char) + self.handle1(0) + + def readline(self): + """Read a line. The implementation of this method also shows + how to drive Reader if you want more control over the event + loop.""" + self.prepare() + try: + self.refresh() + while not self.finished: + self.handle1() + return self.get_buffer() + finally: + self.restore() + + def bind(self, spec, command): + self.keymap = self.keymap + ((spec, command),) + self.input_trans = input.KeymapTranslator( + self.keymap, + invalid_cls='invalid-key', + character_cls='self-insert') + + def get_buffer(self, encoding=None): + if encoding is None: + encoding = self.console.encoding + return u''.join(self.buffer).encode(self.console.encoding) + + def get_unicode(self): + """Return the current buffer as a unicode string.""" + return u''.join(self.buffer) + +def test(): + from pyrepl.unix_console import UnixConsole + reader = Reader(UnixConsole()) + reader.ps1 = "**> " + reader.ps2 = "/*> " + reader.ps3 = "|*> " + reader.ps4 = "\*> " + while reader.readline(): + pass + +if __name__=='__main__': + test() diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th 
operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. 
""" - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -1,9 +1,81 @@ # encoding: iso-8859-15 from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.unicodeobject import Py_UNICODE +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.module.cpyext.unicodeobject import ( + Py_UNICODE, PyUnicodeObject, new_empty_unicode) +from pypy.module.cpyext.api import PyObjectP, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef from pypy.rpython.lltypesystem import rffi, lltype import sys, py +class AppTestUnicodeObject(AppTestCpythonExtensionBase): + def test_unicodeobject(self): + module = self.import_extension('foo', [ + ("get_hello1", "METH_NOARGS", + """ + return PyUnicode_FromStringAndSize( + "Hello world", 11); + """), + ("test_GetSize", "METH_NOARGS", + """ + PyObject* s = PyUnicode_FromString("Hello world"); + int result = 0; + + if(PyUnicode_GetSize(s) == 11) { + result = 1; + } + if(s->ob_type->tp_basicsize != sizeof(void*)*4) + result = 0; + Py_DECREF(s); + return 
PyBool_FromLong(result); + """), + ("test_GetSize_exception", "METH_NOARGS", + """ + PyObject* f = PyFloat_FromDouble(1.0); + Py_ssize_t size = PyUnicode_GetSize(f); + + Py_DECREF(f); + return NULL; + """), + ("test_is_unicode", "METH_VARARGS", + """ + return PyBool_FromLong(PyUnicode_Check(PyTuple_GetItem(args, 0))); + """)]) + assert module.get_hello1() == u'Hello world' + assert module.test_GetSize() + raises(TypeError, module.test_GetSize_exception) + + assert module.test_is_unicode(u"") + assert not module.test_is_unicode(()) + + def test_unicode_buffer_init(self): + module = self.import_extension('foo', [ + ("getunicode", "METH_NOARGS", + """ + PyObject *s, *t; + Py_UNICODE* c; + Py_ssize_t len; + + s = PyUnicode_FromUnicode(NULL, 4); + if (s == NULL) + return NULL; + t = PyUnicode_FromUnicode(NULL, 3); + if (t == NULL) + return NULL; + Py_DECREF(t); + c = PyUnicode_AsUnicode(s); + c[0] = 'a'; + c[1] = 0xe9; + c[3] = 'c'; + return s; + """), + ]) + s = module.getunicode() + assert len(s) == 4 + assert s == u'a�\x00c' + + + class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): assert api.PyUnicode_GET_SIZE(space.wrap(u'sp�m')) == 4 @@ -77,6 +149,28 @@ assert space.unwrap(w_res) == u'sp�' rffi.free_charp(s) + def test_unicode_resize(self, space, api): + py_uni = new_empty_unicode(space, 10) + ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + py_uni.c_buffer[0] = u'a' + py_uni.c_buffer[1] = u'b' + py_uni.c_buffer[2] = u'c' + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 3) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 3 + assert py_uni.c_buffer[1] == u'b' + assert py_uni.c_buffer[3] == u'\x00' + # the same for growing + ar[0] = rffi.cast(PyObject, py_uni) + api.PyUnicode_Resize(ar, 10) + py_uni = rffi.cast(PyUnicodeObject, ar[0]) + assert py_uni.c_size == 10 + assert py_uni.c_buffer[1] == 'b' + assert py_uni.c_buffer[10] == '\x00' + Py_DecRef(space, ar[0]) + lltype.free(ar, flavor='raw') + def 
test_AsUTF8String(self, space, api): w_u = space.wrap(u'sp�m') w_res = api.PyUnicode_AsUTF8String(w_u) @@ -235,13 +329,13 @@ x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) - w_y = api.PyUnicode_FromUnicode(target_chunk, 4) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, 4)) assert space.eq_w(w_y, space.wrap(u"abcd")) size = api.PyUnicode_GET_SIZE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, size) - w_y = api.PyUnicode_FromUnicode(target_chunk, size) + w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, size)) assert space.eq_w(w_y, w_x) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. - # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. 
+ if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/module/cpyext/include/import.h b/pypy/module/cpyext/include/import.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/import.h @@ -0,0 +1,1 @@ +/* empty */ diff --git a/lib_pypy/pyrepl/python_reader.py b/lib_pypy/pyrepl/python_reader.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/python_reader.py @@ -0,0 +1,392 @@ +# Copyright 2000-2007 Michael Hudson-Doyle +# Bob Ippolito +# Maciek Fijalkowski +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. 
+# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# one impressive collections of imports: +from pyrepl.completing_reader import CompletingReader +from pyrepl.historical_reader import HistoricalReader +from pyrepl import completing_reader, reader +from pyrepl import copy_code, commands, completer +from pyrepl import module_lister +import new, sys, os, re, code, traceback +import atexit, warnings +try: + import cPickle as pickle +except ImportError: + import pickle +try: + import imp + imp.find_module("twisted") + from twisted.internet import reactor + from twisted.internet.abstract import FileDescriptor +except ImportError: + default_interactmethod = "interact" +else: + default_interactmethod = "twistedinteract" + +CommandCompiler = code.CommandCompiler + +def eat_it(*args): + """this function eats warnings, if you were wondering""" + pass + +class maybe_accept(commands.Command): + def do(self): + r = self.reader + text = r.get_unicode() + try: + # ooh, look at the hack: + code = r.compiler("#coding:utf-8\n"+text.encode('utf-8')) + except (OverflowError, SyntaxError, ValueError): + self.finish = 1 + else: + if code is None: + r.insert("\n") + else: + self.finish = 1 + +from_line_prog = re.compile( + "^from\s+(?P[A-Za-z_.0-9]*)\s+import\s+(?P[A-Za-z_.0-9]*)") +import_line_prog = re.compile( + "^(?:import|from)\s+(?P[A-Za-z_.0-9]*)\s*$") + +def mk_saver(reader): + def saver(reader=reader): + try: + file = open(os.path.expanduser("~/.pythoni.hist"), "w") + except IOError: + pass + else: + pickle.dump(reader.history, file) + file.close() + 
return saver + +class PythonicReader(CompletingReader, HistoricalReader): + def collect_keymap(self): + return super(PythonicReader, self).collect_keymap() + ( + (r'\n', 'maybe-accept'), + (r'\M-\n', 'insert-nl')) + + def __init__(self, console, locals, + compiler=None): + super(PythonicReader, self).__init__(console) + self.completer = completer.Completer(locals) + st = self.syntax_table + for c in "._0123456789": + st[c] = reader.SYNTAX_WORD + self.locals = locals + if compiler is None: + self.compiler = CommandCompiler() + else: + self.compiler = compiler + try: + file = open(os.path.expanduser("~/.pythoni.hist")) + except IOError: + pass + else: + try: + self.history = pickle.load(file) + except: + self.history = [] + self.historyi = len(self.history) + file.close() + atexit.register(mk_saver(self)) + for c in [maybe_accept]: + self.commands[c.__name__] = c + self.commands[c.__name__.replace('_', '-')] = c + + def get_completions(self, stem): + b = self.get_unicode() + m = import_line_prog.match(b) + if m: + if not self._module_list_ready: + module_lister._make_module_list() + self._module_list_ready = True + + mod = m.group("mod") + try: + return module_lister.find_modules(mod) + except ImportError: + pass + m = from_line_prog.match(b) + if m: + mod, name = m.group("mod", "name") + try: + l = module_lister._packages[mod] + except KeyError: + try: + mod = __import__(mod, self.locals, self.locals, ['']) + return [x for x in dir(mod) if x.startswith(name)] + except ImportError: + pass + else: + return [x[len(mod) + 1:] + for x in l if x.startswith(mod + '.' 
+ name)] + try: + l = completing_reader.uniqify(self.completer.complete(stem)) + return l + except (NameError, AttributeError): + return [] + +class ReaderConsole(code.InteractiveInterpreter): + II_init = code.InteractiveInterpreter.__init__ + def __init__(self, console, locals=None): + if locals is None: + locals = {} + self.II_init(locals) + self.compiler = CommandCompiler() + self.compile = self.compiler.compiler + self.reader = PythonicReader(console, locals, self.compiler) + locals['Reader'] = self.reader + + def run_user_init_file(self): + for key in "PYREPLSTARTUP", "PYTHONSTARTUP": + initfile = os.environ.get(key) + if initfile is not None and os.path.exists(initfile): + break + else: + return + try: + execfile(initfile, self.locals, self.locals) + except: + etype, value, tb = sys.exc_info() + traceback.print_exception(etype, value, tb.tb_next) + + def execute(self, text): + try: + # ooh, look at the hack: + code = self.compile("# coding:utf8\n"+text.encode('utf-8'), + '', 'single') + except (OverflowError, SyntaxError, ValueError): + self.showsyntaxerror("") + else: + self.runcode(code) + sys.stdout.flush() + + def interact(self): + while 1: + try: # catches EOFError's and KeyboardInterrupts during execution + try: # catches KeyboardInterrupts during editing + try: # warning saver + # can't have warnings spewed onto terminal + sv = warnings.showwarning + warnings.showwarning = eat_it + l = unicode(self.reader.readline(), 'utf-8') + finally: + warnings.showwarning = sv + except KeyboardInterrupt: + print "KeyboardInterrupt" + else: + if l: + self.execute(l) + except EOFError: + break + except KeyboardInterrupt: + continue + + def prepare(self): + self.sv_sw = warnings.showwarning + warnings.showwarning = eat_it + self.reader.prepare() + self.reader.refresh() # we want :after methods... 
+ + def restore(self): + self.reader.restore() + warnings.showwarning = self.sv_sw + + def handle1(self, block=1): + try: + r = 1 + r = self.reader.handle1(block) + except KeyboardInterrupt: + self.restore() + print "KeyboardInterrupt" + self.prepare() + else: + if self.reader.finished: + text = self.reader.get_unicode() + self.restore() + if text: + self.execute(text) + self.prepare() + return r + + def tkfilehandler(self, file, mask): + try: + self.handle1(block=0) + except: + self.exc_info = sys.exc_info() + + # how the do you get this to work on Windows (without + # createfilehandler)? threads, I guess + def really_tkinteract(self): + import _tkinter + _tkinter.createfilehandler( + self.reader.console.input_fd, _tkinter.READABLE, + self.tkfilehandler) + + self.exc_info = None + while 1: + # dooneevent will return 0 without blocking if there are + # no Tk windows, 1 after blocking until an event otherwise + # so the following does what we want (this wasn't expected + # to be obvious). + if not _tkinter.dooneevent(_tkinter.ALL_EVENTS): + self.handle1(block=1) + if self.exc_info: + type, value, tb = self.exc_info + self.exc_info = None + raise type, value, tb + + def tkinteract(self): + """Run a Tk-aware Python interactive session. + + This function simulates the Python top-level in a way that + allows Tk's mainloop to run.""" + + # attempting to understand the control flow of this function + # without help may cause internal injuries. so, some + # explanation. + + # The outer while loop is there to restart the interaction if + # the user types control-c when execution is deep in our + # innards. I'm not sure this can't leave internals in an + # inconsistent state, but it's a good start. + + # then the inside loop keeps calling self.handle1 until + # _tkinter gets imported; then control shifts to + # self.really_tkinteract, above. 
+ + # this function can only return via an exception; we mask + # EOFErrors (but they end the interaction) and + # KeyboardInterrupts cause a restart. All other exceptions + # are likely bugs in pyrepl (well, 'cept for SystemExit, of + # course). + + while 1: + try: + try: + self.prepare() + try: + while 1: + if sys.modules.has_key("_tkinter"): + self.really_tkinteract() + # really_tkinteract is not expected to + # return except via an exception, but: + break + self.handle1() + except EOFError: + pass + finally: + self.restore() + except KeyboardInterrupt: + continue + else: + break + + def twistedinteract(self): + from twisted.internet import reactor + from twisted.internet.abstract import FileDescriptor + import signal + outerself = self + class Me(FileDescriptor): + def fileno(self): + """ We want to select on FD 0 """ + return 0 + + def doRead(self): + """called when input is ready""" + try: + outerself.handle1() + except EOFError: + reactor.stop() + + reactor.addReader(Me()) + reactor.callWhenRunning(signal.signal, + signal.SIGINT, + signal.default_int_handler) + self.prepare() + try: + reactor.run() + finally: + self.restore() + + + def cocoainteract(self, inputfilehandle=None, outputfilehandle=None): + # only call this when there's a run loop already going! 
+ # note that unlike the other *interact methods, this returns immediately + from cocoasupport import CocoaInteracter + self.cocoainteracter = CocoaInteracter.alloc().init(self, inputfilehandle, outputfilehandle) + + +def main(use_pygame_console=0, interactmethod=default_interactmethod, print_banner=True, clear_main=True): + si, se, so = sys.stdin, sys.stderr, sys.stdout + try: + if 0 and use_pygame_console: # pygame currently borked + from pyrepl.pygame_console import PyGameConsole, FakeStdin, FakeStdout + con = PyGameConsole() + sys.stderr = sys.stdout = FakeStdout(con) + sys.stdin = FakeStdin(con) + else: + from pyrepl.unix_console import UnixConsole + try: + import locale + except ImportError: + encoding = None + else: + if hasattr(locale, 'nl_langinfo') \ + and hasattr(locale, 'CODESET'): + encoding = locale.nl_langinfo(locale.CODESET) + elif os.environ.get('TERM_PROGRAM') == 'Apple_Terminal': + # /me whistles innocently... + code = int(os.popen( + "defaults read com.apple.Terminal StringEncoding" + ).read()) + if code == 4: + encoding = 'utf-8' + # More could go here -- and what's here isn't + # bulletproof. What would be? AppleScript? + # Doesn't seem to be possible. + else: + encoding = None + else: + encoding = None # so you get ASCII... + con = UnixConsole(0, 1, None, encoding) + if print_banner: + print "Python", sys.version, "on", sys.platform + print 'Type "help", "copyright", "credits" or "license" '\ + 'for more information.' 
+ sys.path.insert(0, os.getcwd()) + + if clear_main and __name__ != '__main__': + mainmod = new.module('__main__') + sys.modules['__main__'] = mainmod + else: + mainmod = sys.modules['__main__'] + + rc = ReaderConsole(con, mainmod.__dict__) + rc.reader._module_list_ready = False + rc.run_user_init_file() + getattr(rc, interactmethod)() + finally: + sys.stdin, sys.stderr, sys.stdout = si, se, so + +if __name__ == '__main__': + main() diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pyfile.py @@ -0,0 +1,68 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, CONST_STRING, FILEP, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject) +from pypy.interpreter.error import OperationError +from pypy.module._file.interp_file import W_File + +PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File) + + at cpython_api([PyObject, rffi.INT_real], PyObject) +def PyFile_GetLine(space, w_obj, n): + """ + Equivalent to p.readline([n]), this function reads one line from the + object p. p may be a file object or any object with a readline() + method. If n is 0, exactly one line is read, regardless of the length of + the line. If n is greater than 0, no more than n bytes will be read + from the file; a partial line can be returned. 
In both cases, an empty string + is returned if the end of the file is reached immediately. If n is less than + 0, however, one line is read regardless of length, but EOFError is + raised if the end of the file is reached immediately.""" + try: + w_readline = space.getattr(w_obj, space.wrap('readline')) + except OperationError: + raise OperationError( + space.w_TypeError, space.wrap( + "argument must be a file, or have a readline() method.")) + + n = rffi.cast(lltype.Signed, n) + if space.is_true(space.gt(space.wrap(n), space.wrap(0))): + return space.call_function(w_readline, space.wrap(n)) + elif space.is_true(space.lt(space.wrap(n), space.wrap(0))): + return space.call_function(w_readline) + else: + # XXX Raise EOFError as specified + return space.call_function(w_readline) + + at cpython_api([CONST_STRING, CONST_STRING], PyObject) +def PyFile_FromString(space, filename, mode): + """ + On success, return a new file object that is opened on the file given by + filename, with a file mode given by mode, where mode has the same + semantics as the standard C routine fopen(). On failure, return NULL.""" + w_filename = space.wrap(rffi.charp2str(filename)) + w_mode = space.wrap(rffi.charp2str(mode)) + return space.call_method(space.builtin, 'file', w_filename, w_mode) + + at cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject) +def PyFile_FromFile(space, fp, name, mode, close): + """Create a new PyFileObject from the already-open standard C file + pointer, fp. The function close will be called when the file should be + closed. Return NULL on failure.""" + raise NotImplementedError + + at cpython_api([PyObject, rffi.INT_real], lltype.Void) +def PyFile_SetBufSize(space, w_file, n): + """Available on systems with setvbuf() only. 
This should only be called + immediately after file object creation.""" + raise NotImplementedError + + at cpython_api([CONST_STRING, PyObject], rffi.INT_real, error=-1) +def PyFile_WriteString(space, s, w_p): + """Write string s to file object p. Return 0 on success or -1 on + failure; the appropriate exception will be set.""" + w_s = space.wrap(rffi.charp2str(s)) + space.call_method(w_p, "write", w_s) + return 0 + diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/pygame_console.py @@ -0,0 +1,353 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +# the pygame console is currently thoroughly broken. + +# there's a fundamental difference from the UnixConsole: here we're +# the terminal emulator too, in effect. This means, e.g., for pythoni +# we really need a separate process (or thread) to monitor for ^C +# during command execution and zap the executor process. Making this +# work on non-Unix is expected to be even more entertaining. 
+ +from pygame.locals import * +from pyrepl.console import Console, Event +from pyrepl import pygame_keymap +import pygame +import types + +lmargin = 5 +rmargin = 5 +tmargin = 5 +bmargin = 5 + +try: + bool +except NameError: + def bool(x): + return not not x + +modcolors = {K_LCTRL:1, + K_RCTRL:1, + K_LMETA:1, + K_RMETA:1, + K_LALT:1, + K_RALT:1, + K_LSHIFT:1, + K_RSHIFT:1} + +class colors: + fg = 250,240,230 + bg = 5, 5, 5 + cursor = 230, 0, 230 + margin = 5, 5, 15 + +class FakeStdout: + def __init__(self, con): + self.con = con + def write(self, text): + self.con.write(text) + def flush(self): + pass + +class FakeStdin: + def __init__(self, con): + self.con = con + def read(self, n=None): + # argh! + raise NotImplementedError + def readline(self, n=None): + from reader import Reader + try: + # this isn't quite right: it will clobber any prompt that's + # been printed. Not sure how to get around this... + return Reader(self.con).readline() + except EOFError: + return '' + +class PyGameConsole(Console): + """Attributes: + + (keymap), + (fd), + screen, + height, + width, + """ + + def __init__(self): + self.pygame_screen = pygame.display.set_mode((800, 600)) + pygame.font.init() + pygame.key.set_repeat(500, 30) + self.font = pygame.font.Font( + "/usr/X11R6/lib/X11/fonts/TTF/luximr.ttf", 15) + self.fw, self.fh = self.fontsize = self.font.size("X") + self.cursor = pygame.Surface(self.fontsize) + self.cursor.fill(colors.cursor) + self.clear() + self.curs_vis = 1 + self.height, self.width = self.getheightwidth() + pygame.display.update() + pygame.event.set_allowed(None) + pygame.event.set_allowed(KEYDOWN) + + def install_keymap(self, keymap): + """Install a given keymap. + + keymap is a tuple of 2-element tuples; each small tuple is a + pair (keyspec, event-name). 
The format for keyspec is + modelled on that used by readline (so read that manual for + now!).""" + self.k = self.keymap = pygame_keymap.compile_keymap(keymap) + + def char_rect(self, x, y): + return self.char_pos(x, y), self.fontsize + + def char_pos(self, x, y): + return (lmargin + x*self.fw, + tmargin + y*self.fh + self.cur_top + self.scroll) + + def paint_margin(self): + s = self.pygame_screen + c = colors.margin + s.fill(c, [0, 0, 800, tmargin]) + s.fill(c, [0, 0, lmargin, 600]) + s.fill(c, [0, 600 - bmargin, 800, bmargin]) + s.fill(c, [800 - rmargin, 0, lmargin, 600]) + + def refresh(self, screen, (cx, cy)): + self.screen = screen + self.pygame_screen.fill(colors.bg, + [0, tmargin + self.cur_top + self.scroll, + 800, 600]) + self.paint_margin() + + line_top = self.cur_top + width, height = self.fontsize + self.cxy = (cx, cy) + cp = self.char_pos(cx, cy) + if cp[1] < tmargin: + self.scroll = - (cy*self.fh + self.cur_top) + self.repaint() + elif cp[1] + self.fh > 600 - bmargin: + self.scroll += (600 - bmargin) - (cp[1] + self.fh) + self.repaint() + if self.curs_vis: + self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy)) + for line in screen: + if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh): + if line: + ren = self.font.render(line, 1, colors.fg) + self.pygame_screen.blit(ren, (lmargin, + tmargin + line_top + self.scroll)) + line_top += self.fh + pygame.display.update() + + def prepare(self): + self.cmd_buf = '' + self.k = self.keymap + self.height, self.width = self.getheightwidth() + self.curs_vis = 1 + self.cur_top = self.pos[0] + self.event_queue = [] + + def restore(self): + pass + + def blit_a_char(self, linen, charn): + line = self.screen[linen] + if charn < len(line): + text = self.font.render(line[charn], 1, colors.fg) + self.pygame_screen.blit(text, self.char_pos(charn, linen)) + + def move_cursor(self, x, y): + cp = self.char_pos(x, y) + if cp[1] < tmargin or cp[1] + self.fh > 600 - bmargin: + 
self.event_queue.append(Event('refresh', '', '')) + else: + if self.curs_vis: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + self.pygame_screen.blit(self.cursor, cp) + self.blit_a_char(y, x) + pygame.display.update() + self.cxy = (x, y) + + def set_cursor_vis(self, vis): + self.curs_vis = vis + if vis: + self.move_cursor(*self.cxy) + else: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + pygame.display.update() + + def getheightwidth(self): + """Return (height, width) where height and width are the height + and width of the terminal window in characters.""" + return ((600 - tmargin - bmargin)/self.fh, + (800 - lmargin - rmargin)/self.fw) + + def tr_event(self, pyg_event): + shift = bool(pyg_event.mod & KMOD_SHIFT) + ctrl = bool(pyg_event.mod & KMOD_CTRL) + meta = bool(pyg_event.mod & (KMOD_ALT|KMOD_META)) + + try: + return self.k[(pyg_event.unicode, meta, ctrl)], pyg_event.unicode + except KeyError: + try: + return self.k[(pyg_event.key, meta, ctrl)], pyg_event.unicode + except KeyError: + return "invalid-key", pyg_event.unicode + + def get_event(self, block=1): + """Return an Event instance. Returns None if |block| is false + and there is no event pending, otherwise waits for the + completion of an event.""" + while 1: + if self.event_queue: + return self.event_queue.pop(0) + elif block: + pyg_event = pygame.event.wait() + else: + pyg_event = pygame.event.poll() + if pyg_event.type == NOEVENT: + return + + if pyg_event.key in modcolors: + continue + + k, c = self.tr_event(pyg_event) + self.cmd_buf += c.encode('ascii', 'replace') + self.k = k + + if not isinstance(k, types.DictType): + e = Event(k, self.cmd_buf, []) + self.k = self.keymap + self.cmd_buf = '' + return e + + def beep(self): + # uhh, can't be bothered now. + # pygame.sound.something, I guess. 
+ pass + + def clear(self): + """Wipe the screen""" + self.pygame_screen.fill(colors.bg) + #self.screen = [] + self.pos = [0, 0] + self.grobs = [] + self.cur_top = 0 + self.scroll = 0 + + def finish(self): + """Move the cursor to the end of the display and otherwise get + ready for end. XXX could be merged with restore? Hmm.""" + if self.curs_vis: + cx, cy = self.cxy + self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy)) + self.blit_a_char(cy, cx) + for line in self.screen: + self.write_line(line, 1) + if self.curs_vis: + self.pygame_screen.blit(self.cursor, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + pygame.display.update() + + def flushoutput(self): + """Flush all output to the screen (assuming there's some + buffering going on somewhere)""" + # no buffering here, ma'am (though perhaps there should be!) + pass + + def forgetinput(self): + """Forget all pending, but not yet processed input.""" + while pygame.event.poll().type <> NOEVENT: + pass + + def getpending(self): + """Return the characters that have been typed but not yet + processed.""" + events = [] + while 1: + event = pygame.event.poll() + if event.type == NOEVENT: + break + events.append(event) + + return events + + def wait(self): + """Wait for an event.""" + raise Exception, "erp!" + + def repaint(self): + # perhaps we should consolidate grobs? 
+ self.pygame_screen.fill(colors.bg) + self.paint_margin() + for (y, x), surf, text in self.grobs: + if surf and 0 < y + self.scroll: + self.pygame_screen.blit(surf, (lmargin + x, + tmargin + y + self.scroll)) + pygame.display.update() + + def write_line(self, line, ret): + charsleft = (self.width*self.fw - self.pos[1])/self.fw + while len(line) > charsleft: + self.write_line(line[:charsleft], 1) + line = line[charsleft:] + if line: + ren = self.font.render(line, 1, colors.fg, colors.bg) + self.grobs.append((self.pos[:], ren, line)) + self.pygame_screen.blit(ren, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + else: + self.grobs.append((self.pos[:], None, line)) + if ret: + self.pos[0] += self.fh + if tmargin + self.pos[0] + self.scroll + self.fh > 600 - bmargin: + self.scroll = 600 - bmargin - self.pos[0] - self.fh - tmargin + self.repaint() + self.pos[1] = 0 + else: + self.pos[1] += self.fw*len(line) + + def write(self, text): + lines = text.split("\n") + if self.curs_vis: + self.pygame_screen.fill(colors.bg, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll, + self.fw, self.fh)) + for line in lines[:-1]: + self.write_line(line, 1) + self.write_line(lines[-1], 0) + if self.curs_vis: + self.pygame_screen.blit(self.cursor, + (lmargin + self.pos[1], + tmargin + self.pos[0] + self.scroll)) + pygame.display.update() + + def flush(self): + pass diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 @@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def __init__(self, assembler, translate_support_code=False): @@ -126,6 +156,7 @@ self.translate_support_code = translate_support_code # to be read/used by the assembler too self.jump_target_descr = None + self.close_stack_struct = 0 def _prepare(self, 
inputargs, operations): self.fm = X86FrameManager() @@ -135,16 +166,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -740,8 +761,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None + self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -801,6 +826,11 @@ self._consider_call(op, guard_op) def consider_call_release_gil(self, op, guard_op): + # first force the registers like eax into the stack, because of + # the initial call to _close_stack() + self.rm.before_call() + self.xrm.before_call() + # assert guard_op is not None self._consider_call(op, guard_op) @@ -842,31 +872,53 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = 
arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. - for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. 
+ for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -876,7 +928,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -935,16 +987,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + 
self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -1138,7 +1199,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1206,18 +1267,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from 
pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the 
method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,22 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with 
rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + + def test_reload(self, space, api): + pdb = api.PyImport_Import(space.wrap("pdb")) + space.delattr(pdb, space.wrap("set_trace")) + pdb = api.PyImport_ReloadModule(pdb) + assert space.getattr(pdb, space.wrap("set_trace")) + class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): skip("leak?") diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + 
PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/lib_pypy/pyrepl/tests/basic.py b/lib_pypy/pyrepl/tests/basic.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pyrepl/tests/basic.py @@ -0,0 +1,115 @@ +# Copyright 2000-2004 Michael Hudson-Doyle +# +# All Rights Reserved +# +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose is hereby granted without fee, +# provided that the above copyright notice appear in all copies and +# that both that copyright notice and this permission notice appear in +# supporting documentation. +# +# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO +# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, +# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from pyrepl.console import Event +from pyrepl.tests.infrastructure import ReaderTestCase, EA, run_testcase + +class SimpleTestCase(ReaderTestCase): + + def test_basic(self): + self.run_test([(('self-insert', 'a'), ['a']), + ( 'accept', ['a'])]) + + def test_repeat(self): + self.run_test([(('digit-arg', '3'), ['']), + (('self-insert', 'a'), ['aaa']), + ( 'accept', ['aaa'])]) + + def test_kill_line(self): + self.run_test([(('self-insert', 'abc'), ['abc']), + ( 'left', None), + ( 'kill-line', ['ab']), + ( 'accept', ['ab'])]) + + def test_unix_line_discard(self): + self.run_test([(('self-insert', 'abc'), ['abc']), + ( 'left', None), + ( 'unix-word-rubout', ['c']), + ( 'accept', ['c'])]) + + def test_kill_word(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'beginning-of-line', ['ab cd']), + ( 'kill-word', [' cd']), + ( 'accept', [' cd'])]) + + def test_backward_kill_word(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'accept', ['ab '])]) + + def test_yank(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'beginning-of-line', ['ab ']), + ( 'yank', ['cdab ']), + ( 'accept', ['cdab '])]) + + def test_yank_pop(self): + self.run_test([(('self-insert', 'ab cd'), ['ab cd']), + ( 'backward-kill-word', ['ab ']), + ( 'left', ['ab ']), + ( 'backward-kill-word', [' ']), + ( 'yank', ['ab ']), + ( 'yank-pop', ['cd ']), + ( 'accept', ['cd '])]) + + def test_interrupt(self): + try: + self.run_test([( 'interrupt', [''])]) + except KeyboardInterrupt: + pass + else: + self.fail('KeyboardInterrupt got lost') + + # test_suspend -- hah + + def test_up(self): + self.run_test([(('self-insert', 'ab\ncd'), ['ab', 'cd']), + ( 'up', ['ab', 'cd']), + (('self-insert', 'e'), ['abe', 'cd']), + ( 'accept', ['abe', 'cd'])]) + + def test_down(self): + self.run_test([(('self-insert', 'ab\ncd'), ['ab', 'cd']), + ( 'up', ['ab', 'cd']), + (('self-insert', 'e'), ['abe', 'cd']), + 
( 'down', ['abe', 'cd']), + (('self-insert', 'f'), ['abe', 'cdf']), + ( 'accept', ['abe', 'cdf'])]) + + def test_left(self): + self.run_test([(('self-insert', 'ab'), ['ab']), + ( 'left', ['ab']), + (('self-insert', 'c'), ['acb']), + ( 'accept', ['acb'])]) + + def test_right(self): + self.run_test([(('self-insert', 'ab'), ['ab']), + ( 'left', ['ab']), + (('self-insert', 'c'), ['acb']), + ( 'right', ['acb']), + (('self-insert', 'd'), ['acbd']), + ( 'accept', ['acbd'])]) + +def test(): + run_testcase(SimpleTestCase) + +if __name__ == '__main__': + test() diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -443,7 +443,8 @@ "ll_upper": Meth([], self.SELFTYPE_T), "ll_lower": Meth([], self.SELFTYPE_T), "ll_substring": Meth([Signed, Signed], self.SELFTYPE_T), # ll_substring(start, count) - "ll_split_chr": Meth([self.CHAR], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_split_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_rsplit_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! 
"ll_contains": Meth([self.CHAR], Bool), "ll_replace_chr_chr": Meth([self.CHAR, self.CHAR], self.SELFTYPE_T), }) @@ -1480,9 +1481,16 @@ # NOT_RPYTHON return self.make_string(self._str[start:start+count]) - def ll_split_chr(self, ch): + def ll_split_chr(self, ch, max): # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.split(ch)] + l = [self.make_string(s) for s in self._str.split(ch, max)] + res = _array(Array(self._TYPE), len(l)) + res._array[:] = l + return res + + def ll_rsplit_chr(self, ch, max): + # NOT_RPYTHON + l = [self.make_string(s) for s in self._str.rsplit(ch, max)] res = _array(Array(self._TYPE), len(l)) res._array[:] = l return res diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -102,7 +102,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = 
get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -78,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -124,15 +123,18 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() + if gc_ll_descr.gcrootmap: + self._build_close_stack() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) debug_stop('jit-backend-counts') def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" 
+ self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -145,6 +147,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -170,26 +173,47 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. 
- mc.MOV_rr(edi.value, edx.value) - - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -197,21 +221,28 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): - from pypy.rlib import rstack _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0: return # no stack check (for tests, or non-translated) # + # make a "function" that is called immediately at the start of + # an assembler function. In particular, the stack looks like: + # + # | ... 
| <-- aligned to a multiple of 16 + # | retaddr of caller | + # | my own retaddr | <-- esp + # +---------------------+ + # mc = codebuf.MachineCodeBlockWrapper() - mc.PUSH_r(ebp.value) - mc.MOV_rr(ebp.value, esp.value) # + stack_size = WORD if IS_X86_64: # on the x86_64, we have to save all the registers that may # have been used to pass arguments + stack_size += 6*WORD + 8*8 for reg in [edi, esi, edx, ecx, r8, r9]: mc.PUSH_r(reg.value) mc.SUB_ri(esp.value, 8*8) @@ -220,11 +251,13 @@ # if IS_X86_32: mc.LEA_rb(eax.value, +8) + stack_size += 2*WORD + mc.PUSH_r(eax.value) # alignment mc.PUSH_r(eax.value) elif IS_X86_64: mc.LEA_rb(edi.value, +16) - mc.AND_ri(esp.value, -16) # + # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) # mc.MOV(eax, heap(self.cpu.pos_exception())) @@ -232,16 +265,16 @@ mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() # - if IS_X86_64: + if IS_X86_32: + mc.ADD_ri(esp.value, 2*WORD) + elif IS_X86_64: # restore the registers for i in range(7, -1, -1): mc.MOVSD_xs(i, 8*i) - for i, reg in [(6, r9), (5, r8), (4, ecx), - (3, edx), (2, esi), (1, edi)]: - mc.MOV_rb(reg.value, -8*i) + mc.ADD_ri(esp.value, 8*8) + for reg in [r9, r8, ecx, edx, esi, edi]: + mc.POP_r(reg.value) # - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) mc.RET() # # patch the JNZ above @@ -266,14 +299,61 @@ # function, and will instead return to the caller's caller. Note # also that we completely ignore the saved arguments, because we # are interrupting the function. 
- mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) - mc.ADD_ri(esp.value, WORD) + mc.ADD_ri(esp.value, stack_size) mc.RET() # rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.stack_check_slowpath = rawstart + @staticmethod + def _close_stack(css): + # similar to trackgcroot.py:pypy_asm_stackwalk, first part + from pypy.rpython.memory.gctransform import asmgcroot + new = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) + next = asmgcroot.gcrootanchor.next + new.next = next + new.prev = asmgcroot.gcrootanchor + asmgcroot.gcrootanchor.next = new + next.prev = new + # and now release the GIL + before = rffi.aroundstate.before + # Store a flag (by abuse in new+2*WORD) that tells if we must + # call the "after" function or not. The issue is that the + # before/after fields can be set at a random point during the + # execution, and we should not call the "after" function if we + # did not call the "before" function. It works by assuming that + # before/after start out being None/None, and are later set (once + # only) to some pair of functions. 
+ css[2] = int(bool(before)) + if before: + before() + + @staticmethod + def _reopen_stack(css): + # first reacquire the GIL + if css[2]: + after = rffi.aroundstate.after + assert after + after() + # similar to trackgcroot.py:pypy_asm_stackwalk, second part + from pypy.rpython.memory.gctransform import asmgcroot + old = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) + prev = old.prev + next = old.next + prev.next = next + next.prev = prev + + _CLOSESTACK_FUNC = lltype.Ptr(lltype.FuncType([rffi.LONGP], + lltype.Void)) + + def _build_close_stack(self): + closestack_func = llhelper(self._CLOSESTACK_FUNC, + self._close_stack) + reopenstack_func = llhelper(self._CLOSESTACK_FUNC, + self._reopen_stack) + self.closestack_addr = self.cpu.cast_ptr_to_int(closestack_func) + self.reopenstack_addr = self.cpu.cast_ptr_to_int(reopenstack_func) + def assemble_loop(self, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: _x86_loop_code (an integer giving an address) @@ -537,7 +617,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -550,6 +630,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. 
not translated) @@ -571,12 +655,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -686,8 +790,8 @@ nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -896,9 +1000,9 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, 
x, arglocs, start) p = 0 n = len(arglocs) @@ -924,9 +1028,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -984,12 +1088,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1205,6 +1324,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -1783,6 +1907,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + 
self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1796,8 +1924,8 @@ tmp = ecx else: tmp = eax - - self._emit_call(x, arglocs, 3, tmp=tmp) + + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1828,11 +1956,79 @@ faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') - genop_guard_call_release_gil = genop_guard_call_may_force + def genop_guard_call_release_gil(self, op, guard_op, guard_token, + arglocs, result_loc): + # first, close the stack in the sense of the asmgcc GC root tracker + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + # note that regalloc.py used save_all_regs=True to save all + # registers, so we don't have to care about saving them (other + # than ebp) in the close_stack_struct + self.call_close_stack() + # do the call + faildescr = guard_op.getdescr() + fail_index = self.cpu.get_fail_descr_number(faildescr) + self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) + self.genop_call(op, arglocs, result_loc) + # then reopen the stack + if gcrootmap: + self.call_reopen_stack(result_loc) + # finally, the guard_not_forced + self.mc.CMP_bi(FORCE_INDEX_OFS, 0) + self.implement_guard(guard_token, 'L') + + def call_close_stack(self): + from pypy.rpython.memory.gctransform import asmgcroot + css = self._regalloc.close_stack_struct + if css == 0: + use_words = (2 + max(asmgcroot.INDEX_OF_EBP, + asmgcroot.FRAME_PTR) + 1) + pos = self._regalloc.fm.reserve_location_in_frame(use_words) + css = get_ebp_ofs(pos + use_words - 1) + self._regalloc.close_stack_struct = css + 
# The location where the future CALL will put its return address + # will be [ESP-WORD], so save that as the next frame's top address + self.mc.LEA_rs(eax.value, -WORD) # LEA EAX, [ESP-4] + frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) + self.mc.MOV_br(frame_ptr, eax.value) # MOV [css.frame], EAX + # Save ebp + index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) + self.mc.MOV_br(index_of_ebp, ebp.value) # MOV [css.ebp], EBP + # Call the closestack() function (also releasing the GIL) + if IS_X86_32: + reg = eax + elif IS_X86_64: + reg = edi + self.mc.LEA_rb(reg.value, css) + self._emit_call(imm(self.closestack_addr), [reg]) + + def call_reopen_stack(self, save_loc): + # save the previous result (eax/xmm0) into the stack temporarily + if isinstance(save_loc, RegLoc): + self._regalloc.reserve_param(save_loc.width//WORD) + if save_loc.is_xmm: + self.mc.MOVSD_sx(0, save_loc.value) + else: + self.mc.MOV_sr(0, save_loc.value) + # call the reopenstack() function (also reacquiring the GIL) + css = self._regalloc.close_stack_struct + assert css != 0 + if IS_X86_32: + reg = eax + elif IS_X86_64: + reg = edi + self.mc.LEA_rb(reg.value, css) + self._emit_call(imm(self.reopenstack_addr), [reg]) + # restore the result from the stack + if isinstance(save_loc, RegLoc): + if save_loc.is_xmm: + self.mc.MOVSD_xs(save_loc.value, 0) + else: + self.mc.MOV_rs(save_loc.value, 0) def genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): @@ -1844,8 +2040,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1870,7 +2066,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = 
self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -1897,7 +2093,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -1992,11 +2188,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2008,8 +2209,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2017,7 +2217,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. 
# First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2027,22 +2227,30 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) - + genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST genop_list = [Assembler386.not_implemented_op] * rop._LAST genop_llong_list = {} diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ From commits-noreply at bitbucket.org Wed Apr 13 18:31:24 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 13 Apr 2011 18:31:24 +0200 (CEST) Subject: [pypy-svn] pypy 
jitypes2: this test hangs :-( Message-ID: <20110413163124.593702A2035@codespeak.net> Author: Antonio Cuni Branch: jitypes2 Changeset: r43332:83bc8bc305fb Date: 2011-04-13 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/83bc8bc305fb/ Log: this test hangs :-( diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1040,6 +1040,7 @@ """) def test__ffi_call_releases_gil(self): + py.test.skip('fixme') from pypy.rlib.test.test_libffi import get_libc_name def main(libc_name, n): import time From commits-noreply at bitbucket.org Wed Apr 13 19:23:50 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 13 Apr 2011 19:23:50 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: produce a short preamble that is actualy usefull Message-ID: <20110413172350.3B7AB2A2035@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43333:96c29f01f750 Date: 2011-04-13 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/96c29f01f750/ Log: produce a short preamble that is actualy usefull diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -165,7 +165,7 @@ values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values) short_boxes = preamble_optimizer.produce_short_preamble_ops(inputargs) - initial_inputargs_len = len(short_boxes) + initial_inputargs_len = len(inputargs) try: inputargs, short = self.inline(self.cloned_operations, @@ -199,6 +199,8 @@ #short = self.create_short_preamble(loop.preamble, loop) if short: + assert short[-1].getopnum() == rop.JUMP + short[-1].setdescr(loop.token) if False: # FIXME: This should save some memory but requires # a lot of tests to be fixed... 
@@ -248,6 +250,7 @@ values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values) + short_jumpargs = inputargs[:] # This loop is equivalent to the main optimization loop in # Optimizer.propagate_all_forward @@ -284,6 +287,7 @@ if a not in inputargs: short_op = short_boxes[a] short.append(short_op) + short_jumpargs.append(short_op.result) newop = short_inliner.inline_op(short_op) self.optimizer.send_extra_operation(newop) inputargs.append(a) @@ -294,6 +298,7 @@ jmp.initarglist(jumpargs) self.optimizer.newoperations.append(jmp) + short.append(ResOperation(rop.JUMP, short_jumpargs, None)) return inputargs, short def sameop(self, op1, op2): From commits-noreply at bitbucket.org Wed Apr 13 20:01:33 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 20:01:33 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: replace "pure" with "invariant", add a precise definition of the term Message-ID: <20110413180133.B83002A2038@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3502:b56ca2a0e014 Date: 2011-04-13 20:01 +0200 http://bitbucket.org/pypy/extradoc/changeset/b56ca2a0e014/ Log: replace "pure" with "invariant", add a precise definition of the term diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -35,7 +35,7 @@ } \newboolean{showcomments} -\setboolean{showcomments}{false} +\setboolean{showcomments}{true} \ifthenelse{\boolean{showcomments}} {\newcommand{\nb}[2]{ \fbox{\bfseries\sffamily\scriptsize#1} @@ -147,7 +147,8 @@ bare meta-tracing. In this paper we present two of these hints that are extensively used in the -PyPy project to improve the performance of its Python interpreter. +PyPy project to improve the performance of its Python interpreter, particularly +of the object model. 
Conceptually, the significant speed-ups that can be achieved with dynamic compilation depend on feeding into compilation and exploiting @@ -160,8 +161,7 @@ meta-tracing context. \cfbolz{XXX kill the next paragraph? the info is repeated in the list below} -Concretely these hints are used to control how the optimizer of the -tracing JIT can improve the traces of the object model. In particular the hints +In particular the hints influence the constant folding optimization. The first hint makes it possible to turn arbitrary variables in the trace into constant by feeding back runtime values. The @@ -175,8 +175,8 @@ \begin{itemize} \item A hint to turn arbitrary variables into constants in the trace by feeding back runtime information into compilation. - \item A way to annotate operations as pure which the constant folding - optimization then recognizes. + \item A way to annotate operations which the constant folding + optimization then recognizes and exploits. \item A worked-out example of a simple object model of a dynamic language and how it can be improved using these hints. \end{itemize} @@ -190,7 +190,7 @@ \cfbolz{XXX stress more that "the crux of the techniques and a significant portion of new contributions in the paper are from how to refactoring codes to -expose likely runtime constants and pure functions"} +expose likely runtime constants and invariant functions"} \section{Background} @@ -391,8 +391,8 @@ \begin{itemize} \item the arguments of an operation actually need to all be constant, i.e. statically known by the optimizer - \item the operation needs to be \emph{pure}, i.e. always yield the same result given - the same arguments. + \item the operation needs to be \emph{constant-foldable}, i.e. always yield + the same result given the same arguments. \end{itemize} The PyPy JIT generator automatically detects the majority of these conditions. 
@@ -538,18 +538,24 @@ to type specialization on the language level} -\subsection{Declaring New Pure Operations} +\subsection{Declaring New Foldable Operations} In the previous section we saw a way to turn arbitrary variables into constants. All -pure operations on these constants can be constant-folded. This works well for +foldable operations on these constants can be constant-folded. This works well for constant folding of simple types, e.g. integers. Unfortunately, in the context of an interpreter for a dynamic language, most operations actually manipulate objects, not simple types. The -operations on objects are often not pure and might even have side-effects. If +operations on objects are often not foldable and might even have side-effects. If one reads a field out of a constant reference to an object this cannot necessarily be folded away because the object can be mutated. Therefore, another hint is needed. +This hint can be used to mark functions as \emph{invariant}. A function is +termed invariant if, during the execution of the program, successive calls to +the function with identical arguments always return the same result. From this +definition follows that a call to an invariant function with constant arguments +in a trace can be replaced with the result of the call. + As an example, take the following class: \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -583,7 +589,7 @@ which lets the interpreter author communicate invariants to the optimizer. In this case, she could decide that the \texttt{x} field of instances of \texttt{A} is immutable, and therefore \texttt{compute} -is a pure function. To communicate this, there is a \texttt{purefunction} decorator. +is an invariant function. To communicate this, there is a \texttt{invariant} decorator. 
If the code in \texttt{compute} should be constant-folded away, we would change the class as follows: \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -596,15 +602,11 @@ promote(self) self.y = self.compute() + val - @purefunction + @invariant def compute(self): return self.x * 2 + 1 \end{lstlisting} -\cfbolz{XXX define the meaning of purefunction more precisely, particularly because add\_attribute has side effects, which is confusing} - -\cfbolz{should we mention that pure functions are not actually called by the optimizer, but the values that are seen during tracing are used?} - Now the trace will look like this: % \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -616,7 +618,7 @@ Here, \texttt{0xb73984a8} is the address of the instance of \texttt{A} that was used during tracing. The call to \texttt{compute} is not inlined, so that the optimizer -has a chance to see it. Since the \texttt{compute} function is marked as pure, and its +has a chance to see it. Since the \texttt{compute} function is marked as invariant, and its argument is a constant reference, the call will be removed by the optimizer. The final trace looks like this: @@ -629,31 +631,32 @@ (assuming that the \texttt{x} field's value is \texttt{4}). -On the one hand, the \texttt{purefunction} annotation is very powerful. It can be +On the one hand, the \texttt{invariant} annotation is very powerful. It can be used to constant-fold arbitrary parts of the computation in the interpreter. -However, the annotation also gives the interpreter author ample opportunity to mess things up. If a -function is annotated to be pure, but is not really, the optimizer can produce +However, the annotation also gives the interpreter author ample opportunity to introduce bugs. If a +function is annotated to be invariant, but is not really, the optimizer can produce subtly wrong code. 
Therefore, a lot of care has to be taken when using this -annotation\footnote{The most common use case of the \texttt{purefunction} +annotation\footnote{The most common use case of the \texttt{invariant} annotation is indeed to declare the immutability of fields. Because it is so -common, we have special syntactic sugar for it.}. +common, we have special syntactic sugar for it.}. We hope to introduce a +debugging mode which would (slowly) check whether the annotation is applied +incorrectly to mitigate this problem. -\cfbolz{XXX mention a possible debug mode for findings bugs in this, or too many values in promotion. stress more that promote is safe} +\subsubsection{Observably invariant Functions} -\subsubsection{Observably Pure Functions} +\cfbolz{XXX do we kill this section?} Why can't we simply write an analysis to find out that the \texttt{x} fields of the -\texttt{A} instances is immutable and deduce that \texttt{compute} is a pure function, +\texttt{A} instances is immutable and deduce that \texttt{compute} is an invariant function, since it only reads the \texttt{x} field and does not have side effects? This might be possible in this particular case, but in practice the functions that are -annotated with the \texttt{purefunction} decorator are usually more complex. +annotated with the \texttt{invariant} decorator are usually more complex. The easiest example for this is that of a function that uses memoization to cache its results. If this function is analyzed, it looks like the function has side effects, because it changes the memoizing dictionary. However, because this side -effect is not externally visible, the function from the outside is pure. This is -a property that is not easily detectable by analysis. Therefore, the purity -of this function needs to be annotated manually. +effect is not externally visible, the function is still invariant. This is +a property that is not easily detectable by analysis. 
@@ -675,11 +678,11 @@ The first step in making \texttt{getattr} faster in our object model is to optimize away the dictionary lookups on the instances. The hints we have looked at in the two previous sections don't seem to help with the current object model. There is -no pure function to be seen, and the instance is not a candidate for promotion, +no invariant function to be seen, and the instance is not a candidate for promotion, because there tend to be many instances. This is a common problem when trying to apply hints. Often, the interpreter -needs a small rewrite to expose the pure functions and nearly-constant objects +needs a small rewrite to expose the invariant functions and nearly-constant objects that are implicitly there. In the case of instance fields this rewrite is not entirely obvious. The basic idea is as follows. In theory instances can have arbitrary fields. In practice however many instances share their layout (i.e. @@ -701,9 +704,9 @@ In this implementation instances no longer use dictionaries to store their fields. Instead, they have a reference to a map, which maps field names to indexes into a storage list. The storage list contains the actual field values. Therefore they have to be immutable, which means -that their \texttt{getindex} method is a pure function. When a new attribute is added +that their \texttt{getindex} method is an invariant function. When a new attribute is added to an instance, a new map needs to be chosen, which is done with the -\texttt{add\_attribute} method on the previous map. This function is also pure, +\texttt{add\_attribute} method on the previous map. This function is also invariant, because it caches all new instances of \texttt{Map} that it creates, to make sure that objects with the same layout have the same map. 
Now that we have introduced maps, it is safe to promote the map everywhere, because we assume @@ -719,7 +722,7 @@ code} The calls to \texttt{Map.getindex} can be optimized away, because they are calls to -a pure function and they have constant arguments. That means that \texttt{index1/2/3} +an invariant function and they have constant arguments. That means that \texttt{index1/2/3} are constant and the guards on them can be removed. All but the first guard on the map will be optimized away too, because the map cannot have changed in between. This trace is already much better than @@ -744,17 +747,17 @@ classes to change at all. This is not totally reasonable (sometimes classes contain counters or similar things) but for this simple example it is good enough. -What we would really like is if the \texttt{Class.find\_method} method were pure. +What we would really like is if the \texttt{Class.find\_method} method were invariant. But it cannot be, because it is always possible to change the class itself. Every time the class changes, \texttt{find\_method} can potentially return a new value. Therefore, we give every class a version object, which is changed every time a class gets changed (i.e., the \texttt{methods} dictionary changes). -This means that the result of \texttt{methods.get()} for a given \texttt{(name, -version)} pair will always be the same, i.e. it is a pure operation. To help +This means that the result of calls to \texttt{methods.get()} for a given \texttt{(name, +version)} pair will always be the same, i.e. it is an invariant operation. To help the JIT to detect this case, we factor it out in a helper method which is -explicitly marked as \texttt{@purefunction}. The refactored \texttt{Class} can +explicitly marked as \texttt{@invariant}. 
The refactored \texttt{Class} can be seen in Figure~\ref{fig:version} \begin{figure} @@ -765,7 +768,7 @@ What is interesting here is that \texttt{\_find\_method} takes the \texttt{version} argument but it does not use it at all. Its only purpose is to make the call -pure, because when the version object changes, the result of the call might be +invariant, because when the version object changes, the result of the call might be different than the previous one. \begin{figure} @@ -995,7 +998,7 @@ \section*{Acknowledgements} -XXX Peng Wu and David Edelsohn +XXX Peng Wu and David Edelsohn, Laura Creighton \bibliographystyle{abbrv} \bibliography{paper} diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -14,7 +14,7 @@ promote(version) return self._find_method(name, version) - @purefunction + @invariant def _find_method(self, name, version): return self.methods.get(name) diff --git a/talk/icooolps2011/code/map.tex b/talk/icooolps2011/code/map.tex --- a/talk/icooolps2011/code/map.tex +++ b/talk/icooolps2011/code/map.tex @@ -4,11 +4,11 @@ self.indexes = {} self.other_maps = {} - @purefunction + @invariant def getindex(self, name): return self.indexes.get(name, -1) - @purefunction + @invariant def add_attribute(self, name): if name not in self.other_maps: newmap = Map() From commits-noreply at bitbucket.org Wed Apr 13 20:25:58 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 20:25:58 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: fix a typo and an XXX Message-ID: <20110413182558.3067F2A2038@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3503:f011879ee9ad Date: 2011-04-13 20:23 +0200 http://bitbucket.org/pypy/extradoc/changeset/f011879ee9ad/ Log: fix a typo and an XXX diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex 
@@ -403,7 +403,7 @@ \subsection{Where Do All the Constants Come From} -It is worth clarifying what is a ``constant'' in this context. A variable of +It is worth clarifying what a ``constant'' is in this context. A variable of the trace is said to be constant if its value is statically known by the optimizer. @@ -528,14 +528,12 @@ Promoting integers, as in the examples above, is not used that often. However, the internals of dynamic language interpreters often have values that are variable but vary little in the context of parts of a user -program. An example would be the types of variables in a user function. Even -though in principle the argument to a Python function could be any Python type, -in practice the argument types tend not to vary often. Therefore it is possible to -promote the types. Section~\ref{sec:fastobjmodel} will present a complete example of how -this works. - -\cfbolz{XXX explain how value specialization on the interpreter level can lead -to type specialization on the language level} +program. An example would be the types of variables in a user function, which +rarely change in a dynamic language in practice (even though they could). In the +interpreter, these user-level types are values. Thus promoting them will lead +to type-specialization on the level of the user program. +Section~\ref{sec:fastobjmodel} will present a complete example of how this +works. 
\subsection{Declaring New Foldable Operations} From commits-noreply at bitbucket.org Wed Apr 13 20:25:58 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 20:25:58 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: footnotize a paragraph Message-ID: <20110413182558.A64D62A2039@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3504:a34441a3ce79 Date: 2011-04-13 20:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/a34441a3ce79/ Log: footnotize a paragraph diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -743,7 +743,9 @@ different instance layouts is small compared to the number of instances. For classes we will make an even stronger assumption. We simply assume that it is rare for classes to change at all. This is not totally reasonable (sometimes classes contain -counters or similar things) but for this simple example it is good enough. +counters or similar things) but for this simple example it is good +enough.\footnote{There is a more complex variant of class versions that can +accommodate class fields that change a lot better.} What we would really like is if the \texttt{Class.find\_method} method were invariant. But it cannot be, because it is always possible to change the class itself. @@ -815,20 +817,14 @@ versions of all the classes inheriting from it need to be changed as well, recursively. This makes class changes expensive, but they should be rare. On the other hand, a method lookup in a complex class hierarchy is as optimized in the -trace as in our object model here. - -A downside of the versioning of classes that we haven't yet fixed in PyPy, is -that some classes \emph{do} change a lot. An example would be a class that keeps a -counter of how many instances have been created so far. This is very slow right -now, but we have ideas about how to fix it in the future. +trace as in our simple object model above. 
Another optimization is that in practice the shape of an instance is correlated with its class. In our code above, we allow both to vary independently. -In PyPy's Python interpreter we act somewhat more cleverly. The class of -an instance is not stored on the instance itself, but on the map. This means -that we get one fewer promotion (and thus one fewer guard) in the trace, -because the class doesn't need to -be promoted after the map has been. +Therefore we store the class of an instance on the map in PyPy's Python +interpreter. This means that we get one fewer promotion (and thus one fewer +guard) in the trace, because the class doesn't need to be promoted after the +map has been. %___________________________________________________________________________ From commits-noreply at bitbucket.org Wed Apr 13 21:04:01 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 13 Apr 2011 21:04:01 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: update definition, not too great and a bit circular Message-ID: <20110413190401.6D7872A2038@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3505:11b9039d379e Date: 2011-04-13 21:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/11b9039d379e/ Log: update definition, not too great and a bit circular diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -548,11 +548,17 @@ necessarily be folded away because the object can be mutated. Therefore, another hint is needed. +XXX not too happy with the definition + This hint can be used to mark functions as \emph{invariant}. A function is -termed invariant if, during the execution of the program, successive calls to -the function with identical arguments always return the same result. 
From this +termed invariant if, during the execution of the program, the results of +subsequent calls to the function with identical arguments may be replaced +with the result of the first call without changing the program's behaviour. +From this definition follows that a call to an invariant function with constant arguments -in a trace can be replaced with the result of the call. +in a trace can be replaced with the result of the call.\footnote{This property +is less strict than that of a "pure" function, because it is only about actual +calls during execution.} As an example, take the following class: From commits-noreply at bitbucket.org Wed Apr 13 21:17:20 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 13 Apr 2011 21:17:20 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: overflow support Message-ID: <20110413191720.0B5442A2038@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43334:2af533e2b048 Date: 2011-04-13 20:12 +0200 http://bitbucket.org/pypy/pypy/changeset/2af533e2b048/ Log: overflow support diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -166,17 +166,17 @@ inputargs = virtual_state.make_inputargs(values) short_boxes = preamble_optimizer.produce_short_preamble_ops(inputargs) initial_inputargs_len = len(inputargs) + - try: - inputargs, short = self.inline(self.cloned_operations, - loop.inputargs, jump_args, - virtual_state, short_boxes) - except KeyError: - debug_print("Unrolling failed.") - loop.preamble.operations = None - jumpop.initarglist(jump_args) - preamble_optimizer.send_extra_operation(jumpop) - return + inputargs, short = self.inline(self.cloned_operations, + loop.inputargs, jump_args, + virtual_state, short_boxes) + #except KeyError: + # debug_print("Unrolling failed.") + # loop.preamble.operations = None + # jumpop.initarglist(jump_args) 
+ # preamble_optimizer.send_extra_operation(jumpop) + # return loop.inputargs = inputargs jmp = ResOperation(rop.JUMP, loop.inputargs[:], None) jmp.setdescr(loop.token) @@ -212,7 +212,9 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - op.setdescr(start_resumedescr.clone_if_mutable()) + descr = start_resumedescr.clone_if_mutable() + self.inliner.inline_descr_inplace(descr) + op.setdescr(descr) short[i] = op short_loop = TreeLoop('short preamble') @@ -291,6 +293,16 @@ newop = short_inliner.inline_op(short_op) self.optimizer.send_extra_operation(newop) inputargs.append(a) + if newop.is_ovf(): + # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here + guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) + short.append(guard) + # FIXME: Emit a proper guard here in case it is not + # removed by the optimizer. + # add test_loop_variant_mul1_ovf + self.optimizer.send_extra_operation(guard) + assert self.optimizer.newoperations[-1] is not guard + box = newop.result if box in self.optimizer.values: box = self.optimizer.values[box].force_box() From commits-noreply at bitbucket.org Wed Apr 13 21:19:25 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 13 Apr 2011 21:19:25 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: hg merge default Message-ID: <20110413191925.E14F32A2038@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43335:14f72e53598a Date: 2011-04-13 21:13 +0200 http://bitbucket.org/pypy/pypy/changeset/14f72e53598a/ Log: hg merge default diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -854,6 +854,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/interpreter/test/test_eval.py 
b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + 
pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? 
- m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -119,13 +119,16 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. 
-License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -158,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. 
- UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def 
test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in 
this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. 
- - Longer tasks ------------ diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver, hint, purefunction from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class SendTests(object): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = 
hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver from pypy.rlib import objectmodel diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. 
pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ 
b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -147,7 +157,6 @@ FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token - virtualrefindexdescr = vrefinfo.descr_virtualref_index virtualforceddescr = vrefinfo.descr_forced jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) @@ -156,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from 
pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = self.codetest(self.implicitAttributeError) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert 
isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 #__________________________________________________________ def test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - 
results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, 
Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. - self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. 
self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 
@@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -79,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + 
return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.jitprof import * diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - 
py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, hint from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.lltypesystem.lloperation import llop from 
pypy.rpython.ootypesystem import ootype diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class DelTests: diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -17,8 +17,8 @@ '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 
'space.fromcache(error.Cache).w_error', } diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_wait.py @@ -0,0 +1,51 @@ +from ctypes import CDLL, c_int, POINTER, byref +from ctypes.util import find_library +from resource import _struct_rusage, struct_rusage + +__all__ = ["wait3", "wait4"] + +libc = CDLL(find_library("c")) +c_wait3 = libc.wait3 + +c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] + +c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + +def wait3(options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage + +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -34,11 +34,7 @@ @jit.purefunction def _getcell_makenew(self, key): - res = self.content.get(key, None) - if res is not None: - return res - result = self.content[key] = ModuleCell() - return result + return self.content.setdefault(key, ModuleCell()) def impl_setitem(self, w_key, w_value): space = self.space @@ -50,6 +46,16 @@ def impl_setitem_str(self, name, w_value): self.getcell(name, 
True).w_value = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + cell = self.getcell(space.str_w(w_key), True) + if cell.w_value is None: + cell.w_value = w_default + return cell.w_value + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py --- a/lib_pypy/pyrepl/unix_console.py +++ b/lib_pypy/pyrepl/unix_console.py @@ -27,7 +27,10 @@ from pyrepl.console import Console, Event from pyrepl import unix_eventqueue -_error = (termios.error, curses.error) +class InvalidTerminal(RuntimeError): + pass + +_error = (termios.error, curses.error, InvalidTerminal) # there are arguments for changing this to "refresh" SIGWINCH_EVENT = 'repaint' @@ -38,7 +41,7 @@ def _my_getstr(cap, optional=0): r = curses.tigetstr(cap) if not optional and r is None: - raise RuntimeError, \ + raise InvalidTerminal, \ "terminal doesn't have the required '%s' capability"%cap return r @@ -289,6 +292,12 @@ self.__write_code(self._el) self.__write(newline[x:]) self.__posxy = len(newline), y + + if '\x1b' in newline: + # ANSI escape characters are present, so we can't assume + # anything about the position of the cursor. Moving the cursor + # to the left margin should work to get to a known position. 
+ self.move_cursor(0, y) def __write(self, text): self.__buffer.append((text, 0)) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitely encoded as well, +# with immediate(argnum)). 
+ +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,7 +364,9 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +384,8 @@ INSN_bi32(mc, offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,23 +463,25 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, 
OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) - CMP_jr = insn(rex_w, '\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -508,7 +529,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -534,12 +555,15 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, 
'\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion @@ -639,7 +663,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -680,7 +704,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/jit/metainterp/virtualref.py b/pypy/jit/metainterp/virtualref.py --- a/pypy/jit/metainterp/virtualref.py +++ b/pypy/jit/metainterp/virtualref.py @@ -13,7 +13,6 @@ self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef', ('super', rclass.OBJECT), ('virtual_token', lltype.Signed), - ('virtualref_index', lltype.Signed), ('forced', rclass.OBJECTPTR)) self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', @@ -27,8 +26,6 @@ fielddescrof = self.cpu.fielddescrof self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF, 'virtual_token') - self.descr_virtualref_index = fielddescrof(self.JIT_VIRTUAL_REF, - 'virtualref_index') self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced') # # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if frame.instr_lb <= 
frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py --- a/pypy/jit/metainterp/test/test_immutable.py +++ b/pypy/jit/metainterp/test/test_immutable.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ImmutableFieldsTests: diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) -from 
pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert 
os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -12,7 +12,7 @@ import py from pypy.jit.metainterp.memmgr import MemoryManager -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside diff --git a/pypy/jit/metainterp/test/test_float.py b/pypy/jit/metainterp/test/test_float.py --- a/pypy/jit/metainterp/test/test_float.py +++ b/pypy/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class FloatTests: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def _clone_if_mutable(self): res = ResumeGuardDescr() - 
self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian + assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() diff --git 
a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." 
+ raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', [v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 
@@ import py, sys -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.jit.codewriter.policy import StopAtXPolicy diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that needs to go via its slow path. + import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. 
They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? 
@@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): @@ -153,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - # string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. 
For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. 
- halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. + halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' 
in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for 
translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { @@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = 
debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import \ W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \ @@ -151,6 +152,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -259,7 +262,29 @@ d[33] = 99 assert d == dd assert x == 99 - + + def test_setdefault_fast(self): + class Key(object): + calls = 0 + def __hash__(self): + self.calls += 1 + return object.__hash__(self) + + k = Key() + d = {} + d.setdefault(k, []) + if self.on_pypy: + assert k.calls == 1 + + d.setdefault(k, 1) + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 + def test_update(self): d = {1:2, 3:4} dd = d.copy() @@ -704,13 +729,20 @@ class FakeString(str): + hash_count = 0 def unwrap(self, space): self.unwrapped = True return str(self) + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: + hash_count = 0 def hash_w(self, obj): + self.hash_count += 1 return hash(obj) def unwrap(self, x): return x @@ -726,6 +758,8 @@ return [] DictObjectCls = W_DictMultiObject def type(self, w_obj): + if isinstance(w_obj, FakeString): + return str return type(w_obj) w_str = str def str_w(self, string): @@ -890,6 +924,19 @@ impl.setitem(x, x) assert impl.r_dict_content is not None + def test_setdefault_fast(self): + on_pypy = "__pypy__" in sys.builtin_module_names + impl = self.impl + key = FakeString(self.string) + x = impl.setdefault(key, 1) + assert x == 
1 + if on_pypy: + assert key.hash_count == 1 + x = impl.setdefault(key, 2) + assert x == 1 + if on_pypy: + assert key.hash_count == 2 + class TestStrDictImplementation(BaseTestRDictImplementation): ImplementionClass = StrDictImplementation diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_ajit.py copy from pypy/jit/metainterp/test/test_basic.py copy to pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -4,269 +4,17 @@ from pypy.rlib.jit import loop_invariant from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong from pypy import conftest from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint 
- enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - 
DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class BasicTests: @@ -358,7 +106,7 @@ assert res == 1323 self.check_loop_count(1) self.check_loops(int_mul=1) - + def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): diff --git a/pypy/rpython/rstr.py b/pypy/rpython/rstr.py --- a/pypy/rpython/rstr.py +++ b/pypy/rpython/rstr.py @@ -221,14 +221,33 @@ def rtype_method_split(self, hop): rstr = hop.args_r[0].repr - v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + if hop.nb_args == 3: + v_str, v_chr, v_max = 
hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) try: list_type = hop.r_result.lowleveltype.TO except AttributeError: list_type = hop.r_result.lowleveltype cLIST = hop.inputconst(Void, list_type) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr) + return hop.gendirectcall(self.ll.ll_split_chr, cLIST, v_str, v_chr, v_max) + + def rtype_method_rsplit(self, hop): + rstr = hop.args_r[0].repr + if hop.nb_args == 3: + v_str, v_chr, v_max = hop.inputargs(rstr.repr, rstr.char_repr, Signed) + else: + v_str, v_chr = hop.inputargs(rstr.repr, rstr.char_repr) + v_max = hop.inputconst(Signed, -1) + try: + list_type = hop.r_result.lowleveltype.TO + except AttributeError: + list_type = hop.r_result.lowleveltype + cLIST = hop.inputconst(Void, list_type) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_rsplit_chr, cLIST, v_str, v_chr, v_max) def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check 
is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside from pypy.rlib.rarithmetic import intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.rclass import FieldListAccessor from pypy.jit.metainterp.warmspot import get_stats, get_translator from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -2760,7 +2760,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2794,7 +2794,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2831,6 +2831,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + 
jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): @@ -3115,7 +3130,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3148,7 +3162,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3188,7 +3201,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3255,7 +3267,7 @@ #self.loop.inputargs[0].value = self.nodeobjvalue #self.check_expanded_fail_descr('''p2, p1 # p0.refdescr = p2 - # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + # where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 # where p1 is a node_vtable, nextdescr=p1b # where p1b is a node_vtable, valuedescr=i1 # ''', rop.GUARD_NO_EXCEPTION) @@ -3276,7 +3288,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3302,7 +3313,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -4968,6 +4978,58 @@ p2 = 
new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, 
[42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -573,12 +573,10 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? - self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? 
@@ -598,6 +596,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from pypy.rlib.jit import JitDriver, unroll_safe -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_basic.py +++ /dev/null @@ -1,2428 +0,0 @@ -import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong -from pypy import conftest -from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper -from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import 
ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" 
- - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - - -class BasicTests: - - def test_basic(self): - def f(x, y): - return x + y - res = self.interp_operations(f, [40, 2]) - assert res == 42 - - def test_basic_inst(self): - class A: - pass - def f(n): - a = A() - a.x = n - return a.x - res = self.interp_operations(f, [42]) - assert res == 42 - - def test_uint_floordiv(self): - from pypy.rlib.rarithmetic import r_uint - - def f(a, b): - a = r_uint(a) - b = r_uint(b) - return a/b - - res = self.interp_operations(f, [-4, 3]) - assert res == long(r_uint(-4)) // 3 - - def test_direct_call(self): - def g(n): - return n + 2 - def f(a, b): - return g(a) + g(b) - 
res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_direct_call_with_guard(self): - def g(n): - if n < 0: - return 0 - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - if self.basic: - found = 0 - for op in get_stats().loops[0]._all_operations(): - if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) - found += 1 - assert found == 1 - - def test_loop_variant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - x += 1 - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 1323 - self.check_loop_count(1) - self.check_loops(int_mul=1) - - def test_loop_invariant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loop_invariant_mul_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - 
myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - b = y * 2 - res += ovfcheck(x * x) + b - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) - - def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - x += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 3427 - self.check_loop_count(3) - - def test_loop_invariant_mul_bridge_maintaining1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - res += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1167 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - - def test_loop_invariant_mul_bridge_maintaining2(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - z = x * x - res += z - if y<16: - res += z - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1692 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - def test_loop_invariant_mul_bridge_maintaining3(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) - def f(x, y, m): - res = 0 - while y > 0: - 
myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m) - myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m) - z = x * x - res += z - if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x.intval * x.intval - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loops_are_transient(self): - import gc, weakref - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - if y%2: - res *= 2 - y -= 1 - return res - wr_loops = [] - old_init = history.TreeLoop.__init__.im_func - try: - def track_init(self, name): - old_init(self, name) - wr_loops.append(weakref.ref(self)) - history.TreeLoop.__init__ = track_init - res = self.meta_interp(f, [6, 15], no_stats=True) - finally: - history.TreeLoop.__init__ = old_init - - assert res == f(6, 15) - gc.collect() - - #assert not [wr for wr in wr_loops if wr()] - for loop in [wr for wr in wr_loops if wr()]: - assert loop().name == 'short preamble' - - def test_string(self): - def f(n): - bytecode = 'adlfkj' + chr(n) - if n < len(bytecode): - return bytecode[n] - else: - return "?" - res = self.interp_operations(f, [1]) - assert res == ord("d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord("?") - - def test_chr2str(self): - def f(n): - s = chr(n) - return s[0] - res = self.interp_operations(f, [3]) - assert res == 3 - - def test_unicode(self): - def f(n): - bytecode = u'adlfkj' + unichr(n) - if n < len(bytecode): - return bytecode[n] - else: - return u"?" 
- res = self.interp_operations(f, [1]) - assert res == ord(u"d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord(u"?") - - def test_residual_call(self): - @dont_look_inside - def externfn(x, y): - return x * y - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) - - def test_residual_call_pure(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - n = hint(n, promote=True) - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is not recorded in the history if all-constant args - self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure=0) - - def test_residual_call_pure_1(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is recorded in the history if not-all-constant args - self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure=1) - - def test_residual_call_pure_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def externfn(x): - return x - 1 - externfn._pure_function_ = True - def f(n): - while n > 0: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - n = externfn(n) - return n - res = self.meta_interp(f, [7]) - assert res == 0 - # CALL_PURE is recorded in the history, but turned into a CALL - # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) - - def test_constfold_call_pure(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - n -= externfn(m) - return n - res = 
self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_constfold_call_pure_2(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - class V: - def __init__(self, value): - self.value = value - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - v = V(m) - n -= externfn(v.value) - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_pure_function_returning_object(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - class V: - def __init__(self, x): - self.x = x - v1 = V(1) - v2 = V(2) - def externfn(x): - if x: - return v1 - else: - return v2 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - m = V(m).x - n -= externfn(m).x + externfn(m + m - m).x - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) - - def test_constant_across_mp(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - class X(object): - pass - def f(n): - while n > -100: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - x = X() - x.arg = 5 - if n <= 0: break - n -= x.arg - x.arg = 6 # prevents 'x.arg' from being annotated as constant - return n - res = self.meta_interp(f, [31]) - assert res == -4 - - def test_stopatxpolicy(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def internfn(y): - return y * 3 - def externfn(y): - return y % 4 - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if y & 
7: - f = internfn - else: - f = externfn - f(y) - y -= 1 - return 42 - policy = StopAtXPolicy(externfn) - res = self.meta_interp(f, [31], policy=policy) - assert res == 42 - self.check_loops(int_mul=1, int_mod=0) - - def test_we_are_jitted(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if we_are_jitted(): - x = 1 - else: - x = 10 - y -= x - return y - assert f(55) == -5 - res = self.meta_interp(f, [55]) - assert res == -1 - - def test_confirm_enter_jit(self): - def confirm_enter_jit(x, y): - return x <= 5 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - confirm_enter_jit = confirm_enter_jit) - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - y -= x - return y - # - res = self.meta_interp(f, [10, 84]) - assert res == -6 - self.check_loop_count(0) - # - res = self.meta_interp(f, [3, 19]) - assert res == -2 - self.check_loop_count(1) - - def test_can_never_inline(self): - def can_never_inline(x): - return x > 50 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - can_never_inline = can_never_inline) - @dont_look_inside - def marker(): - pass - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - x += 1 - if x == 4 or x == 61: - marker() - y -= x - return y - # - res = self.meta_interp(f, [3, 6], repeat=7) - assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle - # - res = self.meta_interp(f, [60, 84], repeat=7) - assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately - - def test_format(self): - def f(n): - return len("<%d>" % n) - res = self.interp_operations(f, [421]) - assert res == 5 - - def test_switch(self): - def f(n): - if n == -5: return 12 - elif n == 2: return 51 - elif n == 7: return 1212 - else: return 42 - res = self.interp_operations(f, 
[7]) - assert res == 1212 - res = self.interp_operations(f, [12311]) - assert res == 42 - - def test_r_uint(self): - from pypy.rlib.rarithmetic import r_uint - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - y = r_uint(y) - while y > 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - y -= 1 - return y - res = self.meta_interp(f, [10]) - assert res == 0 - - def test_uint_operations(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - return ((r_uint(n) - 123) >> 1) <= r_uint(456) - res = self.interp_operations(f, [50]) - assert res == False - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_uint_condition(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - if ((r_uint(n) - 123) >> 1) <= r_uint(456): - return 24 - else: - return 12 - res = self.interp_operations(f, [50]) - assert res == 12 - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_int_between(self): - # - def check(arg1, arg2, arg3, expect_result, **expect_operations): - from pypy.rpython.lltypesystem import lltype - from pypy.rpython.lltypesystem.lloperation import llop - loc = locals().copy() - exec py.code.Source(""" - def f(n, m, p): - arg1 = %(arg1)s - arg2 = %(arg2)s - arg3 = %(arg3)s - return llop.int_between(lltype.Bool, arg1, arg2, arg3) - """ % locals()).compile() in loc - res = self.interp_operations(loc['f'], [5, 6, 7]) - assert res == expect_result - self.check_operations_history(expect_operations) - # - check('n', 'm', 'p', True, int_sub=2, uint_lt=1) - check('n', 'p', 'm', False, int_sub=2, uint_lt=1) - # - check('n', 'm', 6, False, int_sub=2, uint_lt=1) - # - check('n', 4, 'p', False, int_sub=2, uint_lt=1) - check('n', 5, 'p', True, int_sub=2, uint_lt=1) - check('n', 8, 'p', False, int_sub=2, uint_lt=1) - # - check('n', 6, 7, True, int_sub=2, uint_lt=1) - # - check(-2, 'n', 'p', True, int_sub=2, uint_lt=1) - 
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1) - check(-2, 'p', 'm', False, int_sub=2, uint_lt=1) - #check(0, 'n', 'p', True, uint_lt=1) xxx implement me - #check(0, 'm', 'p', True, uint_lt=1) - #check(0, 'p', 'm', False, uint_lt=1) - # - check(2, 'n', 6, True, int_sub=1, uint_lt=1) - check(2, 'm', 6, False, int_sub=1, uint_lt=1) - check(2, 'p', 6, False, int_sub=1, uint_lt=1) - check(5, 'n', 6, True, int_eq=1) # 6 == 5+1 - check(5, 'm', 6, False, int_eq=1) # 6 == 5+1 - # - check(2, 6, 'm', False, int_sub=1, uint_lt=1) - check(2, 6, 'p', True, int_sub=1, uint_lt=1) - # - check(2, 40, 6, False) - check(2, 40, 60, True) - - def test_getfield(self): - class A: - pass - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=1) - - def test_getfield_immutable(self): - class A: - _immutable_ = True - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=0) - - def test_setfield_bool(self): - class A: - def __init__(self): - self.flag = True - myjitdriver = JitDriver(greens = [], reds = ['n', 'obj']) - def f(n): - obj = A() - res = False - while n > 0: - myjitdriver.can_enter_jit(n=n, obj=obj) - myjitdriver.jit_merge_point(n=n, obj=obj) - obj.flag = False - n -= 1 - return res - res = self.meta_interp(f, [7]) - assert type(res) == bool - assert not res - - def test_switch_dict(self): - def f(x): - if x == 1: return 61 - elif x == 2: return 511 - elif x == 3: return -22 - elif x == 4: return 81 - elif x == 5: return 17 - elif x == 6: return 54 - elif x == 7: return 987 - elif x == 8: return -12 - elif x == 9: return 321 - return -1 - res = self.interp_operations(f, [5]) - assert res == 17 - res = self.interp_operations(f, [15]) - assert res == -1 - 
- def test_int_add_ovf(self): - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -98 - res = self.interp_operations(f, [1, sys.maxint]) - assert res == -42 - - def test_int_sub_ovf(self): - def f(x, y): - try: - return ovfcheck(x - y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -102 - res = self.interp_operations(f, [1, -sys.maxint]) - assert res == -42 - - def test_int_mul_ovf(self): - def f(x, y): - try: - return ovfcheck(x * y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -200 - res = self.interp_operations(f, [-3, sys.maxint//2]) - assert res == -42 - - def test_mod_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y']) - def f(n, x, y): - while n > 0: - myjitdriver.can_enter_jit(x=x, y=y, n=n) - myjitdriver.jit_merge_point(x=x, y=y, n=n) - n -= ovfcheck(x % y) - return n - res = self.meta_interp(f, [20, 1, 2]) - assert res == 0 - self.check_loops(call=0) - - def test_abs(self): - myjitdriver = JitDriver(greens = [], reds = ['i', 't']) - def f(i): - t = 0 - while i < 10: - myjitdriver.can_enter_jit(i=i, t=t) - myjitdriver.jit_merge_point(i=i, t=t) - t += abs(i) - i += 1 - return t - res = self.meta_interp(f, [-5]) - assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9 - - def test_float(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - x = float(x) - y = float(y) - res = 0.0 - while y > 0.0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1.0 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42.0 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) - - def test_print(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def f(n): - while n > 0: - 
myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - print n - n -= 1 - return n - res = self.meta_interp(f, [7]) - assert res == 0 - - def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n'], greens = []) - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - n -= 1 - - self.meta_interp(f, [20], repeat=7) - self.check_tree_loop_count(2) # the loop and the entry path - # we get: - # ENTER - compile the new loop and the entry bridge - # ENTER - compile the leaving path - self.check_enter_count(2) - - def test_bridge_from_interpreter_2(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n'], greens = []) - glob = [1] - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - if n == 17 and glob[0]: - glob[0] = 0 - x = n + 1 - y = n + 2 - z = n + 3 - k = n + 4 - n -= 1 - n += x + y + z + k - n -= x + y + z + k - n -= 1 - - self.meta_interp(f, [20], repeat=7) - - def test_bridge_from_interpreter_3(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - glob.x = 1 - x = 0 - y = 0 - z = 0 - k = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k) - mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k) - x += 10 - y += 3 - z -= 15 - k += 4 - if n == 17 and glob.x: - glob.x = 0 - x += n + 1 - y += n + 2 - z += n + 3 - k += n + 4 - n -= 1 - n -= 1 - return x + 2*y + 3*z + 5*k + 13*n - - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_bridge_from_interpreter_4(self): - jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - - def f(n, k): - while n > 0: - jitdriver.can_enter_jit(n=n, k=k) - jitdriver.jit_merge_point(n=n, k=k) - if k: - n -= 2 - else: - n -= 1 - return n + k - - from pypy.rpython.test.test_llinterp import 
get_interpreter, clear_tcache - from pypy.jit.metainterp.warmspot import WarmRunnerDesc - - interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) - clear_tcache() - translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - warmrunnerdesc = WarmRunnerDesc(translator, - CPUClass=self.CPUClass) - state = warmrunnerdesc.jitdrivers_sd[0].warmstate - state.set_param_threshold(3) # for tests - state.set_param_trace_eagerness(0) # for tests - warmrunnerdesc.finish() - for n, k in [(20, 0), (20, 1)]: - interp.eval_graph(graph, [n, k]) - - def test_bridge_leaving_interpreter_5(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - x = 0 - glob.x = 1 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - glob.x += 1 - x += 3 - n -= 1 - glob.x += 100 - return glob.x + x - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_instantiate_classes(self): - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - def f(n): - if n > 5: - cls = A - else: - cls = B - return cls().foo - res = self.interp_operations(f, [3]) - assert res == 8 - res = self.interp_operations(f, [13]) - assert res == 72 - - def test_instantiate_does_not_call(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - - def f(n): - x = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - if n % 2 == 0: - cls = A - else: - cls = B - inst = cls() - x += inst.foo - n -= 1 - return x - res = self.meta_interp(f, [20], enable_opts='') - assert res == f(20) - self.check_loops(call=0) - - def test_zerodivisionerror(self): - # test the case of exception-raising operation that is not delegated - # to the backend at all: ZeroDivisionError - # - def f(n): - assert n >= 0 - try: - 
return ovfcheck(5 % n) - except ZeroDivisionError: - return -666 - except OverflowError: - return -777 - res = self.interp_operations(f, [0]) - assert res == -666 - # - def f(n): - assert n >= 0 - try: - return ovfcheck(6 // n) - except ZeroDivisionError: - return -667 - except OverflowError: - return -778 - res = self.interp_operations(f, [0]) - assert res == -667 - - def test_div_overflow(self): - import sys - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - try: - res += llop.int_floordiv_ovf(lltype.Signed, - -sys.maxint-1, x) - x += 5 - except OverflowError: - res += 100 - y -= 1 - return res - res = self.meta_interp(f, [-41, 16]) - assert res == ((-sys.maxint-1) // (-41) + - (-sys.maxint-1) // (-36) + - (-sys.maxint-1) // (-31) + - (-sys.maxint-1) // (-26) + - (-sys.maxint-1) // (-21) + - (-sys.maxint-1) // (-16) + - (-sys.maxint-1) // (-11) + - (-sys.maxint-1) // (-6) + - 100 * 8) - - def test_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - if n: - obj = A() - else: - obj = B() - return isinstance(obj, B) - res = self.interp_operations(fn, [0]) - assert res - self.check_operations_history(guard_class=1) - res = self.interp_operations(fn, [1]) - assert not res - - def test_isinstance_2(self): - driver = JitDriver(greens = [], reds = ['n', 'sum', 'x']) - class A: - pass - class B(A): - pass - class C(B): - pass - - def main(): - return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100 - - def f(n, x): - sum = 0 - while n > 0: - driver.can_enter_jit(x=x, n=n, sum=sum) - driver.jit_merge_point(x=x, n=n, sum=sum) - if isinstance(x, B): - sum += 1 - n -= 1 - return sum - - res = self.meta_interp(main, []) - assert res == 55 - - def test_assert_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - # this should only be 
called with n != 0 - if n: - obj = B() - obj.a = n - else: - obj = A() - obj.a = 17 - assert isinstance(obj, B) - return obj.a - res = self.interp_operations(fn, [1]) - assert res == 1 - self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) - - def test_r_dict(self): - from pypy.rlib.objectmodel import r_dict - class FooError(Exception): - pass - def myeq(n, m): - return n == m - def myhash(n): - if n < 0: - raise FooError - return -n - def f(n): - d = r_dict(myeq, myhash) - for i in range(10): - d[i] = i*i - try: - return d[n] - except FooError: - return 99 - res = self.interp_operations(f, [5]) - assert res == f(5) - - def test_free_object(self): - import weakref - from pypy.rlib import rgc - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - class X(object): - pass - def main(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= x.foo - def g(n): - x = X() - x.foo = 2 - main(n, x) - x.foo = 5 - return weakref.ref(x) - def f(n): - r = g(n) - rgc.collect(); rgc.collect(); rgc.collect() - return r() is None - # - assert f(30) == 1 - res = self.meta_interp(f, [30], no_stats=True) - assert res == 1 - - def test_pass_around(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - - def call(): - pass - - def f(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - if n % 2: - call() - if n == 8: - return x - x = 3 - else: - x = 5 - n -= 1 - return 0 - - self.meta_interp(f, [40, 0]) - - def test_const_inputargs(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x']) - def f(n, x): - m = 0x7FFFFFFF - while n > 0: - myjitdriver.can_enter_jit(m=m, n=n, x=x) - myjitdriver.jit_merge_point(m=m, n=n, x=x) - x = 42 - n -= 1 - m = m >> 1 - return x - - res = self.meta_interp(f, [50, 1], enable_opts='') - assert res == 42 - - 
def test_set_param(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - def g(n): - x = 0 - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= 1 - x += n - return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) - return g(n) - - res = self.meta_interp(f, [10, 3]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(2) - - res = self.meta_interp(f, [10, 13]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(0) - - def test_dont_look_inside(self): - @dont_look_inside - def g(a, b): - return a + b - def f(a, b): - return g(a, b) - res = self.interp_operations(f, [3, 5]) - assert res == 8 - self.check_operations_history(int_add=0, call=1) - - def test_listcomp(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) - def f(x, y): - lst = [0, 0, 0] - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, lst=lst) - myjitdriver.jit_merge_point(x=x, y=y, lst=lst) - lst = [i+x for i in lst if i >=0] - y -= 1 - return lst[0] - res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True) - # XXX: the loop looks inefficient - assert res == 42 - - def test_tuple_immutable(self): - def new(a, b): - return a, b - def f(a, b): - tup = new(a, b) - return tup[1] - res = self.interp_operations(f, [3, 5]) - assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=1) - - def test_oosend_look_inside_only_one(self): - class A: - pass - class B(A): - def g(self): - return 123 - class C(A): - @dont_look_inside - def g(self): - return 456 - def f(n): - if n > 3: - x = B() - else: - x = C() - return x.g() + x.g() - res = self.interp_operations(f, [10]) - assert res == 123 * 2 - res = self.interp_operations(f, [-10]) - assert res == 456 * 2 - - def test_residual_external_call(self): - import math - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - def f(x, y): - x = float(x) - 
res = 0.0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) - res += ipart - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops(call=1) - - def test_merge_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 5 - class B(A): - def g(self, y): - return y - 3 - - a1 = A() - a2 = A() - b = B() - def f(x): - l = [a1] * 100 + [a2] * 100 + [b] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - x = a.g(x) - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) - - def test_merge_guardnonnull_guardclass(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class 
B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [b1] * 100 + [None] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) - - def test_merge_guardnonnull_guardvalue_2(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - a2 = A() - b1 = B() - def f(x): - l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [399], listops=True) - assert res == f(399) - self.check_loops(guard_class=0, 
guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_residual_call_doesnt_lose_info(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) - - class A(object): - pass - - globall = [""] - @dont_look_inside - def g(x): - globall[0] = str(x) - return x - - def f(x): - y = A() - y.v = x - l = [0] - while y.v > 0: - myjitdriver.can_enter_jit(x=x, y=y, l=l) - myjitdriver.jit_merge_point(x=x, y=y, l=l) - l[0] = y.v - lc = l[0] - y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 - return y.v - res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) - - def test_guard_isnull_nonnull(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - - @dont_look_inside - def create(x): - if x >= -40: - return A() - return None - - def f(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - obj = create(x-1) - if obj is not None: - res += 1 - obj2 = create(x-1000) - if obj2 is None: - res += 1 - x -= 1 - return res - res = self.meta_interp(f, [21]) - assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) - - def test_loop_invariant1(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - a = A() - a.current_a = A() - a.current_a.x = 1 - @loop_invariant - def f(): - return a.current_a - - def g(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - res += f().x - res += f().x - res += f().x - x -= 1 - a.current_a = A() - a.current_a.x = 2 - return res - res = self.meta_interp(g, [21]) - assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) - - def 
test_bug_optimizeopt_mutates_ops(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) - class A(object): - pass - class B(A): - pass - - glob = A() - glob.a = None - def f(x): - res = 0 - a = A() - a.x = 0 - glob.a = A() - const = 2 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const) - myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const) - if type(glob.a) is B: - res += 1 - if a is None: - a = A() - a.x = x - glob.a = B() - const = 2 - else: - const = hint(const, promote=True) - x -= const - res += a.x - a = None - glob.a = A() - const = 1 - return res - res = self.meta_interp(f, [21]) - assert res == f(21) - - def test_getitem_indexerror(self): - lst = [10, 4, 9, 16] - def f(n): - try: - return lst[n] - except IndexError: - return -2 - res = self.interp_operations(f, [2]) - assert res == 9 - res = self.interp_operations(f, [4]) - assert res == -2 - res = self.interp_operations(f, [-4]) - assert res == 10 - res = self.interp_operations(f, [-5]) - assert res == -2 - - def test_guard_always_changing_value(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - a = A() - hint(a, promote=True) - x -= 1 - self.meta_interp(f, [50]) - self.check_loop_count(1) - # this checks that the logic triggered by make_a_counter_per_value() - # works and prevents generating tons of bridges - - def test_swap_values(self): - def f(x, y): - if x > 5: - x, y = y, x - return x - y - res = self.interp_operations(f, [10, 2]) - assert res == -8 - res = self.interp_operations(f, [3, 2]) - assert res == 1 - - def test_raw_malloc_and_access(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Signed) - - def f(n): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = n - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10]) - assert res == 10 - - def 
test_raw_malloc_and_access_float(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Float) - - def f(n, f): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = f - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10, 3.5]) - assert res == 3.5 - - def test_jit_debug(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - jit_debug("hi there:", x) - jit_debug("foobar") - x -= 1 - return x - res = self.meta_interp(f, [8]) - assert res == 0 - self.check_loops(jit_debug=2) - - def test_assert_green(self): - def f(x, promote): - if promote: - x = hint(x, promote=True) - assert_green(x) - return x - res = self.interp_operations(f, [8, 1]) - assert res == 8 - py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0]) - - def test_multiple_specialied_versions1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 7]) - assert res == 6*8 + 6**8 - self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) - - def test_multiple_specialied_versions_array(self): - myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', - 'array']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - 
def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val - other.val) - def f(x, y): - res = x - array = [1, 2, 3] - array[1] = 7 - idx = 0 - while y > 0: - myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res, - array=array) - myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res, - array=array) - res = res.binop(x) - res.val += array[idx] + array[1] - if y < 7: - idx = 2 - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) - - def test_multiple_specialied_versions_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - b1 = f(B(x), y, B(x)) - b2 = f(B(x), y, B(x)) - assert b1.val == b2.val - c1 = f(B(x), y, A(x)) - c2 = f(B(x), y, A(x)) - assert c1.val == c2.val - d1 = f(A(x), y, B(x)) - d2 = f(A(x), y, B(x)) - assert d1.val == d2.val - return a1.val + b1.val + c1.val + d1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_failing_inlined_guard(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val 
- class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 8: - x = z - return res - def g(x, y): - c1 = f(A(x), y, B(x)) - c2 = f(A(x), y, B(x)) - assert c1.val == c2.val - return c1.val - res = self.meta_interp(g, [3, 16]) - assert res == g(3, 16) - - def test_inlined_guard_in_short_preamble(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class A: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - def binop(self, other): - return A(self.getval() + other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return A(self.val + other.val) - def f(x, y): - res = A(0) - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(A(y)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_specialied_bridge_const(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return 
A(self.val + other.val) - def f(x, y): - res = A(0) - const = 7 - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const) - myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const) - const = hint(const, promote=True) - res = res.binop(A(const)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_multiple_specialied_zigzag(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - def switch(self): - return B(self.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def switch(self): - return A(self.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - if y % 4 == 0: - res = res.switch() - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [3, 23]) - assert res == 7068153 - self.check_loop_count(6) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) - - def test_dont_trace_every_iteration(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - - def main(a, b): - i = sa = 0 - #while i < 200: - while i < 200: - myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa) - myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa) - if a > 0: pass - if b < 2: pass - sa += a % b - i += 1 - return sa - def g(): - return main(10, 20) + main(-10, -20) - res = self.meta_interp(g, []) - assert res == g() - self.check_enter_count(2) - - def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = 
['x']) - @dont_look_inside - def residual(): - print "hi there" - @unroll_safe - def loop(g): - y = 0 - while y < g: - residual() - y += 1 - def f(x, g): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) - loop(g) - x -= 1 - n = current_trace_length() - return n - res = self.meta_interp(f, [5, 8]) - assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 - - def test_compute_identity_hash(self): - from pypy.rlib.objectmodel import compute_identity_hash - class A(object): - pass - def f(): - a = A() - return compute_identity_hash(a) == compute_identity_hash(a) - res = self.interp_operations(f, []) - assert res - # a "did not crash" kind of test - - def test_compute_unique_id(self): - from pypy.rlib.objectmodel import compute_unique_id - class A(object): - pass - def f(): - a1 = A() - a2 = A() - return (compute_unique_id(a1) == compute_unique_id(a1) and - compute_unique_id(a1) != compute_unique_id(a2)) - res = self.interp_operations(f, []) - assert res - - def test_wrap_around_add(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x += 1 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint-10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_mul(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x *= 2 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint>>10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_sub(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x < 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x -= 1 - n += 1 - return n - res 
= self.meta_interp(f, [10-sys.maxint]) - assert res == 12 - self.check_tree_loop_count(2) - - - -class TestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - 
enable_opts='') - assert res - -class BaseLLtypeTests(BasicTests): - - def test_identityhash(self): - A = lltype.GcStruct("A") - def f(): - obj1 = lltype.malloc(A) - obj2 = lltype.malloc(A) - return lltype.identityhash(obj1) == lltype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oops_on_nongc(self): - from pypy.rpython.lltypesystem import lltype - - TP = lltype.Struct('x') - def f(i1, i2): - p1 = prebuilt[i1] - p2 = prebuilt[i2] - a = p1 is p2 - b = p1 is not p2 - c = bool(p1) - d = not bool(p2) - return 1000*a + 100*b + 10*c + d - prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2 - expected = f(0, 1) - assert self.interp_operations(f, [0, 1]) == expected - - def test_casts(self): - py.test.skip("xxx fix or kill") - if not self.basic: - py.test.skip("test written in a style that " - "means it's frontend only") - from pypy.rpython.lltypesystem import lltype, llmemory, rffi - - TP = lltype.GcStruct('S1') - def f(p): - n = lltype.cast_ptr_to_int(p) - return n - x = lltype.malloc(TP) - xref = lltype.cast_opaque_ptr(llmemory.GCREF, x) - res = self.interp_operations(f, [xref]) - y = llmemory.cast_ptr_to_adr(x) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - # - TP = lltype.Struct('S2') - prebuilt = [lltype.malloc(TP, immortal=True), - lltype.malloc(TP, immortal=True)] - def f(x): - p = prebuilt[x] - n = lltype.cast_ptr_to_int(p) - return n - res = self.interp_operations(f, [1]) - y = llmemory.cast_ptr_to_adr(prebuilt[1]) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - - def test_collapsing_ptr_eq(self): - S = lltype.GcStruct('S') - p = lltype.malloc(S) - driver = JitDriver(greens = [], reds = ['n', 'x']) - - def f(n, x): - while n > 0: - driver.can_enter_jit(n=n, x=x) - driver.jit_merge_point(n=n, x=x) - if x: - n -= 1 - n -= 1 - - def main(): - f(10, p) - f(10, lltype.nullptr(S)) - - self.meta_interp(main, []) - 
- def test_enable_opts(self): - jitdriver = JitDriver(greens = [], reds = ['a']) - - class A(object): - def __init__(self, i): - self.i = i - - def f(): - a = A(0) - - while a.i < 10: - jitdriver.jit_merge_point(a=a) - jitdriver.can_enter_jit(a=a) - a = A(a.i + 1) - - self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) - self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) - -class TestLLtype(BaseLLtypeTests, LLJitMixin): - pass diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py --- a/lib_pypy/pyrepl/readline.py +++ b/lib_pypy/pyrepl/readline.py @@ -381,6 +381,9 @@ def _setup(): global _old_raw_input + if _old_raw_input is not None: + return # don't run _setup twice + try: f_in = sys.stdin.fileno() f_out = sys.stdout.fileno() @@ -401,4 +404,5 @@ _old_raw_input = __builtin__.raw_input __builtin__.raw_input = _wrapper.raw_input +_old_raw_input = None _setup() diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap 
from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 
42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -151,8 +152,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -500,6 +506,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = 
rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = self.gcdata.gc.gcheaderbuilder.HDR @@ -786,6 +796,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1327,6 +1346,14 @@ return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1336,10 +1363,7 @@ return top.address[0] def allocate_stack(self): - result = llmemory.raw_malloc(self.rootstacksize) - if result: - llmemory.raw_memclear(result, self.rootstacksize) - return result + return llmemory.raw_malloc(self.rootstacksize) def setup_root_walker(self): stackbase = self.allocate_stack() @@ -1351,12 +1375,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if 
self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1463,12 +1486,11 @@ # collect all valid stacks from the dict (the entry # corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr = end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 
@@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def __init__(self, assembler, translate_support_code=False): @@ -135,16 +165,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -738,8 +758,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None + self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -836,31 +860,53 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = 
arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. - for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. 
+ for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -870,7 +916,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -929,16 +975,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + 
self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -1132,7 +1187,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1200,18 +1255,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import 
pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from 
pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -746,6 +746,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ 
b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? ## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } @@ -44,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True gcrootmap = MockGcRootMap() @@ -166,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, 
False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) - self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -204,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -220,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -254,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -274,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, 
gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -289,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. 
+ ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size = 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool 
ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -1,3 +1,4 @@ +from __future__ import with_statement MARKER = 42 class AppTestImpModule: @@ -34,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not 
var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. 
but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class 
_CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -32,6 +32,7 @@ else: SO = ".so" DEFAULT_SOABI = 'pypy-14' +CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() def get_so_extension(space): @@ -58,6 +59,12 @@ if os.path.exists(pyfile) and case_ok(pyfile): return PY_SOURCE, ".py", "U" + # on Windows, also check for a .pyw file + if CHECK_FOR_PYW: + pyfile = filepart + ".pyw" + if os.path.exists(pyfile) and case_ok(pyfile): + return PY_SOURCE, ".pyw", "U" + # The .py file does not exist. By default on PyPy, lonepycfiles # is False: if a .py file does not exist, we don't even try to # look for a lone .pyc file. @@ -85,6 +92,9 @@ # XXX that's slow def case_ok(filename): index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ -311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow 
graph. - def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py --- a/pypy/annotation/unaryop.py +++ b/pypy/annotation/unaryop.py @@ -499,10 +499,14 @@ def getanyitem(str): return str.basecharclass() - def method_split(str, patt): # XXX + def method_split(str, patt, max=-1): getbookkeeper().count("str_split", str, patt) return getbookkeeper().newlist(str.basestringclass()) + def method_rsplit(str, patt, max=-1): + getbookkeeper().count("str_rsplit", str, patt) + return getbookkeeper().newlist(str.basestringclass()) + def method_replace(str, s1, s2): return str.basestringclass() diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py --- a/pypy/rpython/lltypesystem/rstr.py +++ b/pypy/rpython/lltypesystem/rstr.py @@ -722,31 +722,75 @@ newlen = len(s1.chars) - 1 return LLHelpers._ll_stringslice(s1, 0, newlen) - def ll_split_chr(LIST, s, c): + def ll_split_chr(LIST, s, c, max): chars = s.chars strlen = len(chars) count = 1 i = 0 + if max == 0: + i = strlen while i < strlen: if chars[i] == c: count += 1 + if max >= 0 and count > max: + break i += 1 res = LIST.ll_newlist(count) items = res.ll_items() i = 0 j = 0 resindex = 0 + if max == 0: + j = strlen while j < strlen: if chars[j] == c: item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 
0, j - i) resindex += 1 i = j + 1 + if max >= 0 and resindex >= max: + j = strlen + break j += 1 item = items[resindex] = s.malloc(j - i) item.copy_contents(s, item, i, 0, j - i) return res + def ll_rsplit_chr(LIST, s, c, max): + chars = s.chars + strlen = len(chars) + count = 1 + i = 0 + if max == 0: + i = strlen + while i < strlen: + if chars[i] == c: + count += 1 + if max >= 0 and count > max: + break + i += 1 + res = LIST.ll_newlist(count) + items = res.ll_items() + i = strlen + j = strlen + resindex = count - 1 + assert resindex >= 0 + if max == 0: + j = 0 + while j > 0: + j -= 1 + if chars[j] == c: + item = items[resindex] = s.malloc(i - j - 1) + item.copy_contents(s, item, j + 1, 0, i - j - 1) + resindex -= 1 + i = j + if resindex == 0: + j = 0 + break + item = items[resindex] = s.malloc(i - j) + item.copy_contents(s, item, j, 0, i - j) + return res + @purefunction def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help 
translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. 
# Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* 
tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): import signal diff --git a/pypy/jit/backend/x86/test/test_basic.py b/pypy/jit/backend/x86/test/test_basic.py --- a/pypy/jit/backend/x86/test/test_basic.py +++ b/pypy/jit/backend/x86/test/test_basic.py @@ -1,18 +1,18 @@ import py from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver -class Jit386Mixin(test_basic.LLJitMixin): +class Jit386Mixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass -class TestBasic(Jit386Mixin, test_basic.BaseLLtypeTests): +class TestBasic(Jit386Mixin, test_ajit.BaseLLtypeTests): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py def test_bug(self): diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 
+1,8 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop @@ -21,6 +23,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -34,6 +38,8 @@ pass def can_inline_malloc(self, descr): return False + def can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -212,10 +218,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -224,7 +232,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. # The addresses are actually grouped in pairs: @@ -237,6 +245,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... 
@@ -365,7 +380,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +403,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. + """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. + # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. 
+ + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= 
self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw') + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +572,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +581,9 @@ # where it can be fished and reused by the FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) @@ -461,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() 
self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -539,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -562,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -710,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = 
arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -266,7 +265,7 @@ if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... 
+ at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) @@ -113,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -313,11 +313,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). 
""" for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -325,9 +326,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -116,13 +117,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) @@ -198,44 +198,6 @@ print print '@' * 79 - def test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." 
- i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) 
- while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -783,269 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if 
i %s %f: sa += 1 - else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, 
([], res)) - - def test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = 
t - i += 1 - return 3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 def test_intbound_simple(self): diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in [(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask 
+from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -746,11 +757,13 @@ return str.substring(start, end); } - public static Object[] ll_split_chr(String str, char c) { + public static Object[] 
ll_split_chr(String str, char c, int max) { ArrayList list = new ArrayList(); int lastidx = 0, idx = 0; while ((idx = str.indexOf(c, lastidx)) != -1) { + if (max >= 0 && list.size() >= max) + break; String sub = str.substring(lastidx, idx); list.add(sub); lastidx = idx+1; @@ -759,6 +772,21 @@ return list.toArray(new String[list.size()]); } + public static Object[] ll_rsplit_chr(String str, char c, int max) { + ArrayList list = new ArrayList(); + int lastidx = str.length(), idx = 0; + while ((idx = str.lastIndexOf(c, lastidx - 1)) != -1) + { + if (max >= 0 && list.size() >= max) + break; + String sub = str.substring(idx + 1, lastidx); + list.add(0, sub); + lastidx = idx; + } + list.add(0, str.substring(0, lastidx)); + return list.toArray(new String[list.size()]); + } + public static String ll_substring(String str, int start, int cnt) { return str.substring(start,start+cnt); } @@ -1158,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1170,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 
0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class StringTests: diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # 
____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = 
only_in_link - - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = 
space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? @@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() @@ -39,7 +39,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "md5", "cStringIO", "array"])) + "struct", "_md5", "cStringIO", "array"])) working_oo_modules = default_modules.copy() working_oo_modules.update(dict.fromkeys( diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) 
block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). 
""" - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ 
b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -31,6 +50,10 @@ if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' + if hasattr(os, 'wait3'): + appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { 'open' : 'interp_posix.open', @@ -156,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' 
+ name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ListTests: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -604,6 +604,18 @@ else: self._as_rdict().impl_fallback_setitem(w_key, w_value) + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + key = space.str_w(w_key) + w_result = self.impl_getitem_str(key) + if w_result is not None: + return w_result + self.impl_setitem_str(key, w_default) + return w_default + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) """ + + if export_name is None: + export_name = 
function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. 
because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class WrongResult(Exception): pass diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git 
a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass = JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM 
= Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import 
backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # 
____________________________________________________________ # # Iteration. diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- 
a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -5,7 +5,7 @@ from pypy.rlib.libffi import ArgChain from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestFfiCall(LLJitMixin, _TestLibffiCall): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -1421,9 +1423,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before 
backends opts diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, 
defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -26,7 +26,10 @@ else { string res = ""; foreach(char ch in x) - res+= string.Format("\\x{0:X2}", (int)ch); + if (ch >= 32 && ch < 128) + res+= ch; + else + res+= string.Format("\\x{0:X2}", (int)ch); return string.Format("'{0}'", res); } } @@ -498,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -717,9 +725,31 @@ return s.Substring(start, count); } - public static string[] ll_split_chr(string s, char ch) + public static string[] ll_split_chr(string s, char ch, int max) { - return s.Split(ch); + if (max < 0) + return s.Split(ch); + else + return s.Split(new Char[] {ch}, max + 1); + } + + public static string[] ll_rsplit_chr(string s, char ch, int max) + { + string[] splits = s.Split(ch); + if (max < 0 || splits.Length <= max + 1) + return splits; + else { + /* XXX not very efficient */ + string first = splits[0]; + // join the first (length - max - 1) items + int i; + for (i = 1; i < splits.Length - max; i++) + first += ch + splits[i]; + splits[0] = first; + Array.Copy(splits, i, splits, 1, max); + Array.Resize(ref splits, max + 1); + return splits; + } } public static bool ll_contains(string s, char ch) @@ -1123,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public 
static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name = posixfunc.__name__ diff --git a/pypy/translator/jvm/src/pypy/StatResult.java b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + 
super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import 
StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/translator/backendopt/test/test_mallocv.py b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, 
t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. 
- # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/rpython/test/test_rstr.py b/pypy/rpython/test/test_rstr.py --- a/pypy/rpython/test/test_rstr.py +++ b/pypy/rpython/test/test_rstr.py @@ -576,20 +576,56 @@ res = self.interpret(f, [i, newlines]) assert res == f(i, newlines) - def test_split(self): + def _make_split_test(self, split_fn): const = self.const def fn(i): s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] - l = s.split(const('.')) + l = getattr(s, split_fn)(const('.')) sum = 0 for num in l: - if len(num): - sum += ord(num[0]) - ord(const('0')[0]) + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) return sum + len(l) * 100 + return fn + + def test_split(self): + fn = self._make_split_test('split') for i in range(5): res = self.interpret(fn, [i]) assert res == fn(i) + def test_rsplit(self): + fn = self._make_split_test('rsplit') + for i in range(5): + res = self.interpret(fn, [i]) + assert res == fn(i) + + def _make_split_limit_test(self, split_fn): + const = self.const + def fn(i, j): + s = [const(''), const('0.1.2.4.8'), const('.1.2'), const('1.2.'), const('.1.2.4.')][i] + l = getattr(s, split_fn)(const('.'), j) + sum = 0 + for num in l: + if len(num): + sum += ord(num[0]) - ord(const('0')[0]) + return sum + len(l) * 100 + return fn + + def test_split_limit(self): + fn = self._make_split_limit_test('split') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + + def test_rsplit_limit(self): + fn = self._make_split_limit_test('rsplit') + for i in range(5): + for j in range(4): + res = self.interpret(fn, [i, j]) + assert res == fn(i, j) + def 
test_contains(self): const = self.const constchar = self.constchar diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test import test_loop -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert 
self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -245,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. 
This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -385,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 @@ +import py from pypy.translator.jvm.test.runtest import JvmTest from pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... 
+ s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/rpython/memory/gc/env.py b/pypy/rpython/memory/gc/env.py --- a/pypy/rpython/memory/gc/env.py +++ b/pypy/rpython/memory/gc/env.py @@ -259,7 +259,7 @@ get_L2cache = globals().get('get_L2cache_' + sys.platform, lambda: -1) # implement me for other platforms -NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024*1024 +NURSERY_SIZE_UNKNOWN_CACHE = 1024*1024 # arbitrary 1M. better than default of 131k for most cases # in case it didn't work diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py +++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -1,6 +1,6 @@ import py -from pypy.jit.metainterp.test.test_basic import JitMixin +from pypy.jit.metainterp.test.support import JitMixin from pypy.jit.tl.spli import interpreter, objects, serializer from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.backend.llgraph import runner diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,8 +5,8 @@ soon as possible (at least in a simple case). 
""" -import weakref, random -import py +import weakref +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -72,6 +72,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -87,8 +101,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts='') + patch = get_functions_to_patch() + old_value = {} + try: + for (obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts='') + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source() cbuilder.compile() @@ -127,7 +154,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark GC. 
def setup_class(cls): funcs = [] @@ -178,15 +205,21 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True) + gcrootfinder=cls.gcrootfinder, jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -576,3 +609,10 @@ def test_compile_framework_minimal_size_in_nursery(self): self.run('compile_framework_minimal_size_in_nursery') + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/support.py @@ -0,0 +1,261 @@ + +import py, sys +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.ootypesystem import ootype +from pypy.jit.backend.llgraph import runner +from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter import longlong + +def _get_jitcodes(testself, CPUClass, func, values, type_system, + supports_longlong=False, **kwds): + from pypy.jit.codewriter import support, codewriter + + class FakeJitCell: + __compiled_merge_points = [] + def 
get_compiled_merge_points(self): + return self.__compiled_merge_points[:] + def set_compiled_merge_points(self, lst): + self.__compiled_merge_points = lst + + class FakeWarmRunnerState: + def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): + pass + + def jit_cell_at_key(self, greenkey): + assert greenkey == [] + return self._cell + _cell = FakeJitCell() + + trace_limit = sys.maxint + enable_opts = ALL_OPTS_DICT + + func._jit_unroll_safe_ = True + rtyper = support.annotate(func, values, type_system=type_system) + graphs = rtyper.annotator.translator.graphs + result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] + + class FakeJitDriverSD: + num_green_args = 0 + portal_graph = graphs[0] + virtualizable_info = None + greenfield_info = None + result_type = result_kind + portal_runner_ptr = "???" + + stats = history.Stats() + cpu = CPUClass(rtyper, stats, None, False) + cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) + testself.cw = cw + policy = JitPolicy() + policy.set_supports_longlong(supports_longlong) + cw.find_all_graphs(policy) + # + testself.warmrunnerstate = FakeWarmRunnerState() + testself.warmrunnerstate.cpu = cpu + FakeJitDriverSD.warmstate = testself.warmrunnerstate + if hasattr(testself, 'finish_setup_for_interp_operations'): + testself.finish_setup_for_interp_operations() + # + cw.make_jitcodes(verbose=True) + +def _run_with_blackhole(testself, args): + from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder + cw = testself.cw + blackholeinterpbuilder = BlackholeInterpBuilder(cw) + blackholeinterp = blackholeinterpbuilder.acquire_interp() + count_i = count_r = count_f = 0 + for value in args: + T = lltype.typeOf(value) + if T == lltype.Signed: + blackholeinterp.setarg_i(count_i, value) + count_i += 1 + elif T == llmemory.GCREF: + blackholeinterp.setarg_r(count_r, value) + count_r += 1 + elif T == lltype.Float: + value = longlong.getfloatstorage(value) + blackholeinterp.setarg_f(count_f, value) + count_f += 1 
+ else: + raise TypeError(T) + [jitdriver_sd] = cw.callcontrol.jitdrivers_sd + blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) + blackholeinterp.run() + return blackholeinterp._final_result_anytype() + +def _run_with_pyjitpl(testself, args): + + class DoneWithThisFrame(Exception): + pass + + class DoneWithThisFrameRef(DoneWithThisFrame): + def __init__(self, cpu, *args): + DoneWithThisFrame.__init__(self, *args) + + cw = testself.cw + opt = history.Options(listops=True) + metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + metainterp_sd.finish_setup(cw) + [jitdriver_sd] = metainterp_sd.jitdrivers_sd + metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) + metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame + metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef + metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame + testself.metainterp = metainterp + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + except DoneWithThisFrame, e: + #if conftest.option.view: + # metainterp.stats.view() + return e.args[0] + else: + raise Exception("FAILED") + +def _run_with_machine_code(testself, args): + metainterp = testself.metainterp + num_green_args = metainterp.jitdriver_sd.num_green_args + loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) + if len(loop_tokens) != 1: + return NotImplemented + # a loop was successfully created by _run_with_pyjitpl(); call it + cpu = metainterp.cpu + for i in range(len(args) - num_green_args): + x = args[num_green_args + i] + typecode = history.getkind(lltype.typeOf(x)) + set_future_value(cpu, i, x, typecode) + faildescr = cpu.execute_token(loop_tokens[0]) + assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') + if metainterp.jitdriver_sd.result_type == history.INT: + return cpu.get_latest_value_int(0) + elif metainterp.jitdriver_sd.result_type == history.REF: + return cpu.get_latest_value_ref(0) + elif metainterp.jitdriver_sd.result_type == history.FLOAT: 
+ return cpu.get_latest_value_float(0) + else: + return None + + +class JitMixin: + basic = True + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) + def check_loop_count(self, count): + """NB. This is a hack; use check_tree_loop_count() or + check_enter_count() for the real thing. + This counts as 1 every bridge in addition to every loop; and it does + not count at all the entry bridges from interpreter, although they + are TreeLoops as well.""" + assert get_stats().compiled_count == count + def check_tree_loop_count(self, count): + assert len(get_stats().loops) == count + def check_loop_count_at_most(self, count): + assert get_stats().compiled_count <= count + def check_enter_count(self, count): + assert get_stats().enter_count == count + def check_enter_count_at_most(self, count): + assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): + assert get_stats().aborted_count == count + def check_aborted_count_at_least(self, count): + assert get_stats().aborted_count >= count + + def meta_interp(self, *args, **kwds): + kwds['CPUClass'] = self.CPUClass + kwds['type_system'] = self.type_system + if "backendopt" not in kwds: + kwds["backendopt"] = False + return ll_meta_interp(*args, **kwds) + + def interp_operations(self, f, args, **kwds): + # get the JitCodes for the function f + _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + # try to run it with blackhole.py + result1 = _run_with_blackhole(self, args) + # try to run it with pyjitpl.py + result2 = _run_with_pyjitpl(self, args) + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented + # + if (longlong.supports_longlong and + isinstance(result1, 
longlong.r_float_storage)): + result1 = longlong.getrealfloat(result1) + return result1 + + def check_history(self, expected=None, **isns): + # this can be used after calling meta_interp + get_stats().check_history(expected, **isns) + + def check_operations_history(self, expected=None, **isns): + # this can be used after interp_operations + if expected is not None: + expected = dict(expected) + expected['jump'] = 1 + self.metainterp.staticdata.stats.check_history(expected, **isns) + + +class LLJitMixin(JitMixin): + type_system = 'lltype' + CPUClass = runner.LLtypeCPU + + @staticmethod + def Ptr(T): + return lltype.Ptr(T) + + @staticmethod + def GcStruct(name, *fields, **kwds): + S = lltype.GcStruct(name, *fields, **kwds) + return S + + malloc = staticmethod(lltype.malloc) + nullptr = staticmethod(lltype.nullptr) + + @staticmethod + def malloc_immortal(T): + return lltype.malloc(T, immortal=True) + + def _get_NODE(self): + NODE = lltype.GcForwardReference() + NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), + ('next', lltype.Ptr(NODE)))) + return NODE + +class OOJitMixin(JitMixin): + type_system = 'ootype' + #CPUClass = runner.OOtypeCPU + + def setup_class(cls): + py.test.skip("ootype tests skipped for now") + + @staticmethod + def Ptr(T): + return T + + @staticmethod + def GcStruct(name, *fields, **kwds): + if 'hints' in kwds: + kwds['_hints'] = kwds['hints'] + del kwds['hints'] + I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) + return I + + malloc = staticmethod(ootype.new) + nullptr = staticmethod(ootype.null) + + @staticmethod + def malloc_immortal(T): + return ootype.new(T) + + def _get_NODE(self): + NODE = ootype.Instance('NODE', ootype.ROOT, {}) + NODE._add_fields({'value': ootype.Signed, + 'next': NODE}) + return NODE diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,13 +22,21 @@ return func.code class Defaults(object): - 
_immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - return jit.hint(self, promote=True).items + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items + return self.items def getitem(self, idx): return self.getitems()[idx] @@ -44,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -620,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import 
current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint # from compiler/misc.py @@ -163,7 +163,7 @@ if (not we_are_jitted() or w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() @@ -253,7 +253,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -262,6 +262,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) 
guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,6 +1010,7 @@ """) def test_func_defaults(self): + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1061,11 +1063,11 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=) --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) - """) \ No newline at end of file + """) diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, vref_None from pypy.rlib.jit import virtual_ref, virtual_ref_finish from pypy.rlib.objectmodel import compute_unique_id -from pypy.jit.metainterp.test.test_basic 
import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.virtualref import VirtualRefInfo diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder from pypy.jit.metainterp.blackhole import BlackholeInterpreter from pypy.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/pypy/jit/metainterp/test/test_tlc.py b/pypy/jit/metainterp/test/test_tlc.py --- a/pypy/jit/metainterp/test/test_tlc.py +++ b/pypy/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from pypy.jit.tl import tlc -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class TLCTests: diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver class 
ListTests(object): diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/jit/metainterp/test/test_dlist.py b/pypy/jit/metainterp/test/test_dlist.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_dlist.py +++ /dev/null @@ -1,165 +0,0 @@ - -import py -from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin -py.test.skip("Disabled") - -class ListTests: - def test_basic(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - n -= 1 - return l[0] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(getarrayitem_gc=0, setarrayitem_gc=1) -# XXX fix codewriter -# guard_exception=0, -# guard_no_exception=1) - - def test_list_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n 
- n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=2, getarrayitem_gc=0) - - def test_list_escapes_but_getitem_goes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - x = l[2] - y = l[1] + l[2] - l[1] = x + y - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=3, getarrayitem_gc=0) - - def test_list_of_ptrs(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - class A(object): - def __init__(self, x): - self.x = x - - def f(n): - l = [A(3)] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0].x + 1 - l[0] = A(x) - n -= 1 - return l[0].x - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=1, getarrayitem_gc=0, - new_with_vtable=1) # A should escape - - def test_list_checklength(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [10, 13], listops=True) - assert res == f(10, 13) - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_list_checklength_run(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) > n: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [50, 13], listops=True) - assert res == 42 - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_checklength_cannot_go_away(self): - myjitdriver = 
JitDriver(greens = [], reds = ['n', 'l']) - - def f(n): - l = [0] * n - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return len(l) - l = [0] * n - n -= 1 - return 0 - - res = self.meta_interp(f, [10], listops=True) - assert res == 2 - self.check_loops(arraylen_gc=1) - - def test_list_indexerror(self): - # this is an example where IndexError is raised before - # even getting to the JIT - py.test.skip("I suspect bug somewhere outside of the JIT") - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - l[n] = n - n -= 1 - return l[3] - - def g(n): - try: - f(n) - return 0 - except IndexError: - return 42 - - res = self.meta_interp(g, [10]) - assert res == 42 - self.check_loops(setitem=2) - -class TestLLtype(ListTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert 
self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). # if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -155,6 +182,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = 
self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -184,6 +214,10 @@ new.box = self.box return new + def _get_descr(self): + return self.structdescr + + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): @@ -277,7 +311,6 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class descr_virtual_token = vrefinfo.descr_virtual_token - descr_virtualref_index = vrefinfo.descr_virtualref_index # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, @@ -287,7 +320,6 @@ tokenbox = BoxInt() self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox)) vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox)) - vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox)) def optimize_VIRTUAL_REF_FINISH(self, op): # Set the 'forced' field of the virtual_ref. diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ -45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, 
PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. @@ -1116,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1965,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. 
This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. In 2.x, this is just a alias diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,21 +400,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't 
support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -539,7 +527,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -7,10 +7,10 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from 
pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError @@ -361,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -381,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- 
a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -108,6 +108,11 @@ #return w_value or None return None + def impl_setdefault(self, w_key, w_default): + # here the dict is always empty + self._as_rdict().impl_fallback_setitem(w_key, w_default) + return w_default + def impl_setitem(self, w_key, w_value): self._as_rdict().impl_fallback_setitem(w_key, w_value) @@ -181,6 +186,9 @@ # _________________________________________________________________ # fallback implementation methods + def impl_fallback_setdefault(self, w_key, w_default): + return self.r_dict_content.setdefault(w_key, w_default) + def impl_fallback_setitem(self, w_key, w_value): self.r_dict_content[w_key] = w_value @@ -227,6 +235,7 @@ ("length", 0), ("setitem_str", 2), ("setitem", 2), + ("setdefault", 2), ("delitem", 1), ("iter", 0), ("items", 0), @@ -317,6 +326,14 @@ def impl_setitem_str(self, key, w_value): self.content[key] = w_value + def impl_setdefault(self, w_key, w_default): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + return self.content.setdefault(space.str_w(w_key), w_default) + else: + return self._as_rdict().impl_fallback_setdefault(w_key, w_default) + + def impl_delitem(self, w_key): space = self.space w_key_type = space.type(w_key) @@ -787,13 +804,7 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, 
w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup - w_value = w_dict.getitem(w_key) - if w_value is not None: - return w_value - else: - w_dict.setitem(w_key, w_default) - return w_default + return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): len_defaults = len(defaults_w) diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 
'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/pypy/jit/metainterp/test/test_optimizebasic.py b/pypy/jit/metainterp/test/test_optimizebasic.py --- a/pypy/jit/metainterp/test/test_optimizebasic.py +++ b/pypy/jit/metainterp/test/test_optimizebasic.py @@ -253,7 +253,7 @@ loop.call_pure_results = args_dict() if call_pure_results is not None: for k, v in call_pure_results.items(): - loop.call_pure_results[list(k)] = v + loop.call_pure_results[list(k)] = v metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo @@ -2886,7 +2886,7 @@ # the result of the call, recorded as the first arg), or turned into # a regular CALL. 
arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)] - call_pure_results = {tuple(arg_consts): ConstInt(42)} + call_pure_results = {tuple(arg_consts): ConstInt(42)} ops = ''' [i0, i1, i2] escape(i1) @@ -2931,7 +2931,6 @@ i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) - setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -2964,7 +2963,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3005,7 +3003,6 @@ # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) @@ -3062,7 +3059,7 @@ self.loop.inputargs[0].value = self.nodeobjvalue self.check_expanded_fail_descr('''p2, p1 p0.refdescr = p2 - where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3, virtualrefindexdescr=2 + where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3 where p1 is a node_vtable, nextdescr=p1b where p1b is a node_vtable, valuedescr=i1 ''', rop.GUARD_NO_EXCEPTION) @@ -3084,7 +3081,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 7, descr=virtualrefindexdescr) escape(p2) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p1, descr=virtualforceddescr) @@ -3111,7 +3107,6 @@ i3 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) - setfield_gc(p2, 23, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, -3, descr=virtualtokendescr) @@ -3360,7 
+3355,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) i3 = int_lt(i2, -5) guard_true(i3) [] @@ -3371,7 +3366,7 @@ i1 = int_lt(i0, 4) guard_true(i1) [] i1p = int_gt(i0, -4) - guard_true(i1p) [] + guard_true(i1p) [] i2 = int_sub(i0, 10) jump(i0) """ diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -51,3 +53,23 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. 
+ Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = 
intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/jit/backend/cli/test/test_basic.py b/pypy/jit/backend/cli/test/test_basic.py --- a/pypy/jit/backend/cli/test/test_basic.py +++ b/pypy/jit/backend/cli/test/test_basic.py @@ -1,14 +1,14 @@ import py from pypy.jit.backend.cli.runner import CliCPU -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit -class CliJitMixin(test_basic.OOJitMixin): +class CliJitMixin(suport.OOJitMixin): CPUClass = CliCPU def setup_class(cls): from pypy.translator.cli.support import PythonNet PythonNet.System # possibly raises Skip -class TestBasic(CliJitMixin, test_basic.TestOOtype): +class TestBasic(CliJitMixin, test_ajit.TestOOtype): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, 
StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class ToyLanguageTests: diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, 
count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from 
pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,14 +190,30 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. 
""" return posix.waitpid(-1, 0) + def wait3(options): + """ wait3(options) -> (pid, status, rusage) + + Wait for completion of a child process and provides resource usage informations + """ + from _pypy_wait import wait3 + return wait3(options) + + def wait4(pid, options): + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -285,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, 
w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -6,6 +6,7 @@ from pypy.tool.udir import udir from pypy.rlib import streamio from pypy.conftest import gettestobjspace +import pytest import sys, os import tempfile, marshal @@ -109,6 +110,14 @@ p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), mode='wb') + # create a .pyw file + p = setuppkg("windows", x = "x = 78") + try: + p.join('x.pyw').remove() + except py.error.ENOENT: + pass + p.join('x.py').rename(p.join('x.pyw')) + return str(root) @@ -177,6 +186,14 @@ import a assert a == a0 + def test_trailing_slash(self): + import sys + try: + sys.path[0] += '/' + import a + finally: + 
sys.path[0] = sys.path[0].rstrip('/') + def test_import_pkg(self): import sys import pkg @@ -325,6 +342,11 @@ import compiled.x assert compiled.x == sys.modules.get('compiled.x') + @pytest.mark.skipif("sys.platform != 'win32'") + def test_pyw(self): + import windows.x + assert windows.x.__file__.endswith('x.pyw') + def test_cannot_write_pyc(self): import sys, os p = os.path.join(sys.path[-1], 'readonly') @@ -985,7 +1007,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -135,7 +135,7 @@ return importing.check_sys_modules(space, w_modulename) def new_module(space, w_name): - return space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', 
'_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,8 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +88,13 @@ # # Custom implementations - at jit.purefunction def ll_math_isnan(y): - return bool(math_isnan(y)) + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. + return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), diff --git a/pypy/jit/tl/tla/test_tla.py b/pypy/jit/tl/tla/test_tla.py --- a/pypy/jit/tl/tla/test_tla.py +++ b/pypy/jit/tl/tla/test_tla.py @@ -155,7 +155,7 @@ # ____________________________________________________________ -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): def test_loop(self): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 
+114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at 
bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -127,7 +129,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" 
faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" block @@ -147,7 +149,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -163,7 +164,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -86,6 +86,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- 
a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -487,7 +487,9 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime - + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) + # experimental operations in support of thread cloning, only # implemented by the Mark&Sweep GC 'gc_x_swap_pool': LLOp(canraise=(MemoryError,), canunwindgc=True), diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): 
diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/rpython/ootypesystem/rstr.py b/pypy/rpython/ootypesystem/rstr.py --- a/pypy/rpython/ootypesystem/rstr.py +++ b/pypy/rpython/ootypesystem/rstr.py @@ -211,8 +211,11 @@ def ll_stringslice_minusone(s): return s.ll_substring(0, s.ll_strlen()-1) - def ll_split_chr(RESULT, s, c): - return RESULT.ll_convert_from_array(s.ll_split_chr(c)) + def ll_split_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_split_chr(c, max)) + + def ll_rsplit_chr(RESULT, s, c, max): + return RESULT.ll_convert_from_array(s.ll_rsplit_chr(c, max)) def ll_int(s, base): if not 2 <= base <= 36: diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -274,8 +274,12 @@ screeninfo.append((0, [])) self.lxy = p, ln prompt = self.get_prompt(ln, ll >= p >= 0) + while '\n' in prompt: + pre_prompt, _, prompt = prompt.partition('\n') + screen.append(pre_prompt) + screeninfo.append((0, [])) p -= ll + 1 - lp = len(prompt) + prompt, lp = self.process_prompt(prompt) l, l2 = disp_str(line) wrapcount = (len(l) + lp) / w if wrapcount == 0: @@ -297,6 +301,31 @@ screeninfo.append((0, [])) return screen + def process_prompt(self, prompt): + """ Process the prompt. + + This means calculate the length of the prompt. The character \x01 + and \x02 are used to bracket ANSI control sequences and need to be + excluded from the length calculation. So also a copy of the prompt + is returned with these control characters removed. 
""" + + out_prompt = '' + l = len(prompt) + pos = 0 + while True: + s = prompt.find('\x01', pos) + if s == -1: + break + e = prompt.find('\x02', s) + if e == -1: + break + # Found start and end brackets, subtract from string length + l = l - (e-s+1) + out_prompt += prompt[pos:s] + prompt[s+1:e] + pos = e+1 + out_prompt += prompt[pos:] + return out_prompt, l + def bow(self, p=None): """Return the 0-based index of the word break preceding p most immediately. diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. 
It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. """ - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. 
- # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. + if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -6,7 +6,7 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import BoxInt -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES 
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,6 +61,12 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import normalizeptr @@ -13,7 +13,7 @@ 
from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname + self.op = 
afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,16 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + def test_reload(self, space, api): pdb = api.PyImport_Import(space.wrap("pdb")) space.delattr(pdb, space.wrap("set_trace")) diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = 
self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... 
+ assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,20 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, - hashfunc, descrgetfunc, descrsetfunc, objobjproc) + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from pypy.interpreter.argument import Arguments from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -193,18 +197,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + 
space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -466,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), @@ -571,12 +616,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", 
"tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -443,7 +443,8 @@ "ll_upper": Meth([], self.SELFTYPE_T), "ll_lower": Meth([], self.SELFTYPE_T), "ll_substring": Meth([Signed, Signed], self.SELFTYPE_T), # ll_substring(start, count) - "ll_split_chr": Meth([self.CHAR], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_split_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! + "ll_rsplit_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! 
"ll_contains": Meth([self.CHAR], Bool), "ll_replace_chr_chr": Meth([self.CHAR, self.CHAR], self.SELFTYPE_T), }) @@ -1480,9 +1481,16 @@ # NOT_RPYTHON return self.make_string(self._str[start:start+count]) - def ll_split_chr(self, ch): + def ll_split_chr(self, ch, max): # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.split(ch)] + l = [self.make_string(s) for s in self._str.split(ch, max)] + res = _array(Array(self._TYPE), len(l)) + res._array[:] = l + return res + + def ll_rsplit_chr(self, ch, max): + # NOT_RPYTHON + l = [self.make_string(s) for s in self._str.rsplit(ch, max)] res = _array(Array(self._TYPE), len(l)) res._array[:] = l return res diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -161,6 +161,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- 
a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -101,7 +101,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -78,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -124,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) @@ -133,6 +132,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" 
+ self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -145,6 +145,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -170,26 +171,47 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. 
- mc.MOV_rr(edi.value, edx.value) - - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -197,21 +219,28 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): - from pypy.rlib import rstack _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0: return # no stack check (for tests, or non-translated) # + # make a "function" that is called immediately at the start of + # an assembler function. In particular, the stack looks like: + # + # | ... 
| <-- aligned to a multiple of 16 + # | retaddr of caller | + # | my own retaddr | <-- esp + # +---------------------+ + # mc = codebuf.MachineCodeBlockWrapper() - mc.PUSH_r(ebp.value) - mc.MOV_rr(ebp.value, esp.value) # + stack_size = WORD if IS_X86_64: # on the x86_64, we have to save all the registers that may # have been used to pass arguments + stack_size += 6*WORD + 8*8 for reg in [edi, esi, edx, ecx, r8, r9]: mc.PUSH_r(reg.value) mc.SUB_ri(esp.value, 8*8) @@ -220,11 +249,13 @@ # if IS_X86_32: mc.LEA_rb(eax.value, +8) + stack_size += 2*WORD + mc.PUSH_r(eax.value) # alignment mc.PUSH_r(eax.value) elif IS_X86_64: mc.LEA_rb(edi.value, +16) - mc.AND_ri(esp.value, -16) # + # esp is now aligned to a multiple of 16 again mc.CALL(imm(slowpathaddr)) # mc.MOV(eax, heap(self.cpu.pos_exception())) @@ -232,16 +263,16 @@ mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() # - if IS_X86_64: + if IS_X86_32: + mc.ADD_ri(esp.value, 2*WORD) + elif IS_X86_64: # restore the registers for i in range(7, -1, -1): mc.MOVSD_xs(i, 8*i) - for i, reg in [(6, r9), (5, r8), (4, ecx), - (3, edx), (2, esi), (1, edi)]: - mc.MOV_rb(reg.value, -8*i) + mc.ADD_ri(esp.value, 8*8) + for reg in [r9, r8, ecx, edx, esi, edi]: + mc.POP_r(reg.value) # - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) mc.RET() # # patch the JNZ above @@ -266,9 +297,7 @@ # function, and will instead return to the caller's caller. Note # also that we completely ignore the saved arguments, because we # are interrupting the function. - mc.MOV_rr(esp.value, ebp.value) - mc.POP_r(ebp.value) - mc.ADD_ri(esp.value, WORD) + mc.ADD_ri(esp.value, stack_size) mc.RET() # rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -537,7 +566,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. 
in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -550,6 +579,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. not translated) @@ -571,12 +604,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -686,8 +739,8 @@ 
nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -896,9 +949,9 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, x, arglocs, start) p = 0 n = len(arglocs) @@ -924,9 +977,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -984,12 +1037,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. 
+ gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1205,6 +1273,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -1783,6 +1856,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1796,8 +1873,8 @@ tmp = ecx else: tmp = eax - - self._emit_call(x, arglocs, 3, tmp=tmp) + + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1828,7 +1905,7 @@ faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') @@ -1842,8 +1919,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), 
arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1868,7 +1945,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -1895,7 +1972,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -1990,11 +2067,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2006,8 +2088,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2015,7 +2096,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = 
self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2025,19 +2106,27 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. - slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ 
b/pypy/jit/metainterp/test/test_loop.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import history From commits-noreply at bitbucket.org Wed Apr 13 21:19:27 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 13 Apr 2011 21:19:27 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: added test_loop_variant_mul1_ovf Message-ID: <20110413191927.463112A2038@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43336:4b26e5e188f5 Date: 2011-04-13 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/4b26e5e188f5/ Log: added test_loop_variant_mul1_ovf diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -298,8 +298,7 @@ guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) short.append(guard) # FIXME: Emit a proper guard here in case it is not - # removed by the optimizer. - # add test_loop_variant_mul1_ovf + # removed by the optimizer. Can that happen? 
self.optimizer.send_extra_operation(guard) assert self.optimizer.newoperations[-1] is not guard diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -107,6 +107,23 @@ self.check_loop_count(1) self.check_loops(int_mul=1) + def test_loop_variant_mul_ovf(self): + myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + res += ovfcheck(x * x) + x += 1 + res += ovfcheck(x * x) + y -= 1 + return res + res = self.meta_interp(f, [6, 7]) + assert res == 1323 + self.check_loop_count(1) + self.check_loops(int_mul_ovf=1) + def test_loop_invariant_mul1(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) def f(x, y): From commits-noreply at bitbucket.org Thu Apr 14 03:12:24 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 14 Apr 2011 03:12:24 +0200 (CEST) Subject: [pypy-svn] pypy default: Backed out changeset c7a7acad0692. For now keep using the libc isinf. Message-ID: <20110414011224.2DE17282B9D@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43337:f68534b6ed13 Date: 2011-04-13 21:10 -0400 http://bitbucket.org/pypy/pypy/changeset/f68534b6ed13/ Log: Backed out changeset c7a7acad0692. For now keep using the libc isinf. diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,6 +1,8 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ +int _pypy_math_isinf(double x); + double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,6 +22,12 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) +int +_pypy_math_isinf(double x) +{ + return PyPy_IS_INFINITY(x); +} + /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,7 +20,8 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p'], + '_pypy_math_expm1', '_pypy_math_log1p', + '_pypy_math_isinf'], ) math_prefix = '_pypy_math_' else: @@ -56,6 +57,7 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -93,8 +95,9 @@ # are awesome. return y != y + at jit.purefunction def ll_math_isinf(y): - return not isnan(y) and isnan(y - y) + return bool(math_isinf(y)) ll_math_copysign = math_copysign From commits-noreply at bitbucket.org Thu Apr 14 03:12:25 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 14 Apr 2011 03:12:25 +0200 (CEST) Subject: [pypy-svn] pypy default: Merged upstream. Message-ID: <20110414011225.3DAF0282B9D@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43338:134b0a8fd1ad Date: 2011-04-13 21:12 -0400 http://bitbucket.org/pypy/pypy/changeset/134b0a8fd1ad/ Log: Merged upstream. 
diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,6 +1,8 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. */ +int _pypy_math_isinf(double x); + double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,6 +22,12 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) +int +_pypy_math_isinf(double x) +{ + return PyPy_IS_INFINITY(x); +} + /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,7 +20,8 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p'], + '_pypy_math_expm1', '_pypy_math_log1p', + '_pypy_math_isinf'], ) math_prefix = '_pypy_math_' else: @@ -56,6 +57,7 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -93,8 +95,9 @@ # are awesome. 
return y != y + at jit.purefunction def ll_math_isinf(y): - return y != 0 and y * .5 == y + return bool(math_isinf(y)) ll_math_copysign = math_copysign From commits-noreply at bitbucket.org Thu Apr 14 12:05:40 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 12:05:40 +0200 (CEST) Subject: [pypy-svn] pypy default: ignore everything inside site-packages, in case we easy_install things directly in the checkout Message-ID: <20110414100540.B15EB2A203F@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43339:03d0a08a0c7b Date: 2011-04-14 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/03d0a08a0c7b/ Log: ignore everything inside site-packages, in case we easy_install things directly in the checkout diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ From commits-noreply at bitbucket.org Thu Apr 14 12:44:32 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 12:44:32 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: use the (admittedly obscure) name "trace-elidable", simply because nobody has preconceptions abous it Message-ID: <20110414104432.55FB52A203F@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3506:07a72fa5fba3 Date: 2011-04-14 12:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/07a72fa5fba3/ Log: use the (admittedly obscure) name "trace-elidable", simply because nobody has preconceptions abous it diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -190,7 +190,7 @@ \cfbolz{XXX stress 
more that "the crux of the techniques and a significant portion of new contributions in the paper are from how to refactoring codes to -expose likely runtime constants and invariant functions"} +expose likely runtime constants and trace-elidable functions"} \section{Background} @@ -550,15 +550,15 @@ XXX not too happy with the definition -This hint can be used to mark functions as \emph{invariant}. A function is -termed invariant if, during the execution of the program, the results of +This hint can be used to mark functions as \emph{trace-elidable}. A function is +termed trace-elidable if, during the execution of the program, the results of subsequent calls to the function with identical arguments may be be replaced with the result of the first call without changing the program's behaviour. From this -definition follows that a call to an invariant function with constant arguments +definition follows that a call to an trace-elidable function with constant arguments in a trace can be replaced with the result of the call.\footnote{This property is less strict than that of a "pure" function, because it is only about actual -calls during execution.} +calls during execution. All pure functions are trace-elidable though.} As an example, take the following class: @@ -593,7 +593,7 @@ which lets the interpreter author communicate invariants to the optimizer. In this case, she could decide that the \texttt{x} field of instances of \texttt{A} is immutable, and therefore \texttt{compute} -is an invariant function. To communicate this, there is a \texttt{invariant} decorator. +is an trace-elidable function. To communicate this, there is a \texttt{elidable} decorator. 
If the code in \texttt{compute} should be constant-folded away, we would change the class as follows: \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -606,7 +606,7 @@ promote(self) self.y = self.compute() + val - @invariant + @elidable def compute(self): return self.x * 2 + 1 \end{lstlisting} @@ -622,7 +622,7 @@ Here, \texttt{0xb73984a8} is the address of the instance of \texttt{A} that was used during tracing. The call to \texttt{compute} is not inlined, so that the optimizer -has a chance to see it. Since the \texttt{compute} function is marked as invariant, and its +has a chance to see it. Since the \texttt{compute} function is marked as trace-elidable, and its argument is a constant reference, the call will be removed by the optimizer. The final trace looks like this: @@ -635,31 +635,31 @@ (assuming that the \texttt{x} field's value is \texttt{4}). -On the one hand, the \texttt{invariant} annotation is very powerful. It can be +On the one hand, the \texttt{elidable} annotation is very powerful. It can be used to constant-fold arbitrary parts of the computation in the interpreter. However, the annotation also gives the interpreter author ample opportunity to introduce bugs. If a -function is annotated to be invariant, but is not really, the optimizer can produce +function is annotated to be trace-elidable, but is not really, the optimizer can produce subtly wrong code. Therefore, a lot of care has to be taken when using this -annotation\footnote{The most common use case of the \texttt{invariant} +annotation\footnote{The most common use case of the \texttt{elidable} annotation is indeed to declare the immutability of fields. Because it is so common, we have special syntactic sugar for it.}. We hope to introduce a debugging mode which would (slowly) check whether the annotation is applied incorrectly to mitigate this problem. 
-\subsubsection{Observably invariant Functions} +\subsubsection{Observably trace-elidable Functions} \cfbolz{XXX do we kill this section?} Why can't we simply write an analysis to find out that the \texttt{x} fields of the -\texttt{A} instances is immutable and deduce that \texttt{compute} is an invariant function, +\texttt{A} instances is immutable and deduce that \texttt{compute} is a trace-elidable function, since it only reads the \texttt{x} field and does not have side effects? This might be possible in this particular case, but in practice the functions that are -annotated with the \texttt{invariant} decorator are usually more complex. +annotated with the \texttt{elidable} decorator are usually more complex. The easiest example for this is that of a function that uses memoization to cache its results. If this function is analyzed, it looks like the function has side effects, because it changes the memoizing dictionary. However, because this side -effect is not externally visible, the function is still invariant. This is +effect is not externally visible, the function is still trace-elidable. This is a property that is not easily detectable by analysis. @@ -682,11 +682,11 @@ The first step in making \texttt{getattr} faster in our object model is to optimize away the dictionary lookups on the instances. The hints we have looked at in the two previous sections don't seem to help with the current object model. There is -no invariant function to be seen, and the instance is not a candidate for promotion, +no trace-elidable function to be seen, and the instance is not a candidate for promotion, because there tend to be many instances. This is a common problem when trying to apply hints. Often, the interpreter -needs a small rewrite to expose the invariant functions and nearly-constant objects +needs a small rewrite to expose the trace-elidable functions and nearly-constant objects that are implicitly there. 
In the case of instance fields this rewrite is not entirely obvious. The basic idea is as follows. In theory instances can have arbitrary fields. In practice however many instances share their layout (i.e. @@ -708,9 +708,9 @@ In this implementation instances no longer use dictionaries to store their fields. Instead, they have a reference to a map, which maps field names to indexes into a storage list. The storage list contains the actual field values. Therefore they have to be immutable, which means -that their \texttt{getindex} method is an invariant function. When a new attribute is added +that their \texttt{getindex} method is an trace-elidable function. When a new attribute is added to an instance, a new map needs to be chosen, which is done with the -\texttt{add\_attribute} method on the previous map. This function is also invariant, +\texttt{add\_attribute} method on the previous map. This function is also trace-elidable, because it caches all new instances of \texttt{Map} that it creates, to make sure that objects with the same layout have the same map. Now that we have introduced maps, it is safe to promote the map everywhere, because we assume @@ -726,7 +726,7 @@ code} The calls to \texttt{Map.getindex} can be optimized away, because they are calls to -an invariant function and they have constant arguments. That means that \texttt{index1/2/3} +a trace-elidable function and they have constant arguments. That means that \texttt{index1/2/3} are constant and the guards on them can be removed. All but the first guard on the map will be optimized away too, because the map cannot have changed in between. This trace is already much better than @@ -753,7 +753,7 @@ enough.\footnote{There is a more complex variant of class versions that can accommodate class fields that change a lot better.} -What we would really like is if the \texttt{Class.find\_method} method were invariant. 
+What we would really like is if the \texttt{Class.find\_method} method were trace-elidable. But it cannot be, because it is always possible to change the class itself. Every time the class changes, \texttt{find\_method} can potentially return a new value. @@ -761,9 +761,9 @@ Therefore, we give every class a version object, which is changed every time a class gets changed (i.e., the \texttt{methods} dictionary changes). This means that the result of calls to \texttt{methods.get()} for a given \texttt{(name, -version)} pair will always be the same, i.e. it is an invariant operation. To help +version)} pair will always be the same, i.e. it is a trace-elidable operation. To help the JIT to detect this case, we factor it out in a helper method which is -explicitly marked as \texttt{@invariant}. The refactored \texttt{Class} can +explicitly marked as \texttt{@elidable}. The refactored \texttt{Class} can be seen in Figure~\ref{fig:version} \begin{figure} @@ -774,7 +774,7 @@ What is interesting here is that \texttt{\_find\_method} takes the \texttt{version} argument but it does not use it at all. Its only purpose is to make the call -invariant, because when the version object changes, the result of the call might be +trace-elidable, because when the version object changes, the result of the call might be different than the previous one. \begin{figure} @@ -839,12 +839,10 @@ % %The techniques we used above to make instance and class lookups faster are %applicable in more general cases than the one we developed them for. A more -%abstract view of maps is that of splitting a data-structure into a part that -%changes slowly, and a part that changes quickly. In the concrete example of maps -%we split the original dictionary into the map (the slow-changing part) and the -%storage array (the quick-changing part). All the computation on the -%slow-changing part can be constant-folded during tracing so that only the -%manipulation of the quick-changing part remains. 
+%abstract view of maps is that of splitting a data-structure into an immutable part (\eg the map) +%and a part that changes (\eg the storage array). All the computation on the +%immutable part is trace-elidable so that only the manipulation of the quick-changing +%part remains in the trace after optimization. % %Similarly, versions can be used to constant-fold arbitrary functions of large data %structures. The version needs to be updated carefully every time the result of diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -14,7 +14,7 @@ promote(version) return self._find_method(name, version) - @invariant + @elidable def _find_method(self, name, version): return self.methods.get(name) diff --git a/talk/icooolps2011/code/map.tex b/talk/icooolps2011/code/map.tex --- a/talk/icooolps2011/code/map.tex +++ b/talk/icooolps2011/code/map.tex @@ -4,11 +4,11 @@ self.indexes = {} self.other_maps = {} - @invariant + @elidable def getindex(self, name): return self.indexes.get(name, -1) - @invariant + @elidable def add_attribute(self, name): if name not in self.other_maps: newmap = Map() diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 74788ae085bc4b96ba57bf6d8c906b05b37dbceb..81f3202fe354f00e3fc30528ea191f8847ebcf66 GIT binary patch [cut] From commits-noreply at bitbucket.org Thu Apr 14 14:43:56 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 14:43:56 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_simple to test_pypy_c_new Message-ID: <20110414124356.2CCCE2A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43340:502a16c3a752 Date: 2011-04-14 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/502a16c3a752/ Log: port test_intbound_simple to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -920,8 +920,10 @@ self.run_and_check(src, threshold=400) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -1071,3 +1073,39 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 500) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=400) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -224,43 +224,6 @@ ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: 
- res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - def test_intbound_addsub_mix(self): tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', 'i - 1 > 1', '1 - i > 1', '1 - i < -3', From commits-noreply at bitbucket.org Thu Apr 14 14:43:57 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 14:43:57 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_addsub_mix to test_pypy_c_new Message-ID: <20110414124357.173F32A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43341:b84f08236871 Date: 2011-04-14 13:46 +0200 http://bitbucket.org/pypy/pypy/changeset/b84f08236871/ Log: port test_intbound_addsub_mix to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1109,3 +1109,38 @@ ''' % (o1, n1, o2, n2) self.run_and_check(src, threshold=400) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 500) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=400) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,44 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - def test_intbound_gt(self): self.run_source(''' def main(): From commits-noreply at bitbucket.org Thu Apr 14 14:43:58 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 14:43:58 +0200 (CEST) Subject: [pypy-svn] pypy default: reduce the number of iterations and the threshold of these tests, to make them slightly faster Message-ID: <20110414124358.985502A2045@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43342:eeada999fff0 Date: 2011-04-14 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/eeada999fff0/ Log: reduce the number 
of iterations and the threshold of these tests, to make them slightly faster diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -838,7 +838,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -849,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -867,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -888,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -899,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -917,7 +917,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): """ @@ -935,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -948,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1102,13 +1102,13 @@ res = [0] * 4 idx = [] for i in range(15): - idx.extend([i] * 500) + idx.extend([i] * 15) for i in idx: res[f(i)] += 1 return res ''' % (o1, n1, o2, n2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_intbound_addsub_mix(self): """ @@ -1137,10 +1137,10 @@ res = [0] * 4 idx = [] for i in range(15): - idx.extend([i] * 500) + idx.extend([i] * 
15) for i in idx: res[f(i)] += 1 return res ''' % (t1, t2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) From commits-noreply at bitbucket.org Thu Apr 14 14:43:59 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 14:43:59 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_gt to test_pypy_c_new Message-ID: <20110414124359.CB4452A2045@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43343:78c0a8cb522f Date: 2011-04-14 14:27 +0200 http://bitbucket.org/pypy/pypy/changeset/78c0a8cb522f/ Log: port test_intbound_gt to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1144,3 +1144,29 @@ ''' % (t1, t2) self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,19 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - def test_intbound_sub_lt(self): self.run_source(''' def main(): From commits-noreply at bitbucket.org Thu Apr 14 14:44:00 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 14:44:00 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_sub_lt to test_pypy_c_new Message-ID: <20110414124400.5FFB32A2045@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43344:1149f8f0ad09 Date: 2011-04-14 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/1149f8f0ad09/ Log: port test_intbound_sub_lt to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1170,3 +1170,27 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) 
+ i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) From commits-noreply at bitbucket.org Thu Apr 14 14:44:02 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 14:44:02 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_addsub_ge to test_pypy_c_new Message-ID: <20110414124402.CD7472A204D@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43345:fd3f23ae8324 Date: 2011-04-14 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/fd3f23ae8324/ Log: port test_intbound_addsub_ge to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1194,3 +1194,33 @@ --TICK-- jump(p0, p1, p2, p3, i11, i13, descr=) """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + # XXX: why do we need ovf check here? If we put a literal "300" + # instead of "n", it disappears + i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -222,30 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 - i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) def test_intbound_addmul_ge(self): self.run_source(''' From commits-noreply at bitbucket.org Thu Apr 14 15:07:48 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 14 Apr 2011 15:07:48 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Implement a tagged pointers hack for x86 backend. I hope it didn't break stuff Message-ID: <20110414130748.DC2EE2A2042@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43346:7882528277e7 Date: 2011-04-14 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/7882528277e7/ Log: Implement a tagged pointers hack for x86 backend. 
I hope it didn't break stuff diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py --- a/pypy/jit/metainterp/quasiimmut.py +++ b/pypy/jit/metainterp/quasiimmut.py @@ -48,6 +48,8 @@ class QuasiImmut(object): + llopaque = True + def __init__(self, cpu): self.cpu = cpu # list of weakrefs to the LoopTokens that must be invalidated if diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -575,6 +575,7 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None +_opaque_objs = [None] def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -613,6 +614,10 @@ T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with # the same valu + if getattr(container, 'llopaque', None): + no = len(_opaque_objs) + _opaque_objs.append(container) + return no * 2 + 1 else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -1223,7 +1228,9 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): - return force_cast(PTRTYPE, self.intval) + if self.intval & 1: + return _opaque_objs[self.intval // 2] + return force_cast(PTRTYPE, self.intval) ## def _cast_to_int(self): ## return self.intval From commits-noreply at bitbucket.org Thu Apr 14 16:54:00 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:00 +0200 (CEST) Subject: [pypy-svn] pypy default: remove this XXX, it is nonsense Message-ID: <20110414145400.64B152A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43347:092a08c586d1 Date: 2011-04-14 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/092a08c586d1/ Log: remove this XXX, it is nonsense diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1212,8 +1212,6 @@ assert loop.match(""" i10 = int_lt(i8, i9) guard_true(i10, descr=...) - # XXX: why do we need ovf check here? If we put a literal "300" - # instead of "n", it disappears i12 = int_add_ovf(i8, 5) guard_no_overflow(descr=...) i14 = int_add_ovf(i7, 1) From commits-noreply at bitbucket.org Thu Apr 14 16:54:01 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:01 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_addmul_ge to test_pypy_c_new Message-ID: <20110414145401.3F46C2A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43348:31efcb15ac38 Date: 2011-04-14 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/31efcb15ac38/ Log: port test_intbound_addmul_ge to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1222,3 +1222,31 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,18 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) def test_intbound_eq(self): self.run_source(''' From commits-noreply at bitbucket.org Thu Apr 14 16:54:02 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:02 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_eq to test_pypy_c_new Message-ID: <20110414145402.43EBD2A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43349:9d453fd0d90a Date: 2011-04-14 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/9d453fd0d90a/ Log: port test_intbound_eq to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1250,3 +1250,35 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) 
+ i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -224,20 +224,6 @@ ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) def test_intbound_mul(self): self.run_source(''' From commits-noreply at bitbucket.org Thu Apr 14 16:54:05 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:05 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_intbound_mul to test_pypy_c_new Message-ID: <20110414145405.320E82A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43350:4536f510b56f Date: 2011-04-14 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/4536f510b56f/ Log: port test_intbound_mul to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1282,3 +1282,30 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) 
+ i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -224,21 +224,6 @@ ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - def test_assert(self): self.run_source(''' def main(a): From commits-noreply at bitbucket.org Thu Apr 14 16:54:06 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:06 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_assert to test_pypy_c_new Message-ID: <20110414145406.5FA142A2042@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43351:daa9def466ca Date: 2011-04-14 16:27 +0200 http://bitbucket.org/pypy/pypy/changeset/daa9def466ca/ Log: port test_assert to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1308,4 +1308,24 @@ --TICK-- jump(p0, p1, p2, p3, p4, i12, i14, descr=) """) - + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) 
+ i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,18 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - def test_zeropadded(self): self.run_source(''' from array import array From commits-noreply at bitbucket.org Thu Apr 14 16:54:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:09 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_zeropadded to test_pypy_c_new Message-ID: <20110414145409.7ABEC2A2045@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43352:b75e71b0318c Date: 2011-04-14 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/b75e71b0318c/ Log: port test_zeropadded to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1329,3 +1329,30 @@ --TICK-- jump(p0, p1, p2, p3, p4, i10, i12, descr=) """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= self.__len__(): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to 
check here? + diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,31 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - def test_circular(self): self.run_source(''' from array import array From commits-noreply at bitbucket.org Thu Apr 14 16:54:10 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 14 Apr 2011 16:54:10 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_circular to test_pypy_c_new Message-ID: <20110414145410.BC8FE2A2044@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43353:4f2ae2b89784 Date: 2011-04-14 16:53 +0200 http://bitbucket.org/pypy/pypy/changeset/4f2ae2b89784/ Log: port test_circular to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1355,4 +1355,27 @@ assert log.result == 9895050.0 loop, = log.loops_by_filename(self.filepath) # XXX: what do we want to check here? 
- + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + # assert self.__len__() == 256 (FIXME: does not improve) + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,28 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - def test_min_max(self): self.run_source(''' def main(): From commits-noreply at bitbucket.org Thu Apr 14 16:56:22 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 16:56:22 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: go back to old definition and say something about side effects. kill section. 
Message-ID: <20110414145622.934ED2A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3507:d84ef2a455fe Date: 2011-04-14 14:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/d84ef2a455fe/ Log: go back to old definition and say something about side effects. kill section. diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -548,17 +548,15 @@ necessarily be folded away because the object can be mutated. Therefore, another hint is needed. -XXX not too happy with the definition - This hint can be used to mark functions as \emph{trace-elidable}. A function is -termed trace-elidable if, during the execution of the program, the results of -subsequent calls to the function with identical arguments may be be replaced -with the result of the first call without changing the program's behaviour. -From this -definition follows that a call to an trace-elidable function with constant arguments -in a trace can be replaced with the result of the call.\footnote{This property -is less strict than that of a "pure" function, because it is only about actual -calls during execution. All pure functions are trace-elidable though.} +termed trace-elidable if, during the execution of the program, +successive calls to the function with identical arguments always return the +same result. In addition the function needs to have no side effects or +idempotent side effects\footnote{This property +is less strict than that of a pure function, because it is only about actual +calls during execution. All pure functions are trace-elidable though.}. +From this definition follows that a call to an trace-elidable function with +constant arguments in a trace can be replaced with the result of the call. As an example, take the following class: @@ -647,23 +645,6 @@ incorrectly to mitigate this problem. 
-\subsubsection{Observably trace-elidable Functions} - -\cfbolz{XXX do we kill this section?} - -Why can't we simply write an analysis to find out that the \texttt{x} fields of the -\texttt{A} instances is immutable and deduce that \texttt{compute} is a trace-elidable function, -since it only reads the \texttt{x} field and does not have side effects? This might -be possible in this particular case, but in practice the functions that are -annotated with the \texttt{elidable} decorator are usually more complex. -The easiest example for this is that of a function that uses memoization to -cache its results. If this function is analyzed, it looks like the function has -side effects, because it changes the memoizing dictionary. However, because this side -effect is not externally visible, the function is still trace-elidable. This is -a property that is not easily detectable by analysis. - - - %___________________________________________________________________________ \section{Putting It All Together} From commits-noreply at bitbucket.org Thu Apr 14 16:56:23 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 16:56:23 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: fix more XXXs, I also found another reference Message-ID: <20110414145623.F0C4D2A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3508:f11a8b46001f Date: 2011-04-14 16:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/f11a8b46001f/ Log: fix more XXXs, I also found another reference diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -160,13 +160,6 @@ we present precisely allow us to implement such feedback and exploitation in a meta-tracing context. -\cfbolz{XXX kill the next paragraph? the info is repeated in the list below} -In particular the hints -influence the constant folding -optimization. 
The first hint makes it possible to turn arbitrary -variables in the trace into constant by feeding back runtime values. The -second hint allows the definition of additional foldable operations. - Together these hints can be used to express many classic implementation techniques used for object models of dynamic languages, such as maps and polymorphic inline caches. @@ -179,6 +172,8 @@ optimization then recognizes and exploits. \item A worked-out example of a simple object model of a dynamic language and how it can be improved using these hints. + \item This example also exemplifies general techniques for refactoring code to + expose likely runtime constants constant folding opportunities. \end{itemize} The paper is structured as follows: Section~\ref{sec:Background} gives an @@ -188,11 +183,6 @@ the hints are applied to the tiny object model and Section~\ref{sec:evaluation} presents benchmarks. -\cfbolz{XXX stress more that "the crux of the techniques and a significant -portion of new contributions in the paper are from how to refactoring codes to -expose likely runtime constants and trace-elidable functions"} - - \section{Background} \label{sec:Background} @@ -432,10 +422,9 @@ There are cases in which it is useful to turn an arbitrary variable into a constant value. This process is called \emph{promotion} and it is an old idea -in partial evaluation (it's called ``The Trick'' \cite{jones_partial_1993} there). Promotion is also heavily -used by Psyco \cite{rigo_representation-based_2004} and by all older versions -of PyPy's JIT. It is a technique that only works well in JIT compilers; -in static compilers it is significantly less applicable. +in partial evaluation (it's called ``The Trick'' \cite{jones_partial_1993} +there). The technique is substantially more powerful in a JIT compiler than in +the static setting of classic partial evaluation. Promotion is essentially a tool for trace specialization. 
There are places in the interpreter where knowing that a value is constant opens a lot of @@ -490,8 +479,10 @@ \end{lstlisting} The promotion is turned into a \texttt{guard} operation in the trace. The guard -captures the value of $x_1$ as it was during tracing. \cfbolz{drop the word runtime feedback here?} -From the point of view of the +captures the value of $x_1$ as it was during tracing. Thus the runtime value of +\texttt{x} is being made available to the compiler to exploit. The introduced +guard specializes the trace, because it only works if the value of $x_1$ is +\texttt{4}. From the point of view of the optimizer, this guard is not any different than the one produced by the \texttt{if} statement in the example above. After the guard, the rest of the trace can assume that $x_1$ is equal to \texttt{4}, meaning that the optimizer will turn this @@ -700,11 +691,8 @@ With this changed instance implementation, the trace we had above changes to the following that of see Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the memory address of the \texttt{Map} instance that has been promoted. Operations -that can be optimized away are grayed out. - -\cfbolz{XXX also explain that some forwarding of guarded values is happening, -make clearer which figures show optimized code and which show non-optimized -code} +that can be optimized away are grayed out, their results will be replaced by +fixed values by the constant folding. The calls to \texttt{Map.getindex} can be optimized away, because they are calls to a trace-elidable function and they have constant arguments. That means that \texttt{index1/2/3} @@ -950,7 +938,11 @@ We already explored promotion in other context, such as earlier versions of PyPy's JIT \cite{armin_rigo_jit_2007} as well as a Prolog partial evaluator -\cite{carl_friedrich_bolz_towards_????}. Promotion is quite similar to +\cite{carl_friedrich_bolz_towards_????}. 
Promotion is also heavily +used by Psyco \cite{rigo_representation-based_2004} (promotion is called +"unlifting" in this paper) a method-based JIT compiler for Python written by +one of the authors. Promotion was also used in DyC \cite{grant_dyc:_2000}, a +runtime partial evaluator for C. Promotion is quite similar to (polymorphic) inline caching and runtime type feedback techniques which were first used in Smalltalk \cite{deutsch_efficient_1984} and SELF \cite{hoelzle_optimizing_1991,hoelzle_optimizing_1994} implementations. diff --git a/talk/icooolps2011/paper.bib b/talk/icooolps2011/paper.bib --- a/talk/icooolps2011/paper.bib +++ b/talk/icooolps2011/paper.bib @@ -181,6 +181,23 @@ pages = {2:1{\textendash}2:11} }, + at article{grant_dyc:_2000, + title = {{DyC:} an expressive annotation-directed dynamic compiler for C}, + volume = {248}, + issn = {0304-3975}, + shorttitle = {{DyC}}, + url = {http://dx.doi.org/10.1016/S0304-3975(00)00051-7}, + doi = {http://dx.doi.org/10.1016/S0304-3975(00)00051-7}, + abstract = {An abstract is not available.}, + journal = {Theoretical Computer Science}, + author = {Brian Grant and Markus Mock and Matthai Philipose and Craig Chambers and Susan J Eggers}, + month = oct, + year = {2000}, + note = {{ACM} {ID:} 357493}, + keywords = {c, c language, compilers, constant folding, dataflow analysis, design, dynamic compilation, languages, optimization, partial evaluation, performance, program optimization, run-time code generation, specialization}, + pages = {147{\textendash}199} +}, + @article{bolz_allocation_2011, series = {{PEPM} '11}, title = {Allocation removal by partial evaluation in a tracing {JIT}}, diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 81f3202fe354f00e3fc30528ea191f8847ebcf66..c7cea8fe8abeff2584be91f02b37f106ea16103f GIT binary patch [cut] From commits-noreply at bitbucket.org Thu Apr 14 16:56:24 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 16:56:24 
+0200 (CEST) Subject: [pypy-svn] extradoc extradoc: write acknowledgements Message-ID: <20110414145624.77EBA2A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3509:75fd5b4906b2 Date: 2011-04-14 16:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/75fd5b4906b2/ Log: write acknowledgements diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -969,7 +969,8 @@ \section*{Acknowledgements} -XXX Peng Wu and David Edelsohn, Laura Creighton +The authors would like to thank Peng Wu, David Edelsohn and Laura Creighton for +encouragement, fruitful discussions and feedback during the writing of this paper. \bibliographystyle{abbrv} \bibliography{paper} From commits-noreply at bitbucket.org Thu Apr 14 17:24:38 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 17:24:38 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: ruthlessly cut space, mostly by making font smaller. not pretty, but I have no idea what to cut atm. Message-ID: <20110414152438.9D09D2A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3510:176a96686855 Date: 2011-04-14 17:19 +0200 http://bitbucket.org/pypy/extradoc/changeset/176a96686855/ Log: ruthlessly cut space, mostly by making font smaller. not pretty, but I have no idea what to cut atm. 
diff --git a/talk/icooolps2011/code/trace2.tex b/talk/icooolps2011/code/trace2.tex --- a/talk/icooolps2011/code/trace2.tex +++ b/talk/icooolps2011/code/trace2.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[mathescape,escapechar=|,basicstyle=\ttfamily]] # $inst_1$.getattr("a") $map_1$ = $inst_1$.map @@ -31,3 +32,4 @@ $v_4$ = $v_2$ + $result_3$ return($v_4$) \end{lstlisting} +} diff --git a/talk/icooolps2011/code/trace5.tex b/talk/icooolps2011/code/trace5.tex --- a/talk/icooolps2011/code/trace5.tex +++ b/talk/icooolps2011/code/trace5.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] # $inst_1$.getattr("a") $map_1$ = $inst_1$.map @@ -16,3 +17,4 @@ $v_4$ = $v_2$ + 17 return($v_4$) \end{lstlisting} +} diff --git a/talk/icooolps2011/code/interpreter-slow.tex b/talk/icooolps2011/code/interpreter-slow.tex --- a/talk/icooolps2011/code/interpreter-slow.tex +++ b/talk/icooolps2011/code/interpreter-slow.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily,numbers = right] class Class(object): def __init__(self, name): @@ -33,3 +34,4 @@ raise AttributeError return result \end{lstlisting} +} diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index c7cea8fe8abeff2584be91f02b37f106ea16103f..300e83298afbfa11c8262998f23808deb225e9c4 GIT binary patch [cut] diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -16,7 +16,7 @@ \usepackage{listings} \usepackage[T1]{fontenc} -\usepackage[scaled=0.8]{beramono} +\usepackage[scaled=0.82]{beramono} \definecolor{commentgray}{rgb}{0.3,0.3,0.3} @@ -35,7 +35,7 @@ } \newboolean{showcomments} -\setboolean{showcomments}{true} +\setboolean{showcomments}{false} \ifthenelse{\boolean{showcomments}} {\newcommand{\nb}[2]{ \fbox{\bfseries\sffamily\scriptsize#1} @@ -198,8 +198,7 @@ A number of languages have been implemented with PyPy, most importantly a full Python 
implementation, but also a Prolog interpreter -\cite{carl_friedrich_bolz_towards_2010} and a Smalltalk VM -\cite{carl_friedrich_bolz_back_2008}. +\cite{carl_friedrich_bolz_towards_2010}. The translation of the interpreter to C code adds a number of implementation details into the final executable that are not present in the interpreter implementation, such as @@ -266,7 +265,7 @@ traced many iterations of the interpreter main loop. \begin{figure} -\includegraphics[scale=0.5]{figures/trace-levels} +\includegraphics[scale=0.45]{figures/trace-levels} \caption{The levels involved in tracing} \label{fig:trace-levels} \end{figure} @@ -332,9 +331,11 @@ \anto{I still think it's a bit weird to call them ``methods'' and then use them as attributes in the example} +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] inst.getattr("a") + inst.getattr("b") + inst.getattr("c") \end{lstlisting} +} \begin{figure} \input{code/trace1.tex} @@ -402,18 +403,22 @@ is not a constant in the original source code. For example, consider the following fragment of RPython code: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] if x == 4: y = y + x \end{lstlisting} +} If the fragment is traced with $x_1$ being \texttt{4}, the following trace is produced: % +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 4) $y_2$ = $y_1$ + $x_1$ \end{lstlisting} +} In the trace above, the value of $x_1$ is statically known after the guard. Remember that a guard is a runtime check. The above trace will run to @@ -434,31 +439,37 @@ a lot of computation depending on the value of that variable. Let's make this more concrete. 
If we trace a call to the following function: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] def f1(x, y): z = x * 2 + 1 return z + y \end{lstlisting} +} We get a trace that looks like this: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] $v_1$ = $x_1$ * 2 $z_1$ = $v_1$ + 1 $v_2$ = $z_1$ + $y_1$ return($v_2$) \end{lstlisting} +} Observe how the first two operations could be constant-folded if the value of $x_1$ were known. Let's assume that the value of \texttt{x} in the Python code can vary, but does so rarely, i.e. only takes a few different values at runtime. If this is the case, we can add a hint to promote \texttt{x}, like this: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] def f1(x, y): promote(x) z = x * 2 + 1 return z + y \end{lstlisting} +} The hint indicates that \texttt{x} is likely a runtime constant and the JIT should try to perform runtime specialization on it @@ -470,6 +481,7 @@ the arguments \texttt{4} and \texttt{8}. The trace will be the same, except for one operation at the beginning: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 4) $v_1$ = $x_1$ * 2 @@ -477,6 +489,7 @@ $v_2$ = $z_1$ + $y_1$ return($v_2$) \end{lstlisting} +} The promotion is turned into a \texttt{guard} operation in the trace. The guard captures the value of $x_1$ as it was during tracing. Thus the runtime value of @@ -488,11 +501,13 @@ assume that $x_1$ is equal to \texttt{4}, meaning that the optimizer will turn this trace into: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 4) $v_2$ = 9 + $y_1$ return($v_2$) \end{lstlisting} +} Notice how the first two arithmetic operations were constant folded. The hope is that the guard is executed quicker than the multiplication and the addition that @@ -504,11 +519,13 @@ capture a different value of $x_1$. If it is e.g. 
\texttt{2}, then the optimized trace looks like this: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 2) $v_2$ = 5 + $y_1$ return($v_2$) \end{lstlisting} +} This new trace will be attached to the guard instruction of the first trace. If $x_1$ takes on even more values, a new trace will eventually be made for all of them, @@ -551,6 +568,7 @@ As an example, take the following class: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class A(object): def __init__(self, x, y): @@ -563,10 +581,12 @@ def compute(self): return self.x * 2 + 1 \end{lstlisting} +} Tracing the call \texttt{a.f(10)} of some instance of \texttt{A} yields the following trace (note how the call to \texttt{compute} is inlined): % +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] $x_1$ = $a_1$.x $v_1$ = $x_1$ * 2 @@ -574,6 +594,7 @@ $v_3$ = $v_2$ + $val_1$ $a_1$.y = $v_3$ \end{lstlisting} +} In this case, adding a promote of \texttt{self} in the \texttt{f} method to get rid of the computation of the first few operations does not help. Even if $a_1$ is a @@ -585,6 +606,7 @@ is an trace-elidable function. To communicate this, there is a \texttt{elidable} decorator. If the code in \texttt{compute} should be constant-folded away, we would change the class as follows: +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class A(object): def __init__(self, x, y): @@ -599,15 +621,18 @@ def compute(self): return self.x * 2 + 1 \end{lstlisting} +} Now the trace will look like this: % +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($a_1$ == 0xb73984a8) $v_1$ = compute($a_1$) $v_2$ = $v_1$ + $val_1$ $a_1$.y = $v_2$ \end{lstlisting} +} Here, \texttt{0xb73984a8} is the address of the instance of \texttt{A} that was used during tracing. The call to \texttt{compute} is not inlined, so that the optimizer @@ -616,11 +641,13 @@ is a constant reference, the call will be removed by the optimizer. 
The final trace looks like this: % +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($a_1$ == 0xb73984a8) $v_2$ = 9 + $val_1$ $a_1$.y = $v_2$ \end{lstlisting} +} (assuming that the \texttt{x} field's value is \texttt{4}). @@ -863,7 +890,7 @@ \begin{figure} \begin{center} -{\footnotesize +{\smaller \begin{tabular}{|l|r|r|r|} \hline &CPython &JIT baseline &JIT full\\ @@ -937,8 +964,8 @@ implementation of dynamic languages on top of JVMs easier. The bytecode gives access to user accessible generalized inline cache. It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. We already explored promotion in other context, such as earlier versions of -PyPy's JIT \cite{armin_rigo_jit_2007} as well as a Prolog partial evaluator -\cite{carl_friedrich_bolz_towards_????}. Promotion is also heavily +PyPy's JIT as well as a Prolog partial evaluator +\cite{bolz_towards_2009}. Promotion is also heavily used by Psyco \cite{rigo_representation-based_2004} (promotion is called "unlifting" in this paper) a method-based JIT compiler for Python written by one of the authors. Promotion was also used in DyC \cite{grant_dyc:_2000}, a diff --git a/talk/icooolps2011/code/map.tex b/talk/icooolps2011/code/map.tex --- a/talk/icooolps2011/code/map.tex +++ b/talk/icooolps2011/code/map.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class Map(object): def __init__(self): @@ -46,3 +47,4 @@ def getattr(self, name): ... 
# as before \end{lstlisting} +} diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class VersionTag(object): pass @@ -22,3 +23,4 @@ self.methods[name] = value self.version = VersionTag() \end{lstlisting} +} diff --git a/talk/icooolps2011/code/trace4.tex b/talk/icooolps2011/code/trace4.tex --- a/talk/icooolps2011/code/trace4.tex +++ b/talk/icooolps2011/code/trace4.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[escapechar=|,mathescape,basicstyle=\ttfamily] # $inst_1$.getattr("a") $map_1$ = $inst_1$.map @@ -35,3 +36,4 @@ $v_4$ = $v_2$ + $result_3$ return($v_4$) \end{lstlisting} +} diff --git a/talk/icooolps2011/paper.bib b/talk/icooolps2011/paper.bib --- a/talk/icooolps2011/paper.bib +++ b/talk/icooolps2011/paper.bib @@ -1,44 +1,15 @@ - - at inproceedings{carl_friedrich_bolz_towards_????, - series = {{LNCS} 6037 to appear}, - title = {Towards {Just-In-Time} Partial Evaluation of Prolog}, - abstract = {We introduce a just-in-time specializer for Prolog. Just-in- -time specialization attempts to unify of the concepts and benefits of -partial evaluation {(PE)} and just-in-time {(JIT)} compilation. It is a variant -of {PE} that occurs purely at runtime, which lazily generates residual code -and is constantly driven by runtime feedback. -Our prototype is an on-line just-in-time partial evaluator. A major fo- -cus of our work is to remove the overhead incurred when executing an -interpreter written in Prolog. It improves over classical offline {PE} by re- -quiring almost no heuristics nor hints from the author of the interpreter; -it also avoids most termination issues due to interleaving execution and -specialization. 
We evaluate the performance of our prototype on a small -number of benchmarks.}, - booktitle = {Logic-based Program Synthesis and Transformation {(LOPSTR'2009)}}, - publisher = {{Springer-Verlag}}, - author = {Carl Friedrich Bolz and Michael Leuschel and Armin Rigo} -}, @inproceedings{deutsch_efficient_1984, - address = {Salt Lake City, Utah, United States}, - title = {Efficient implementation of the smalltalk-80 system}, + address = {Salt Lake City, Utah}, + title = {Efficient implementation of the Smalltalk-80 system}, isbn = {0-89791-125-3}, url = {http://portal.acm.org/citation.cfm?id=800017.800542}, doi = {10.1145/800017.800542}, abstract = {The Smalltalk-80* programming language includes dynamic storage allocation, full upward funargs, and universally polymorphic procedures; the Smalltalk-80 programming system features interactive execution with incremental compilation, and implementation portability. These features of modern programming systems are among the most difficult to implement efficiently, even individually. A new implementation of the Smalltalk-80 system, hosted on a small microprocessor-based computer, achieves high performance while retaining complete (object code) compatibility with existing implementations. This paper discusses the most significant optimization techniques developed over the course of the project, many of which are applicable to other languages. The key idea is to represent certain runtime state (both code and data) in more than one form, and to convert between forms when needed.}, - booktitle = {Proceedings of the 11th {ACM} {SIGACT-SIGPLAN} symposium on Principles of programming languages}, + booktitle = {{POPL}}, publisher = {{ACM}}, author = {L. Peter Deutsch and Allan M. 
Schiffman}, - year = {1984}, - pages = {297--302} -}, - - at phdthesis{cuni_high_2010, - title = {High performance implementation of Python for {CLI/.NET} with {JIT} compiler generation for dynamic languages.}, - school = {Dipartimento di Informatica e Scienze {dell'Informazione,} University of Genova}, - author = {Antonio Cuni}, - year = {2010}, - note = {Technical Report {DISI-TH-2010-05}} + year = {1984} }, @inproceedings{carl_friedrich_bolz_towards_2010, @@ -48,26 +19,11 @@ url = {http://portal.acm.org/citation.cfm?id=1836102}, doi = {10.1145/1836089.1836102}, abstract = {Most Prolog implementations are implemented in low-level languages such as C and are based on a variation of the {WAM} instruction set, which enhances their performance but makes them hard to write. In addition, many of the more dynamic features of Prolog (like assert), despite their popularity, are not well supported. We present a high-level continuation-based Prolog interpreter based on the {PyPy} project. The {PyPy} project makes it possible to easily and efficiently implement dynamic languages. It provides tools that automatically generate a just-in-time compiler for a given interpreter of the target language, by using partial evaluation techniques. The resulting Prolog implementation is surprisingly efficient: it clearly outperforms existing interpreters of Prolog in high-level languages such as Java. Moreover, on some benchmarks, our system outperforms state-of-the-art {WAM-based} Prolog implementations. 
Our paper aims to show that declarative languages such as Prolog can indeed benefit from having a just-in-time compiler and that {PyPy} can form the basis for implementing programming languages other than Python.}, - booktitle = {Proceedings of the 12th international {ACM} {SIGPLAN} symposium on Principles and practice of declarative programming}, + booktitle = {{PPDP}}, publisher = {{ACM}}, author = {Carl Friedrich Bolz and Michael Leuschel and David Schneider}, year = {2010}, - keywords = {interpreters, jit, logic programming, partial evaluation}, - pages = {99--108} -}, - - at inproceedings{garg_compiling_2010, - address = {Pittsburgh, Pennsylvania}, - title = {Compiling Python to a hybrid execution environment}, - isbn = {978-1-60558-935-0}, - url = {http://portal.acm.org/citation.cfm?id=1735695&dl=GUIDE&coll=GUIDE&CFID=108695705&CFTOKEN=81778166}, - doi = {10.1145/1735688.1735695}, - abstract = {A new compilation framework enables the execution of numerical-intensive applications, written in Python, on a hybrid execution environment formed by a {CPU} and a {GPU.} This compiler automatically computes the set of memory locations that need to be transferred to the {GPU,} and produces the correct mapping between the {CPU} and the {GPU} address spaces. Thus, the programming model implements a virtual shared address space. This framework is implemented as a combination of {unPython,} an ahead-of-time compiler from {Python/NumPy} to the C programming language, and {jit4GPU,} a just-in-time compiler from C to the {AMD} {CAL} interface. Experimental evaluation demonstrates that for some benchmarks the generated {GPU} code is 50 times faster than generated {OpenMP} code. 
The {GPU} performance also compares favorably with optimized {CPU} {BLAS} code for single-precision computations in most cases.}, - booktitle = {Proceedings of the 3rd Workshop on {General-Purpose} Computation on Graphics Processing Units}, - publisher = {{ACM}}, - author = {Rahul Garg and Jos\'{e} Nelson Amaral}, - year = {2010}, - pages = {19--30} + keywords = {interpreters, jit, logic programming, partial evaluation} }, @inproceedings{bebenita_spur:_2010, @@ -78,12 +34,11 @@ url = {http://portal.acm.org/citation.cfm?id=1869459.1869517&coll=GUIDE&dl=GUIDE&type=series&idx=SERIES318&part=series&WantType=Proceedings&title=OOPSLA%2FSPLASH&CFID=106280261&CFTOKEN=29377718}, doi = {10.1145/1869459.1869517}, abstract = {Tracing just-in-time compilers {(TJITs)} determine frequently executed traces (hot paths and loops) in running programs and focus their optimization effort by emitting optimized machine code specialized to these traces. Prior work has established this strategy to be especially beneficial for dynamic languages such as {JavaScript,} where the {TJIT} interfaces with the interpreter and produces machine code from the {JavaScript} trace.}, - booktitle = {Proceedings of the {ACM} international conference on Object oriented programming systems languages and applications}, + booktitle = {{OOPSLA}}, publisher = {{ACM}}, author = {Michael Bebenita and Florian Brandner and Manuel Fahndrich and Francesco Logozzo and Wolfram Schulte and Nikolai Tillmann and Herman Venter}, year = {2010}, keywords = {cil, dynamic compilation, javascript, just-in-time, tracing}, - pages = {708--725}, annote = {{\textless}h3{\textgreater}{\textless}a {href="http://morepypy.blogspot.com/2010/07/comparing-spur-to-pypy.html"{\textgreater}Comparing} {SPUR} to {PyPy{\textless}/a{\textgreater}{\textless}/h3{\textgreater}} {{\textless}p{\textgreater}Recently,} I've become aware of the {\textless}a {href="http://research.microsoft.com/en-us/projects/spur/"{\textgreater}SPUR} 
project{\textless}/a{\textgreater} of Microsoft Research and read some of their papers (the tech report {"SPUR:} A {Trace-Based} {JIT} Compiler for {CIL"} is very cool). I found the project to be very interesting and since their approach is in many ways related to what {PyPy} is doing, I now want to compare and contrast the two projects.{\textless}/p{\textgreater} {\textless}div id="a-tracing-jit-for-net"{\textgreater} @@ -150,20 +105,19 @@ }, @inproceedings{gal_trace-based_2009, - address = {New York, {NY,} {USA}}, + address = {New York, New York}, series = {{PLDI} '09}, title = {Trace-based just-in-time type specialization for dynamic languages}, isbn = {978-1-60558-392-1}, location = {Dublin, Ireland}, doi = {10.1145/1542476.1542528}, abstract = {Dynamic languages such as {JavaScript} are more difficult to compile than statically typed ones. Since no concrete type information is available, traditional compilers need to emit generic code that can handle all possible type combinations at runtime. We present an alternative compilation technique for dynamically-typed languages that identifies frequently executed loop traces at run-time and then generates machine code on the fly that is specialized for the actual dynamic types occurring on each path through the loop. Our method provides cheap inter-procedural type specialization, and an elegant and efficient way of incrementally compiling lazily discovered alternative paths through nested loops. 
We have implemented a dynamic compiler for {JavaScript} based on our technique and we have measured speedups of 10x and more for certain benchmark programs.}, - booktitle = {{ACM} {SIGPLAN} Notices}, + booktitle = {{PLDI}}, publisher = {{ACM}}, author = {Andreas Gal and Brendan Eich and Mike Shaver and David Anderson and David Mandelin and Mohammad R Haghighat and Blake Kaplan and Graydon Hoare and Boris Zbarsky and Jason Orendorff and Jesse Ruderman and Edwin W Smith and Rick Reitmaier and Michael Bebenita and Mason Chang and Michael Franz}, year = {2009}, note = {{ACM} {ID:} 1542528}, - keywords = {code generation, design, dynamically typed languages, experimentation, incremental compilers, languages, measurement, performance, run-time environments, trace-based compilation}, - pages = {465{\textendash}478} + keywords = {code generation, design, dynamically typed languages, experimentation, incremental compilers, languages, measurement, performance, run-time environments, trace-based compilation} }, @article{rose_bytecodes_2009, @@ -176,9 +130,7 @@ journal = {Proceedings of the Third Workshop on Virtual Machines and Intermediate Languages}, author = {John R Rose}, year = {2009}, - note = {{ACM} {ID:} 1711508}, - keywords = {bytecode, code generation, combinator}, - pages = {2:1{\textendash}2:11} + keywords = {bytecode, code generation, combinator} }, @article{grant_dyc:_2000, @@ -198,29 +150,36 @@ pages = {147{\textendash}199} }, - at article{bolz_allocation_2011, + at inproceedings{bolz_towards_2009, + title = {Towards {Just-In-Time} Partial Evaluation of Prolog}, + doi = {10.1007/978-3-642-12592-8_12}, + booktitle = {Logic Program Synthesis and Transformation}, + author = {Carl Friedrich Bolz and Michael Leuschel and Armin Rigo}, + year = {2009}, + pages = {158{\textendash}172} +}, + + at inproceedings{bolz_allocation_2011, series = {{PEPM} '11}, title = {Allocation removal by partial evaluation in a tracing {JIT}}, location = {Austin, Texas, {USA}}, doi = 
{10.1145/1929501.1929508}, abstract = {The performance of many dynamic language implementations suffers from high allocation rates and runtime type checks. This makes dynamic languages less applicable to purely algorithmic problems, despite their growing popularity. In this paper we present a simple compiler optimization based on online partial evaluation to remove object allocations and runtime type checks in the context of a tracing {JIT.} We evaluate the optimization using a Python {VM} and find that it gives good results for all our (real-life) benchmarks.}, - journal = {Proceedings of the 20th {ACM} {SIGPLAN} workshop on Partial evaluation and program manipulation}, + booktitle = {{PEPM}}, author = {Carl Friedrich Bolz and Antonio Cuni and Maciej Fija\l{}kowski and Michael Leuschel and Samuele Pedroni and Armin Rigo}, year = {2011}, - note = {{ACM} {ID:} 1929508}, - keywords = {code generation, experimentation, interpreters, languages, optimization, partial evaluation, performance, run-time environments, tracing jit}, - pages = {43{\textendash}52} + keywords = {code generation, experimentation, interpreters, languages, optimization, partial evaluation, performance, run-time environments, tracing jit} }, @inproceedings{chang_tracing_2009, - address = {Washington, {DC,} {USA}}, + address = {Washington, {DC}}, title = {Tracing for Web 3.0: Trace Compilation for the Next Generation Web Applications}, isbn = {978-1-60558-375-4}, shorttitle = {Tracing for web 3.0}, url = {http://portal.acm.org/citation.cfm?id=1508293.1508304}, doi = {10.1145/1508293.1508304}, abstract = {Today's web applications are pushing the limits of modern web browsers. The emergence of the browser as the platform of choice for rich client-side applications has shifted the use of in-browser {JavaScript} from small scripting programs to large computationally intensive application logic. 
For many web applications, {JavaScript} performance has become one of the bottlenecks preventing the development of even more interactive client side applications. While traditional just-in-time compilation is successful for statically typed virtual machine based languages like Java, compiling {JavaScript} turns out to be a challenging task. Many {JavaScript} programs and scripts are short-lived, and users expect a responsive browser during page loading. This leaves little time for compilation of {JavaScript} to generate machine code.}, - booktitle = {Proceedings of the 2009 {ACM} {SIGPLAN/SIGOPS} International Conference on Virtual Execution Environments}, + booktitle = {{VEE}}, publisher = {{ACM}}, author = {Mason Chang and Edwin Smith and Rick Reitmaier and Michael Bebenita and Andreas Gal and Christian Wimmer and Brendan Eich and Michael Franz}, year = {2009}, @@ -228,14 +187,6 @@ pages = {71--80} }, - at phdthesis{carl_friedrich_bolz_automatic_2008, - type = {Master Thesis}, - title = {Automatic {JIT} Compiler Generation with Runtime Partial Evaluation}, - school = {{Heinrich-Heine-Universit\"{a}t} D\"{u}sseldorf}, - author = {Carl Friedrich Bolz}, - year = {2008} -}, - @inproceedings{davide_ancona_rpython:_2007, address = {Montreal, Quebec, Canada}, title = {{RPython:} a step towards reconciling dynamically and statically typed {OO} languages}, @@ -244,12 +195,11 @@ url = {http://portal.acm.org/citation.cfm?id=1297091}, doi = {10.1145/1297081.1297091}, abstract = {Although the C-based interpreter of Python is reasonably fast, implementations on the {CLI} or the {JVM} platforms offers some advantages in terms of robustness and interoperability. 
Unfortunately, because the {CLI} and {JVM} are primarily designed to execute statically typed, object-oriented languages, most dynamic language implementations cannot use the native bytecodes for common operations like method calls and exception handling; as a result, they are not able to take full advantage of the power offered by the {CLI} and {JVM.}}, - booktitle = {Proceedings of the 2007 symposium on Dynamic languages}, + booktitle = {{DLS}}, publisher = {{ACM}}, author = {Davide Ancona and Massimo Ancona and Antonio Cuni and Nicholas D. Matsakis}, year = {2007}, - keywords = {{JVM,} .net, Python}, - pages = {53--64} + keywords = {{JVM,} .net, Python} }, @article{futamura_partial_1999, @@ -270,7 +220,7 @@ isbn = {0-13-020249-5}, url = {http://portal.acm.org/citation.cfm?id=153676}, abstract = {This book is out of print. For copies, Please refer to the following online page}, - publisher = {{Prentice-Hall,} Inc.}, + publisher = {{Prentice-Hall}}, author = {Neil D. Jones and Carsten K. Gomard and Peter Sestoft}, year = {1993} }, @@ -282,12 +232,11 @@ url = {http://portal.acm.org/citation.cfm?id=1176753}, doi = {10.1145/1176617.1176753}, abstract = {The {PyPy} project seeks to prove both on a research and a practical level the feasibility of constructing a virtual machine {(VM)} for a dynamic language in a dynamic language - in this case, Python. The aim is to translate (i.e. compile) the {VM} to arbitrary target environments, ranging in level from {C/Posix} to {Smalltalk/Squeak} via Java and {CLI/.NET,} while still being of reasonable efficiency within these {environments.A} key tool to achieve this goal is the systematic reuse of the Python language as a system programming language at various levels of our architecture and translation process. 
For each level, we design a corresponding type system and apply a generic type inference engine - for example, the garbage collector is written in a style that manipulates simulated pointer and address objects, and when translated to C these operations become C-level pointer and address instructions.}, - booktitle = {Companion to the 21st {ACM} {SIGPLAN} conference on Object-oriented programming systems, languages, and applications}, + booktitle = {{DLS}}, publisher = {{ACM}}, author = {Armin Rigo and Samuele Pedroni}, year = {2006}, - keywords = {metacircularity, Python, retargettable code generation, type inference, {VM}}, - pages = {944--953} + keywords = {metacircularity, Python, retargettable code generation, type inference, {VM}} }, @article{georges_statistically_2007, @@ -297,7 +246,7 @@ doi = {10.1145/1297105.1297033}, abstract = {Java performance is far from being trivial to benchmark because it is affected by various factors such as the Java application, its input, the virtual machine, the garbage collector, the heap size, etc. In addition, non-determinism at run-time causes the execution time of a Java program to differ from run to run. There are a number of sources of non-determinism such as {Just-In-Time} {(JIT)} compilation and optimization in the virtual machine {(VM)} driven by timer-based method sampling, thread scheduling, garbage collection, and various.}, number = {10}, - journal = {{SIGPLAN} Not.}, + journal = {{SIGPLAN} Notices}, author = {Andy Georges and Dries Buytaert and Lieven Eeckhout}, year = {2007}, keywords = {benchmarking, data analysis, methodology, statistics}, @@ -313,24 +262,13 @@ url = {http://portal.acm.org/citation.cfm?id=1565827}, doi = {10.1145/1565824.1565827}, abstract = {We attempt to apply the technique of Tracing {JIT} Compilers in the context of the {PyPy} project, i.e., to programs that are interpreters for some dynamic languages, including Python. 
Tracing {JIT} compilers can greatly speed up programs that spend most of their time in loops in which they take similar code paths. However, applying an unmodified tracing {JIT} to a program that is itself a bytecode interpreter results in very limited or no speedup. In this paper we show how to guide tracing {JIT} compilers to greatly improve the speed of bytecode interpreters. One crucial point is to unroll the bytecode dispatch loop, based on two kinds of hints provided by the implementer of the bytecode interpreter. We evaluate our technique by applying it to two {PyPy} interpreters: one is a small example, and the other one is the full Python interpreter.}, - booktitle = {Proceedings of the 4th workshop on the Implementation, Compilation, Optimization of {Object-Oriented} Languages and Programming Systems}, + booktitle = {{ICOOOLPS}}, publisher = {{ACM}}, author = {Carl Friedrich Bolz and Antonio Cuni and Maciej Fija\l{}kowski and Armin Rigo}, year = {2009}, pages = {18--25} }, - at techreport{armin_rigo_jit_2007, - title = {{JIT} Compiler Architecture}, - url = {http://codespeak.net/pypy/dist/pypy/doc/index-report.html}, - abstract = {{PyPy{\textquoteright}s} translation tool-chain {\textendash} from the interpreter written in {RPython} to generated {VMs} for low-level platforms {\textendash} is now able to extend those {VMs} with an automatically generated dynamic compiler, derived from the interpreter. This is achieved by a pragmatic application of partial evaluation techniques guided by a few hints added to the source of the interpreter. Crucial for the effectiveness of dynamic compilation is the use of run-time information to improve compilation results: in our approach, a novel powerful primitive called {\textquotedblleft}promotion{\textquotedblright} that {\textquotedblleft}promotes{\textquotedblright} run-time values to compile-time is used to that effect. 
In this report, we describe it along with other novel techniques that allow the approach to scale to something as large as {PyPy{\textquoteright}s} Python interpreter.}, - number = {D08.2}, - institution = {{PyPy}}, - author = {Armin Rigo and Samuele Pedroni}, - month = may, - year = {2007} -}, - @article{bala_dynamo:_2000, title = {Dynamo: a transparent dynamic optimization system}, volume = {35}, @@ -363,12 +301,11 @@ url = {http://portal.acm.org/citation.cfm?doid=1134760.1134780}, doi = {10.1145/1134760.1134780}, abstract = {We present a just-in-time compiler for a Java {VM} that is small enough to fit on resource-constrained devices, yet is surprisingly effective. Our system dynamically identifies traces of frequently executed bytecode instructions (which may span several basic blocks across several methods) and compiles them via Static Single Assignment {(SSA)} construction. Our novel use of {SSA} form in this context allows to hoist instructions across trace side-exits without necessitating expensive compensation code in off-trace paths. The overall memory consumption (code and data) of our system is only 150 {kBytes,} yet benchmarks show a speedup that in some cases rivals heavy-weight just-in-time compilers.}, - booktitle = {Proceedings of the 2nd international conference on Virtual execution environments}, + booktitle = {{VEE}}, publisher = {{ACM}}, author = {Andreas Gal and Christian W. Probst and Michael Franz}, year = {2006}, - keywords = {dynamic compilation, embedded, software trace scheduling, {SSA,} {VM}}, - pages = {144--153} + keywords = {dynamic compilation, embedded, software trace scheduling, {SSA,} {VM}} }, @inproceedings{mario_wolczko_towards_1999, @@ -388,7 +325,7 @@ url = {http://portal.acm.org/citation.cfm?id=178243.178478}, doi = {10.1145/178243.178478}, abstract = {Note: {OCR} errors may be found in this Reference List extracted from the full text article. 
{ACM} has opted to expose the complete List rather than only correct and linked references.}, - booktitle = {Proceedings of the {ACM} {SIGPLAN} 1994 conference on Programming language design and implementation}, + booktitle = {{PLDI}}, publisher = {{ACM}}, author = {Urs H\"{o}lzle and David Ungar}, year = {1994}, @@ -410,7 +347,7 @@ of guest {VM} bytecodes corresponding to a given execution path through the application program. The host {VM} optimizes and compiles these traces to machine code, thus eliminating the need for a custom just-in-time compiler for the guest {VM.} The guest {VM} only needs to provide basic information about its interpreter loop to the host {VM.}}, - booktitle = {Proceedings of the 5th symposium on Dynamic languages}, + booktitle = {{DLS}}, publisher = {{ACM}}, author = {Alexander Yermolovich and Christian Wimmer and Michael Franz}, year = {2009}, @@ -418,35 +355,22 @@ pages = {79--88} }, - at inproceedings{carl_friedrich_bolz_how_2007, - title = {How to not write a Virtual Machine}, - abstract = {Typical modern dynamic languages have a growing number of implementations. We explore the reasons for this situation, and the limitations it imposes on open source or academic communities that lack the resources to fine-tune and maintain them all. It is sometimes proposed that implementing dynamic languages on top of a standardized general-purpose object-oriented virtual machine (like Java or {.NET)} would help reduce this burden. We propose a complementary alternative to writing custom virtual machine {(VMs)} by hand, validated by the {PyPy} project: flexibly generating {VMs} from a high-level "specification", -inserting features and low-level details automatically {\textendash} including good just-in-time compilers tuned to the dynamic language at hand. 
-We believe this to be ultimately a better investment of efforts than the development of more and more advanced general-purpose object -oriented {VMs.} In this paper we compare these two approaches in detail.}, - booktitle = {Proceedings of the 3rd Workshop on Dynamic Languages and Applications {(DYLA} 2007)}, - author = {Carl Friedrich Bolz and Armin Rigo}, - year = {2007} -}, - - at article{chambers_efficient_1989, + at inproceedings{chambers_efficient_1989, title = {An efficient implementation of {SELF} a dynamically-typed object-oriented language based on prototypes}, volume = {24}, url = {http://portal.acm.org/citation.cfm?id=74884}, doi = {10.1145/74878.74884}, abstract = {We have developed and implemented techniques that double the performance of dynamically-typed object-oriented languages. Our {SELF} implementation runs twice as fast as the fastest Smalltalk implementation, despite {SELF's} lack of classes and explicit variables. To compensate for the absence of classes, our system uses implementation-level maps to transparently group objects cloned from the same prototype, providing data type information and eliminating the apparent space overhead for prototype-based systems. To compensate for dynamic typing, user-defined control structures, and the lack of explicit variables, our system dynamically compiles multiple versions of a source method, each customized according to its receiver's map. Within each version the type of the receiver is fixed, and thus the compiler can statically bind and inline all messages sent to self. Message splitting and type prediction extract and preserve even more static type information, allowing the compiler to inline many other messages. Inlining dramatically improves performance and eliminates the need to hard-wire low-level methods such as +,==, and {ifTrue:.} Despite inlining and other optimizations, our system still supports interactive programming environments. 
The system traverses internal dependency lists to invalidate all compiled methods affected by a programming change. The debugger reconstructs inlined stack frames from compiler-generated debugging information, making inlining invisible to the {SELF} programmer.}, - number = {10}, - journal = {{SIGPLAN} Not.}, + booktitle = {{OOPSLA}}, author = {C. Chambers and D. Ungar and E. Lee}, year = {1989}, keywords = {self, specialization}, - pages = {49--70}, annote = {{\textless}p{\textgreater}describes the first implementation of {SELF.} Since {SELF} is highly dynamic, it is not easy to optimize it well.{\textless}/p{\textgreater} -{\textless}p{\textgreater}~{\textless}/p{\textgreater} +{\textless}p{\textgreater}\ {\textless}/p{\textgreater} {{\textless}p{\textgreater}The} first problem is one of space, the prototypical nature of self makes its objects much larger. This is solved by "maps", which are like sharing dicts in pypy: every object has an associated map (structure object) that describes how the layout of the object. In that respect a map is a bit like a class, but user-invisible.{\textless}/p{\textgreater} -{\textless}p{\textgreater}~{\textless}/p{\textgreater} +{\textless}p{\textgreater}\ {\textless}/p{\textgreater} {{\textless}p{\textgreater}The} compilation behavior of {SELF} is such that the every method is specialized for the map of the first argument. 
Then aggressive inlining is performed, which is particularly useful for self-sends (which are syntactically easy to write in {SELF),} since the lookup of those methods can be done at compile-time since the map is static due to specialization.{\textless}/p{\textgreater} -{\textless}p{\textgreater}~{\textless}/p{\textgreater} +{\textless}p{\textgreater}\ {\textless}/p{\textgreater} {{\textless}p{\textgreater}Further} optimizations are removal of unused closures and method splitting (which essentially prevents merging of paths in the flow graph to keep more information).{\textless}/p{\textgreater}} }, @@ -454,11 +378,10 @@ title = {Optimizing {Dynamically-Typed} {Object-Oriented} Languages With Polymorphic Inline Caches}, isbn = {3-540-54262-0}, url = {http://portal.acm.org/citation.cfm?id=679193&dl=ACM&coll=portal}, - booktitle = {Proceedings of the European Conference on {Object-Oriented} Programming}, + booktitle = {{ECOOP}}, publisher = {{Springer-Verlag}}, author = {Urs H\"{o}lzle and Craig Chambers and David Ungar}, - year = {1991}, - pages = {21--38} + year = {1991} }, @inproceedings{rigo_representation-based_2004, @@ -468,12 +391,11 @@ url = {http://portal.acm.org/citation.cfm?id=1014010}, doi = {10.1145/1014007.1014010}, abstract = {A powerful application of specialization is to remove interpretative overhead: a language can be implemented with an interpreter, whose performance is then improved by specializing it for a given program source. This approach is only moderately successful with very high level languages, where the operation of each single step can be highly dependent on run-time data and context. In the present paper, the Psyco prototype for the Python language is presented. It introduces two novel techniques. 
The first is just-in-time specialization, or specialization by need, which introduces the "unlifting" ability for a value to be promoted from run-time to compile-time during specialization -- the inverse of the lift operator of partial evaluation. Its presence gives an unusual and powerful perspective on the specialization process. The second technique is representations, a theory of data-oriented specialization generalizing the traditional specialization domains (i.e. the compile-time/run-time dichotomy).}, - booktitle = {Proceedings of the 2004 {ACM} {SIGPLAN} symposium on Partial evaluation and semantics-based program manipulation}, + booktitle = {{PEPM}}, publisher = {{ACM}}, author = {Armin Rigo}, year = {2004}, - keywords = {{JIT,} Python}, - pages = {15--26} + keywords = {{JIT,} Python} }, @inproceedings{sullivan_dynamic_2003, @@ -483,25 +405,9 @@ url = {http://portal.acm.org/citation.cfm?id=858570.858576}, doi = {10.1145/858570.858576}, abstract = {For domain specific languages, "scripting languages", dynamic languages, and for virtual machine-based languages, the most straightforward implementation strategy is to write an interpreter. A simple interpreter consists of a loop that fetches the next bytecode, dispatches to the routine handling that bytecode, then loops. There are many ways to improve upon this simple mechanism, but as long as the execution of the program is driven by a representation of the program other than as a stream of native instructions, there will be some "interpretive {overhead".There} is a long history of approaches to removing interpretive overhead from programming language implementations. In practice, what often happens is that, once an interpreted language becomes popular, pressure builds to improve performance until eventually a project is undertaken to implement a native Just In Time {(JIT)} compiler for the language. 
Implementing a {JIT} is usually a large effort, affects a significant part of the existing language implementation, and adds a significant amount of code and complexity to the overall code {base.In} this paper, we present an innovative approach that dynamically removes much of the interpreted overhead from language implementations, with minimal instrumentation of the original interpreter. While it does not give the performance improvements of hand-crafted native compilers, our system provides an appealing point on the language implementation spectrum.}, - booktitle = {Proceedings of the 2003 workshop on Interpreters, virtual machines and emulators}, + booktitle = {Workshop on Interpreters, virtual machines and emulators}, publisher = {{ACM}}, author = {Gregory T. Sullivan and Derek L. Bruening and Iris Baron and Timothy Garnett and Saman Amarasinghe}, year = {2003}, - pages = {50--57}, annote = {{{\textless}p{\textgreater}Describes} the application of Dynamo to interpreters. The unchanged dynamo does not fare too well on interpreters, since it traces one iteration of the bytecode loop, and the next iteration is likely to be very different. Matters are improved by adding hints to the interpreter that tell the tracer what the program counter of the interpreter is. Then the tracer only closes loops at the application {level.{\textless}/p{\textgreater}{\textless}p{\textgreater}\ {\textless}/p{\textgreater}{\textless}p{\textgreater}Strong} restrictions due to the fact that things happen on the assembler level.{\textless}/p{\textgreater}} -}, - - at incollection{carl_friedrich_bolz_back_2008, - title = {Back to the Future in One Week {\textemdash} Implementing a Smalltalk {VM} in {PyPy}}, - url = {http://dx.doi.org/10.1007/978-3-540-89275-5_7}, - abstract = {We report on our experiences with the Spy project, including implementation details and benchmark results. Spy is a re-implementation of the Squeak (i.e. Smalltalk-80) {VM} using the {PyPy} toolchain. 
The {PyPy} project allows code written in {RPython,} a subset of Python, to be translated -to a multitude of different backends and architectures. During the translation, many aspects of the implementation can be -independently tuned, such as the garbage collection algorithm or threading implementation. In this way, a whole host of interpreters -can be derived from one abstract interpreter definition. Spy aims to bring these benefits to Squeak, allowing for greater portability and, eventually, improved performance. The current -Spy codebase is able to run a small set of benchmarks that demonstrate performance superior to many similar Smalltalk {VMs,} but -which still run slower than in Squeak itself. Spy was built from scratch over the course of a week during a joint {Squeak-PyPy} Sprint in Bern last autumn.}, - booktitle = {{Self-Sustaining} Systems}, - author = {Carl Friedrich Bolz and Adrian Kuhn and Adrian Lienhard and Nicholas Matsakis and Oscar Nierstrasz and Lukas Renggli and Armin Rigo and Toon Verwaest}, - year = {2008}, - pages = {123--139} } \ No newline at end of file diff --git a/talk/icooolps2011/code/trace1.tex b/talk/icooolps2011/code/trace1.tex --- a/talk/icooolps2011/code/trace1.tex +++ b/talk/icooolps2011/code/trace1.tex @@ -1,3 +1,4 @@ +{\smaller \begin{lstlisting}[mathescape,xleftmargin=20pt,numberblanklines=false,numbers=right,escapechar=|, firstnumber=27,basicstyle=\ttfamily] # $inst_1$.getattr("a") |\setcounter{lstnumber}{21}| $attributes_1$ = $inst_1$.attributes |\setcounter{lstnumber}{21}| @@ -26,3 +27,4 @@ $v_4$ = $v_2$ + $result_3$ |\setcounter{lstnumber}{-2}| return($v_4$) \end{lstlisting} % XXX find out how to not number lines +} From commits-noreply at bitbucket.org Thu Apr 14 17:24:39 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 17:24:39 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: change abstract a bit Message-ID: <20110414152439.097A82A2043@codespeak.net> Author: Carl Friedrich Bolz Branch: 
extradoc Changeset: r3511:cc0be7787c9f Date: 2011-04-14 17:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/cc0be7787c9f/ Log: change abstract a bit diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -102,14 +102,13 @@ Meta-tracing JIT compilers can be applied to a variety of different languages without explicitly encoding language semantics into the compiler. So -far, they lacked a way to feed back runtime information into the -compiler, which restricted their performance. In this paper we describe the -flexible mechanisms in PyPy's meta-tracing JIT that can be used to control runtime feedback in language-specific ways. These mechanisms are flexible -enough to implement classical VM techniques such as maps and polymorphic inline +far, they lacked a way to give the language implementor control over runtime +feedback. This restricted their performance. In this paper we describe the +mechanisms in PyPy's meta-tracing JIT that can be used to control +runtime feedback in language-specific ways. These mechanisms are flexible +enough to express classical VM techniques such as maps and polymorphic inline caches. 
-\cfbolz{XXX tracing is runtime feed back too, clarify} - \end{abstract} From commits-noreply at bitbucket.org Thu Apr 14 17:38:46 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 17:38:46 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: enable preprint option Message-ID: <20110414153846.A9B0D2A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3512:f60fc5ef077b Date: 2011-04-14 17:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/f60fc5ef077b/ Log: enable preprint option diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -1,4 +1,4 @@ -\documentclass{sigplanconf} +\documentclass[preprint]{sigplanconf} \usepackage{ifthen} \usepackage{fancyvrb} From commits-noreply at bitbucket.org Thu Apr 14 17:38:47 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 17:38:47 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: will ignore this XXX Message-ID: <20110414153847.1D9782A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3513:87256b9a35c9 Date: 2011-04-14 17:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/87256b9a35c9/ Log: will ignore this XXX diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -367,8 +367,6 @@ \section{Hints for Controlling Optimization} \label{sec:hints} -\cfbolz{XXX more precise definition of what promote does} - In this section we will describe how to add two hints that allow the interpreter author to increase the optimization opportunities for constant folding. 
If applied correctly these techniques can give really big speedups by From commits-noreply at bitbucket.org Thu Apr 14 17:38:47 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 17:38:47 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: this is repetition Message-ID: <20110414153847.90F572A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3514:2592b9379e2b Date: 2011-04-14 17:38 +0200 http://bitbucket.org/pypy/extradoc/changeset/2592b9379e2b/ Log: this is repetition diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -383,10 +383,7 @@ the same result given the same arguments. \end{itemize} -The PyPy JIT generator automatically detects the majority of these conditions. -However, for the cases in which the automatic detection does not work, the -interpreter author can apply \textbf{hints} to improve the optimization -opportunities. There is one kind of hint for both of the conditions above. +There is one kind of hint for both of these conditions. \subsection{Where Do All the Constants Come From} From commits-noreply at bitbucket.org Thu Apr 14 18:52:18 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 14 Apr 2011 18:52:18 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: a slightly different approach to saving space: for the small examples, put code Message-ID: <20110414165218.A1AAA2A2042@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3515:abf859b6fd36 Date: 2011-04-14 18:52 +0200 http://bitbucket.org/pypy/extradoc/changeset/abf859b6fd36/ Log: a slightly different approach to saving space: for the small examples, put code and trace next to each other. this makes it possible to use the larger font again. 
diff --git a/talk/icooolps2011/code/trace2.tex b/talk/icooolps2011/code/trace2.tex --- a/talk/icooolps2011/code/trace2.tex +++ b/talk/icooolps2011/code/trace2.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[mathescape,escapechar=|,basicstyle=\ttfamily]] # $inst_1$.getattr("a") $map_1$ = $inst_1$.map diff --git a/talk/icooolps2011/code/trace5.tex b/talk/icooolps2011/code/trace5.tex --- a/talk/icooolps2011/code/trace5.tex +++ b/talk/icooolps2011/code/trace5.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] # $inst_1$.getattr("a") $map_1$ = $inst_1$.map diff --git a/talk/icooolps2011/code/interpreter-slow.tex b/talk/icooolps2011/code/interpreter-slow.tex --- a/talk/icooolps2011/code/interpreter-slow.tex +++ b/talk/icooolps2011/code/interpreter-slow.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily,numbers = right] class Class(object): def __init__(self, name): diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 300e83298afbfa11c8262998f23808deb225e9c4..63d41d94fd08a4192b987890c47be5ad8ec856e0 GIT binary patch [cut] diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -3,6 +3,7 @@ \usepackage{ifthen} \usepackage{fancyvrb} \usepackage{color} +\usepackage{wrapfig} \usepackage{ulem} \usepackage{xspace} \usepackage{relsize} @@ -16,7 +17,7 @@ \usepackage{listings} \usepackage[T1]{fontenc} -\usepackage[scaled=0.82]{beramono} +\usepackage[scaled=0.81]{beramono} \definecolor{commentgray}{rgb}{0.3,0.3,0.3} @@ -55,6 +56,9 @@ \newcommand\pedronis[1]{\nb{PEDRONIS}{#1}} \newcommand{\commentout}[1]{} +\newcommand{\noop}{} + + \newcommand\ie{i.e.,\xspace} \newcommand\eg{e.g.,\xspace} @@ -330,7 +334,7 @@ \anto{I still think it's a bit weird to call them ``methods'' and then use them as attributes in the example} -{\smaller +{\noop 
\begin{lstlisting}[mathescape,basicstyle=\ttfamily] inst.getattr("a") + inst.getattr("b") + inst.getattr("c") \end{lstlisting} @@ -395,26 +399,32 @@ The simplest example of constants are literal values, such as \texttt{1}. However, the optimizer can statically know the value of a variable even if it is not a constant in the original source code. For example, consider the -following fragment of RPython code: +following fragment of RPython code on the left. If the fragment is traced with +$x_1$ being \texttt{4}, the trace on the left is produced: -{\smaller + +\begin{minipage}[b]{0.5\linewidth} +\centering +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] if x == 4: y = y + x \end{lstlisting} } - -If the fragment is traced with $x_1$ being \texttt{4}, the following trace is -produced: -% -{\smaller +\end{minipage} +\vline +\hspace{0.5cm} +\begin{minipage}[b]{0.5\linewidth} +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 4) $y_2$ = $y_1$ + $x_1$ \end{lstlisting} } +\end{minipage} -In the trace above, the value of $x_1$ is statically known after the guard. + +In the trace, the value of $x_1$ is statically known after the guard. Remember that a guard is a runtime check. The above trace will run to completion when $x_1$ \texttt{== 4}. If the check fails, execution of the trace is stopped and the interpreter continues to run. @@ -432,18 +442,22 @@ typical reason to do that is if there is a lot of computation depending on the value of that variable. -Let's make this more concrete. If we trace a call to the following function: -{\smaller +Let's make this more concrete. 
If we trace a call to the function on the left, we get the trace on the right: + +\begin{minipage}[b]{0.5\linewidth} +\centering +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] def f1(x, y): z = x * 2 + 1 return z + y \end{lstlisting} } - -We get a trace that looks like this: - -{\smaller +\end{minipage} +\vline +\hspace{0.5cm} +\begin{minipage}[b]{0.5\linewidth} +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] $v_1$ = $x_1$ * 2 $z_1$ = $v_1$ + 1 @@ -451,12 +465,17 @@ return($v_2$) \end{lstlisting} } +\end{minipage} Observe how the first two operations could be constant-folded if the value of $x_1$ were known. Let's assume that the value of \texttt{x} in the Python code can vary, but does so rarely, i.e. only takes a few different values at runtime. If this is the case, we can add a hint to promote \texttt{x}, like this: -{\smaller + + +\begin{minipage}[b]{0.5\linewidth} +\centering +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] def f1(x, y): promote(x) @@ -464,6 +483,20 @@ return z + y \end{lstlisting} } +\end{minipage} +\vline +\hspace{0.5cm} +\begin{minipage}[b]{0.5\linewidth} +{\noop +\begin{lstlisting}[mathescape,basicstyle=\ttfamily] +guard($x_1$ == 4) +$v_1$ = $x_1$ * 2 +$z_1$ = $v_1$ + 1 +$v_2$ = $z_1$ + $y_1$ +return($v_2$) +\end{lstlisting} +} +\end{minipage} The hint indicates that \texttt{x} is likely a runtime constant and the JIT should try to perform runtime specialization on it @@ -473,17 +506,7 @@ effect. When tracing, some extra work is done. Let's assume that this changed function is traced with the arguments \texttt{4} and \texttt{8}. The trace will be the same, except for one -operation at the beginning: - -{\smaller -\begin{lstlisting}[mathescape,basicstyle=\ttfamily] -guard($x_1$ == 4) -$v_1$ = $x_1$ * 2 -$z_1$ = $v_1$ + 1 -$v_2$ = $z_1$ + $y_1$ -return($v_2$) -\end{lstlisting} -} +operation at the beginning. The promotion is turned into a \texttt{guard} operation in the trace. 
The guard captures the value of $x_1$ as it was during tracing. Thus the runtime value of @@ -495,7 +518,7 @@ assume that $x_1$ is equal to \texttt{4}, meaning that the optimizer will turn this trace into: -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 4) $v_2$ = 9 + $y_1$ @@ -513,7 +536,7 @@ capture a different value of $x_1$. If it is e.g. \texttt{2}, then the optimized trace looks like this: -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($x_1$ == 2) $v_2$ = 5 + $y_1$ @@ -560,9 +583,12 @@ From this definition follows that a call to an trace-elidable function with constant arguments in a trace can be replaced with the result of the call. -As an example, take the following class: +As an example, take the class on the left. Tracing the call \texttt{a.f(10)} of +some instance of \texttt{A} yields the trace on the right (note how the call to +\texttt{c} is inlined): -{\smaller +\begin{minipage}[t]{0.6\linewidth} +\centering \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class A(object): def __init__(self, x, y): @@ -570,17 +596,15 @@ self.y = y def f(self, val): - self.y = self.compute() + val + self.y = self.c() + val - def compute(self): + def c(self): return self.x * 2 + 1 \end{lstlisting} -} - -Tracing the call \texttt{a.f(10)} of some instance of \texttt{A} yields the following -trace (note how the call to \texttt{compute} is inlined): -% -{\smaller +\end{minipage} +\vline +\hspace{0.5cm} +\begin{minipage}[t]{0.4\linewidth} \begin{lstlisting}[mathescape,basicstyle=\ttfamily] $x_1$ = $a_1$.x $v_1$ = $x_1$ * 2 @@ -588,7 +612,7 @@ $v_3$ = $v_2$ + $val_1$ $a_1$.y = $v_3$ \end{lstlisting} -} +\end{minipage} In this case, adding a promote of \texttt{self} in the \texttt{f} method to get rid of the computation of the first few operations does not help. Even if $a_1$ is a @@ -596,11 +620,13 @@ always yield the same value. 
To solve this problem, there is another annotation, which lets the interpreter author communicate invariants to the optimizer. In this case, she could decide that the \texttt{x} field of instances of \texttt{A} is -immutable, and therefore \texttt{compute} +immutable, and therefore \texttt{c} is an trace-elidable function. To communicate this, there is a \texttt{elidable} decorator. -If the code in \texttt{compute} should be constant-folded away, we would change the +If the code in \texttt{c} should be constant-folded away, we would change the class as follows: -{\smaller + +\begin{minipage}[t]{0.6\linewidth} +\centering \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class A(object): def __init__(self, x, y): @@ -609,33 +635,33 @@ def f(self, val): promote(self) - self.y = self.compute() + val + self.y = self.c() + val @elidable - def compute(self): + def c(self): return self.x * 2 + 1 \end{lstlisting} -} - -Now the trace will look like this: -% -{\smaller +\end{minipage} +\vline +\hspace{0.4cm} +\begin{minipage}[t]{0.4\linewidth} \begin{lstlisting}[mathescape,basicstyle=\ttfamily] -guard($a_1$ == 0xb73984a8) -$v_1$ = compute($a_1$) +guard($a_1$ == + 0xb73984a8) +$v_1$ = c($a_1$) $v_2$ = $v_1$ + $val_1$ $a_1$.y = $v_2$ \end{lstlisting} -} +\end{minipage} Here, \texttt{0xb73984a8} is the address of the instance of \texttt{A} that was used -during tracing. The call to \texttt{compute} is not inlined, so that the optimizer -has a chance to see it. Since the \texttt{compute} function is marked as trace-elidable, and its +during tracing. The call to \texttt{c} is not inlined, so that the optimizer +has a chance to see it. Since the \texttt{c} function is marked as trace-elidable, and its argument is a constant reference, the call will be removed by the optimizer. 
The final trace looks like this: % -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($a_1$ == 0xb73984a8) $v_2$ = 9 + $val_1$ diff --git a/talk/icooolps2011/code/map.tex b/talk/icooolps2011/code/map.tex --- a/talk/icooolps2011/code/map.tex +++ b/talk/icooolps2011/code/map.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class Map(object): def __init__(self): diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] class VersionTag(object): pass diff --git a/talk/icooolps2011/code/trace4.tex b/talk/icooolps2011/code/trace4.tex --- a/talk/icooolps2011/code/trace4.tex +++ b/talk/icooolps2011/code/trace4.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[escapechar=|,mathescape,basicstyle=\ttfamily] # $inst_1$.getattr("a") $map_1$ = $inst_1$.map diff --git a/talk/icooolps2011/code/trace1.tex b/talk/icooolps2011/code/trace1.tex --- a/talk/icooolps2011/code/trace1.tex +++ b/talk/icooolps2011/code/trace1.tex @@ -1,4 +1,4 @@ -{\smaller +{\noop \begin{lstlisting}[mathescape,xleftmargin=20pt,numberblanklines=false,numbers=right,escapechar=|, firstnumber=27,basicstyle=\ttfamily] # $inst_1$.getattr("a") |\setcounter{lstnumber}{21}| $attributes_1$ = $inst_1$.attributes |\setcounter{lstnumber}{21}| From commits-noreply at bitbucket.org Thu Apr 14 19:14:14 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 14 Apr 2011 19:14:14 +0200 (CEST) Subject: [pypy-svn] pypy default: fixed test_circular Message-ID: <20110414171414.3B30C2A2043@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43354:15fccdf5575a Date: 2011-04-14 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/15fccdf5575a/ Log: fixed test_circular diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1364,7 +1364,7 @@ self = array.__new__(cls, 'd', range(256)) return self def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) + assert len(self) == 256 return array.__getitem__(self, i & 255) # buf = Circular() @@ -1378,4 +1378,28 @@ log = self.run(main, [], threshold=200) assert log.result == 1239690.0 loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + i19 = force_token() + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + i22 = force_token() + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i27 = force_token() + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i34 = force_token() + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... + """) # XXX: what do we want to check here? + # We want to check that the array bound checks are removed, + # so it's this part of the trace. However we dont care about + # the force_token()'s. Can they be ignored? 
From commits-noreply at bitbucket.org Thu Apr 14 19:28:42 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 14 Apr 2011 19:28:42 +0200 (CEST) Subject: [pypy-svn] pypy default: fixed test_zeropadded Message-ID: <20110414172842.BC5BA2A2042@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43355:80f982d2345d Date: 2011-04-14 19:28 +0200 http://bitbucket.org/pypy/pypy/changeset/80f982d2345d/ Log: fixed test_zeropadded diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1354,7 +1354,33 @@ log = self.run(main, [], threshold=200) assert log.result == 9895050.0 loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + assert loop.match(""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) + f21 = getarrayitem_raw(i13, i18, descr=...) + i22 = force_token() + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + i25 = force_token() + f26 = getarrayitem_raw(i13, i6, descr=...) + f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i30 = force_token() + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i37 = force_token() + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... + """) + # XXX: what do we want to check here? 
+ # We want to make sure that the overloaded __getitem__ + # not introduceds double array bound checks def test_circular(self): def main(): From commits-noreply at bitbucket.org Thu Apr 14 19:31:27 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 14 Apr 2011 19:31:27 +0200 (CEST) Subject: [pypy-svn] pypy default: no point in not using a more natural syntax anymore Message-ID: <20110414173127.011772A2042@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43356:c2af8a38dca3 Date: 2011-04-14 19:30 +0200 http://bitbucket.org/pypy/pypy/changeset/c2af8a38dca3/ Log: no point in not using a more natural syntax anymore diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1339,7 +1339,7 @@ return self def __getitem__(self, i): - if i < 0 or i >= self.__len__(): + if i < 0 or i >= len(self): return 0 return array.__getitem__(self, i) # ID: get # From commits-noreply at bitbucket.org Thu Apr 14 20:04:24 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 20:04:24 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Support for descrs of fields of type HiddenGcRef32. Message-ID: <20110414180424.4918B2A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43357:e7e8b4cdebae Date: 2011-04-13 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e7e8b4cdebae/ Log: Support for descrs of fields of type HiddenGcRef32. 
diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -127,6 +127,23 @@ assert descr.is_float_field() assert descr.get_field_size(False) == 8 +def test_get_field_descr_hiddengcref32(): + if sys.maxint == 2147483647: + py.test.skip("HiddenGcRef32: for 64-bit only") + S = lltype.GcStruct('S', ('p', llmemory.HiddenGcRef32)) + c0 = GcCache(False) + descr = get_field_descr(c0, S, 'p') + assert not descr.is_float_field() + assert descr.is_pointer_field() + assert descr.get_field_size(False) == 4 + c1 = GcCache(True) + descr = get_field_descr(c1, S, 'p') + assert not descr.is_float_field() + assert descr.is_pointer_field() + sz = descr.get_field_size(True) + assert isinstance(sz, Symbolic) + assert sz.TYPE == llmemory.HiddenGcRef32 + def test_get_array_descr(): U = lltype.Struct('U') diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -120,10 +120,15 @@ _clsname = 'GcPtrFieldDescr' _is_pointer_field = True +class GcPtrHidden32FieldDescr(GcPtrFieldDescr): + def get_field_size(self, translate_support_code): + return symbolic.get_size(llmemory.HiddenGcRef32,translate_support_code) + def getFieldDescrClass(TYPE): return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr, NonGcPtrFieldDescr, 'Field', 'get_field_size', - '_is_float_field', '_is_field_signed') + '_is_float_field', '_is_field_signed', + GcPtrHidden32FieldDescr) def get_field_descr(gccache, STRUCT, fieldname): cache = gccache._cache_field @@ -450,9 +455,12 @@ def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr, nameprefix, methodname, floatattrname, signedattrname, - _cache={}): + GcPtrHidden32Descr=None, _cache={}): if isinstance(TYPE, lltype.Ptr): if TYPE.TO._gckind == 'gc': + if TYPE == llmemory.HiddenGcRef32: + assert 
GcPtrHidden32Descr is not None + return GcPtrHidden32Descr return GcPtrDescr else: return NonGcPtrDescr diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -265,6 +265,8 @@ restype = get_ctypes_type(T.TO.RESULT) return ctypes.CFUNCTYPE(restype, *argtypes) elif isinstance(T.TO, lltype.OpaqueType): + if T == llmemory.HiddenGcRef32: + return ctypes.c_int return ctypes.c_void_p else: return ctypes.POINTER(get_ctypes_type(T.TO, delayed_builders)) From commits-noreply at bitbucket.org Thu Apr 14 20:04:25 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 20:04:25 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Support GcArray(HiddenGcRef32). Message-ID: <20110414180425.2B31A2A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43358:e03c1f86c2d2 Date: 2011-04-13 21:19 +0200 http://bitbucket.org/pypy/pypy/changeset/e03c1f86c2d2/ Log: Support GcArray(HiddenGcRef32). 
diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -246,6 +246,24 @@ arraydescr = get_array_descr(c2, RA) assert arraydescr.is_item_signed() == signed +def test_get_array_descr_hiddengcref32(): + if sys.maxint == 2147483647: + py.test.skip("HiddenGcRef32: for 64-bit only") + A = lltype.GcArray(llmemory.HiddenGcRef32) + c0 = GcCache(False) + descr = get_array_descr(c0, A) + assert not descr.is_array_of_floats() + assert descr.is_array_of_pointers() + assert descr.get_item_size(False) == 4 + c1 = GcCache(True) + descr = get_array_descr(c1, A) + assert not descr.is_array_of_floats() + assert descr.is_array_of_pointers() + assert descr.get_item_size(False) == 4 + sz = descr.get_item_size(True) + assert isinstance(sz, Symbolic) + assert sz.TYPE == llmemory.HiddenGcRef32 + def test_get_call_descr_not_translated(): c0 = GcCache(False) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -121,6 +121,7 @@ _is_pointer_field = True class GcPtrHidden32FieldDescr(GcPtrFieldDescr): + _clsname = 'GcPtrHidden32FieldDescr' def get_field_size(self, translate_support_code): return symbolic.get_size(llmemory.HiddenGcRef32,translate_support_code) @@ -191,6 +192,11 @@ _clsname = 'GcPtrArrayDescr' _is_array_of_pointers = True +class GcPtrHidden32ArrayDescr(GcPtrArrayDescr): + _clsname = 'GcPtrHidden32ArrayDescr' + def get_item_size(self, translate_support_code): + return symbolic.get_size(llmemory.HiddenGcRef32,translate_support_code) + class FloatArrayDescr(BaseArrayDescr): _clsname = 'FloatArrayDescr' _is_array_of_floats = True @@ -221,7 +227,8 @@ return FloatArrayDescr return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr, NonGcPtrArrayDescr, 'Array', 'get_item_size', - '_is_array_of_floats', 
'_is_item_signed') + '_is_array_of_floats', '_is_item_signed', + GcPtrHidden32ArrayDescr) def getArrayNoLengthDescrClass(ARRAY): return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr, From commits-noreply at bitbucket.org Thu Apr 14 20:04:25 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 20:04:25 +0200 (CEST) Subject: [pypy-svn] pypy default: Typo. Message-ID: <20110414180425.CCC212A2042@codespeak.net> Author: Armin Rigo Branch: Changeset: r43359:879cf72cebbf Date: 2011-04-14 20:03 +0200 http://bitbucket.org/pypy/pypy/changeset/879cf72cebbf/ Log: Typo. diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1014,7 +1014,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) From commits-noreply at bitbucket.org Thu Apr 14 20:44:37 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 14 Apr 2011 20:44:37 +0200 (CEST) Subject: [pypy-svn] pypy default: Remove the calls to libc's isinf again, reimplement it ourselves so the JIT can inline it nicely. Algorithm taken fromboost. Message-ID: <20110414184437.6C2302A2042@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43360:d8ea37b2a1ff Date: 2011-04-14 18:44 +0000 http://bitbucket.org/pypy/pypy/changeset/d8ea37b2a1ff/ Log: Remove the calls to libc's isinf again, reimplement it ourselves so the JIT can inline it nicely. Algorithm taken fromboost. 
diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,8 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. */ -int _pypy_math_isinf(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. / x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,12 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -95,9 +94,9 @@ # are awesome. return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + # Use a bitwise OR so the JIT doesn't produce 2 different guards. + return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign From commits-noreply at bitbucket.org Thu Apr 14 20:44:38 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 14 Apr 2011 20:44:38 +0200 (CEST) Subject: [pypy-svn] pypy default: Merged upstream. Message-ID: <20110414184438.CCC682A2042@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43361:cbf75aeca66c Date: 2011-04-14 18:44 +0000 http://bitbucket.org/pypy/pypy/changeset/cbf75aeca66c/ Log: Merged upstream. 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -838,7 +838,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -849,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -867,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -888,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -899,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -917,11 +917,13 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -933,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -946,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1071,3 +1073,359 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=200) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) + i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) 
+ i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) 
+ i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) + i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= len(self): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) + f21 = getarrayitem_raw(i13, i18, descr=...) + i22 = force_token() + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + i25 = force_token() + f26 = getarrayitem_raw(i13, i6, descr=...) 
+ f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i30 = force_token() + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i37 = force_token() + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... + """) + # XXX: what do we want to check here? + # We want to make sure that the overloaded __getitem__ + # not introduceds double array bound checks + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + assert len(self) == 256 + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + i19 = force_token() + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + i22 = force_token() + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i27 = force_token() + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i34 = force_token() + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... + """) + # XXX: what do we want to check here? + # We want to check that the array bound checks are removed, + # so it's this part of the trace. However we dont care about + # the force_token()'s. Can they be ignored? 
diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -222,218 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - - def 
test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 - i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) - - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - 
from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) def test_min_max(self): self.run_source(''' diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1014,7 +1014,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) From commits-noreply at bitbucket.org Thu Apr 14 21:28:25 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 14 Apr 2011 21:28:25 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: allow operatins in the short preamble that can be chained back to the inputargs (and not only those whoes arguemnts are directly from the inputargs) Message-ID: <20110414192825.A8AA72A2042@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43362:5d87bd140dfb Date: 2011-04-14 21:28 +0200 http://bitbucket.org/pypy/pypy/changeset/5d87bd140dfb/ Log: allow operatins in the short preamble that can be chained back to the inputargs (and not only those whoes arguemnts are directly from the inputargs) diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -164,13 
+164,14 @@ values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values) - short_boxes = preamble_optimizer.produce_short_preamble_ops(inputargs) + sb = preamble_optimizer.produce_short_preamble_ops(inputargs) + self.short_boxes = sb initial_inputargs_len = len(inputargs) inputargs, short = self.inline(self.cloned_operations, loop.inputargs, jump_args, - virtual_state, short_boxes) + virtual_state) #except KeyError: # debug_print("Unrolling failed.") # loop.preamble.operations = None @@ -246,8 +247,7 @@ if op.result: op.result.forget_value() - def inline(self, loop_operations, loop_args, jump_args, virtual_state, - short_boxes): + def inline(self, loop_operations, loop_args, jump_args, virtual_state): self.inliner = inliner = Inliner(loop_args, jump_args) values = [self.getvalue(arg) for arg in jump_args] @@ -271,47 +271,57 @@ assert jmp.getopnum() == rop.JUMP self.optimizer.newoperations = newoperations[:-1] - boxes_created_this_iteration = {} + self.boxes_created_this_iteration = {} jumpargs = jmp.getarglist() - short_inliner = Inliner(inputargs, jumpargs) + self.short_inliner = Inliner(inputargs, jumpargs) short = [] # FIXME: Should also loop over operations added by forcing things in this loop for op in newoperations: - boxes_created_this_iteration[op.result] = True + self.boxes_created_this_iteration[op.result] = True args = op.getarglist() if op.is_guard(): args = args + op.getfailargs() for a in args: - if not isinstance(a, Const) and not a in boxes_created_this_iteration: - if a not in inputargs: - short_op = short_boxes[a] - short.append(short_op) - short_jumpargs.append(short_op.result) - newop = short_inliner.inline_op(short_op) - self.optimizer.send_extra_operation(newop) - inputargs.append(a) - if newop.is_ovf(): - # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here - guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - short.append(guard) - # FIXME: Emit a proper guard here in case it is not - # removed 
by the optimizer. Can that happen? - self.optimizer.send_extra_operation(guard) - assert self.optimizer.newoperations[-1] is not guard - - box = newop.result - if box in self.optimizer.values: - box = self.optimizer.values[box].force_box() - jumpargs.append(box) + self.import_box(a, inputargs, short, short_jumpargs, jumpargs) jmp.initarglist(jumpargs) self.optimizer.newoperations.append(jmp) short.append(ResOperation(rop.JUMP, short_jumpargs, None)) return inputargs, short + def import_box(self, box, inputargs, short, short_jumpargs, jumpargs): + if isinstance(box, Const) or box in inputargs: + return + if box in self.boxes_created_this_iteration: + return + + short_op = self.short_boxes[box] + import pdb; pdb.set_trace() + + for a in short_op.getarglist(): + self.import_box(a, inputargs, short, short_jumpargs, jumpargs) + short.append(short_op) + short_jumpargs.append(short_op.result) + newop = self.short_inliner.inline_op(short_op) + self.optimizer.send_extra_operation(newop) + inputargs.append(box) + if newop.is_ovf(): + # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here + guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) + short.append(guard) + # FIXME: Emit a proper guard here in case it is not + # removed by the optimizer. Can that happen? 
+ self.optimizer.send_extra_operation(guard) + assert self.optimizer.newoperations[-1] is not guard + + box = newop.result + if box in self.optimizer.values: + box = self.optimizer.values[box].force_box() + jumpargs.append(box) + def sameop(self, op1, op2): if op1.getopnum() != op2.getopnum(): return False From commits-noreply at bitbucket.org Thu Apr 14 21:44:28 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 14 Apr 2011 21:44:28 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Finish casting back and forth opaque pointers to tagged pointers Message-ID: <20110414194428.C90922A2042@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43363:6a3590c4fc0d Date: 2011-04-14 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/6a3590c4fc0d/ Log: Finish casting back and forth opaque pointers to tagged pointers diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -768,6 +768,9 @@ # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): + if ctypes.addressof(cobj[0]) & 1: # a tagged pointer + gcref = _opaque_objs[ctypes.addressof(cobj[0]) // 2].hide() + return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1286,6 +1286,28 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') + def test_opaque_tagged_pointers(self): + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.annlowlevel import cast_instance_to_base_ptr + from pypy.rpython.lltypesystem import rclass + + class Opaque(object): + 
llopaque = True + + def hide(self): + ptr = cast_instance_to_base_ptr(self) + return lltype.cast_opaque_ptr(llmemory.GCREF, ptr) + + @staticmethod + def show(gcref): + ptr = lltype.cast_opaque_ptr(lltype.Ptr(rclass.OBJECT), gcref) + return cast_base_ptr_to_instance(Opaque, ptr) + + opaque = Opaque() + round = ctypes2lltype(llmemory.GCREF, lltype2ctypes(opaque.hide())) + assert Opaque.show(round) is opaque + + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform From commits-noreply at bitbucket.org Thu Apr 14 21:56:48 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 14 Apr 2011 21:56:48 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: simplify Message-ID: <20110414195648.257532A2042@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43364:c66e3772da5e Date: 2011-04-14 21:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c66e3772da5e/ Log: simplify diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -764,12 +764,13 @@ if T is lltype.Void: return None if isinstance(T, lltype.Ptr): - if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer + ptrval = ctypes.cast(cobj, ctypes.c_void_p).value + if not cobj or not ptrval: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): - if ctypes.addressof(cobj[0]) & 1: # a tagged pointer - gcref = _opaque_objs[ctypes.addressof(cobj[0]) // 2].hide() + if ptrval & 1: # a tagged pointer + gcref = _opaque_objs[ptrval // 2].hide() return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: From commits-noreply at bitbucket.org Thu Apr 14 21:56:49 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 14 Apr 2011 21:56:49 +0200 (CEST) Subject: [pypy-svn] pypy 
out-of-line-guards-2: progress on x86 backend. Segfault so far :) Message-ID: <20110414195649.7D1802A2042@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43365:630787073d32 Date: 2011-04-14 21:56 +0200 http://bitbucket.org/pypy/pypy/changeset/630787073d32/ Log: progress on x86 backend. Segfault so far :) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -48,11 +48,12 @@ class GuardToken(object): - def __init__(self, faildescr, failargs, fail_locs, exc): + def __init__(self, faildescr, failargs, fail_locs, exc, has_jump): self.faildescr = faildescr self.failargs = failargs self.fail_locs = fail_locs self.exc = exc + self.has_jump = has_jump DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) @@ -435,15 +436,22 @@ # tok.faildescr._x86_adr_jump_offset to contain the raw address of # the 4-byte target field in the JMP/Jcond instruction, and patch # the field in question to point (initially) to the recovery stub + inv_counter = 0 + clt = self.current_clt for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset tok.faildescr._x86_adr_jump_offset = addr relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(relative_target) - mc.copy_to_raw_memory(addr) + if tok.has_jump: + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(relative_target) + mc.copy_to_raw_memory(addr) + else: + # guard not invalidate, patch where it jumps + pos, _ = clt.invalidate_positions[inv_counter] + clt.invalidate_positions[inv_counter] = pos, relative_target def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1447,6 +1455,13 @@ self.mc.CMP(heap(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') + def genop_guard_guard_not_invalidated(self, 
ign_1, guard_op, guard_token, + locs, ign_2): + pos = self.mc.get_relative_pos() + guard_token.pos_jump_offset = pos + self.current_clt.invalidate_positions.append((pos, 0)) + self.pending_guard_tokens.append(guard_token) + def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] @@ -1545,7 +1560,8 @@ exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return GuardToken(faildescr, failargs, fail_locs, exc) + return GuardToken(faildescr, failargs, fail_locs, exc, has_jump= + guard_opnum != rop.GUARD_NOT_INVALIDATED) def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. We try to diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -291,6 +291,7 @@ # that belong to this loop or to a bridge attached to it. # Filled by the frontend calling record_faildescr_index(). 
self.faildescr_indices = [] + self.invalidate_positions = [] debug_start("jit-mem-looptoken-alloc") debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -145,6 +145,13 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) + def invalidate_loop(self, looptoken): + from pypy.jit.backend.x86 import codebuf + for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(tgt) + mc.copy_to_raw_memory(addr) + class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -492,6 +492,8 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) + consider_guard_not_invalidated = consider_guard_no_exception + def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() From commits-noreply at bitbucket.org Thu Apr 14 22:11:47 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:11:47 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Start supporting HiddenGcRef32 in ll2ctypes. (urgh) Message-ID: <20110414201147.C20C12A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43366:55131e7fa88e Date: 2011-04-14 21:58 +0200 http://bitbucket.org/pypy/pypy/changeset/55131e7fa88e/ Log: Start supporting HiddenGcRef32 in ll2ctypes. 
(urgh) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -266,7 +266,7 @@ return ctypes.CFUNCTYPE(restype, *argtypes) elif isinstance(T.TO, lltype.OpaqueType): if T == llmemory.HiddenGcRef32: - return ctypes.c_int + return ctypes.c_uint32 return ctypes.c_void_p else: return ctypes.POINTER(get_ctypes_type(T.TO, delayed_builders)) @@ -577,6 +577,8 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None +_hiddengcref32 = {} +_hiddengcref32back = {} def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -608,6 +610,8 @@ if not llobj: # NULL pointer if T == llmemory.GCREF: return ctypes.c_void_p(0) + if T == llmemory.HiddenGcRef32: + return ctypes.c_uint32(0) return get_ctypes_type(T)() if T == llmemory.GCREF: @@ -616,7 +620,18 @@ container = llobj._obj.container T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with - # the same valu + # the same value + elif T == llmemory.HiddenGcRef32: + p = llobj._obj.container._as_ptr() + p = lltype.normalizeptr(p) + container = p._as_obj() + try: + result = _hiddengcref32[container] + except KeyError: + result = 1000 + len(_hiddengcref32) + _hiddengcref32[container] = result + _hiddengcref32back[result] = container + return ctypes.c_uint32(result) else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -1099,7 +1114,18 @@ TYPE1 = lltype.typeOf(value) cvalue = lltype2ctypes(value) cresulttype = get_ctypes_type(RESTYPE) + if RESTYPE == llmemory.HiddenGcRef32 and isinstance(cvalue, long): + from pypy.rpython.lltypesystem.lloperation import llop + if cvalue: + p = _hiddengcref32back[cvalue]._as_ptr() # -> pointer + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, p) + else: + return lltype.nullptr(llmemory.HiddenGcRef32.TO) if isinstance(TYPE1, lltype.Ptr): + if TYPE1 == llmemory.HiddenGcRef32: + assert 
isinstance(cvalue, ctypes.c_uint32) + cvalue = int(cvalue.value) + return ctypes2lltype(RESTYPE, cvalue) if isinstance(RESTYPE, lltype.Ptr): # shortcut: ptr->ptr cast cptr = ctypes.cast(cvalue, cresulttype) diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1286,6 +1286,40 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') +class TestHiddenGcRef32(object): + def setup_class(cls): + if sys.maxint == 2147483647: + py.test.skip("HiddenGcRef32: for 64-bit only") + + def test_cast_hiddengcref32_numeric(self): + from pypy.rpython.lltypesystem.lloperation import llop + PARENT = lltype.GcStruct('PARENT') + NODE = lltype.GcStruct('NODE', ('parent', PARENT)) + null = lltype.nullptr(NODE) + null32 = llop.hide_into_ptr32(llmemory.HiddenGcRef32, null) + assert rffi.cast(lltype.Signed, rffi.cast(rffi.UINT, null32)) == 0 + node = lltype.malloc(NODE) + node32 = llop.hide_into_ptr32(llmemory.HiddenGcRef32, node) + parent32 = llop.hide_into_ptr32(llmemory.HiddenGcRef32, node.parent) + num_node = rffi.cast(lltype.Signed, rffi.cast(rffi.UINT, node32)) + num_parent = rffi.cast(lltype.Signed, rffi.cast(rffi.UINT, parent32)) + assert num_node != 0 + assert num_parent == num_node + othernode = lltype.malloc(NODE) + num_other = rffi.cast(lltype.Signed, rffi.cast(rffi.UINT, othernode)) + assert num_other != 0 + assert num_node != num_other + + def test_cast_hiddengcref32_back_and_forth(self): + from pypy.rpython.lltypesystem.lloperation import llop + NODE = lltype.GcStruct('NODE') + node = lltype.malloc(NODE) + ref32 = llop.hide_into_ptr32(llmemory.HiddenGcRef32, node) + back = rffi.cast(llmemory.HiddenGcRef32, rffi.cast(rffi.UINT, ref32)) + assert llop.show_from_ptr32(lltype.Ptr(NODE), back) == node + back2 = rffi.cast(llmemory.HiddenGcRef32, rffi.cast(rffi.UINT, 0)) + assert not 
llop.show_from_ptr32(lltype.Ptr(NODE), back2) + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform From commits-noreply at bitbucket.org Thu Apr 14 22:11:48 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:11:48 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: More tests, and introduce _llgcopaque32. Message-ID: <20110414201148.D5CE22A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43367:23a5f19495cb Date: 2011-04-14 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/23a5f19495cb/ Log: More tests, and introduce _llgcopaque32. diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -622,6 +622,8 @@ # otherwise it came from integer and we want a c_void_p with # the same value elif T == llmemory.HiddenGcRef32: + if isinstance(llobj._obj, _llgcopaque32): + return ctypes.c_uint32(llobj._obj.uint32val) p = llobj._obj.container._as_ptr() p = lltype.normalizeptr(p) container = p._as_obj() @@ -778,7 +780,9 @@ if T is lltype.Void: return None if isinstance(T, lltype.Ptr): - if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer + if not cobj or ( + not isinstance(cobj, ctypes.c_uint32) + and not ctypes.cast(cobj, ctypes.c_void_p).value): # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): @@ -830,6 +834,8 @@ elif isinstance(T.TO, lltype.OpaqueType): if T == llmemory.GCREF: container = _llgcopaque(cobj) + elif T == llmemory.HiddenGcRef32: + container = _llgcopaque32(cobj) else: container = lltype._opaque(T.TO) cbuf = ctypes.cast(cobj, ctypes.c_void_p) @@ -1300,6 +1306,35 @@ return hop.genop('cast_adr_to_int', [adr], resulttype = lltype.Signed) +class _llgcopaque32(lltype._container): + _TYPE = llmemory.HiddenGcRef32.TO + _name = 
"_llgcopaque32" + + def __init__(self, uint32): + if not isinstance(uint32, int): + uint32 = int(uint32.value) + assert isinstance(uint32, int) + self.uint32val = uint32 + + def __eq__(self, other): + if isinstance(other, _llgcopaque32): + return self.uint32val == other.uint32val + storage = object() + if hasattr(other, 'container'): + storage = other.container._storage + else: + storage = other._storage + + if storage in (None, True): + return False + return force_cast(rffi.UINT, other._as_ptr()) == self.uint32val + + def __ne__(self, other): + return not self == other + +## def _cast_to_ptr(self, PTRTYPE): +## return force_cast(PTRTYPE, self.intval) + # ____________________________________________________________ # errno diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1320,6 +1320,57 @@ back2 = rffi.cast(llmemory.HiddenGcRef32, rffi.cast(rffi.UINT, 0)) assert not llop.show_from_ptr32(lltype.Ptr(NODE), back2) + def test_hiddengcref32_forth_and_back(self): + cp = ctypes.c_uint32(3 * 10**9) + v = ctypes2lltype(llmemory.HiddenGcRef32, cp) + assert lltype.typeOf(v) == llmemory.HiddenGcRef32 + assert lltype2ctypes(v).value == cp.value + v1 = ctypes2lltype(llmemory.HiddenGcRef32, cp) + assert v == v1 + assert v + v2 = ctypes2lltype(llmemory.HiddenGcRef32, ctypes.c_uint32(1234567)) + assert v2 != v + + def test_hiddengcref32_type(self): + NODE = lltype.GcStruct('NODE') + node = lltype.malloc(NODE) + ref = lltype.cast_opaque_ptr(llmemory.HiddenGcRef32, node) + v = lltype2ctypes(ref) + assert isinstance(v, ctypes.c_uint32) + assert v + + def test_hiddengcref32_null(self): + ref = lltype.nullptr(llmemory.HiddenGcRef32.TO) + v = lltype2ctypes(ref) + assert isinstance(v, ctypes.c_uint32) + assert not v + + def test_cast_null_hiddengcref32(self): + ref = lltype.nullptr(llmemory.HiddenGcRef32.TO) + 
value = rffi.cast(rffi.UINT, ref) + assert rffi.cast(lltype.Signed, value) == 0 + + def test_hiddengcref32_truth(self): + p0 = ctypes.c_uint32(0) + ref0 = ctypes2lltype(llmemory.HiddenGcRef32, p0) + assert not ref0 + + p1234567 = ctypes.c_uint32(1234567) + ref1234567 = ctypes2lltype(llmemory.HiddenGcRef32, p1234567) + assert p1234567 + + def test_hiddengcref32_casts(self): + from pypy.rpython.lltypesystem.lloperation import llop + p0 = ctypes.c_uint32(0) + ref0 = ctypes2lltype(llmemory.HiddenGcRef32, p0) + + NODE = lltype.GcStruct('NODE') + assert llop.show_from_ptr32(lltype.Ptr(NODE), ref0) == lltype.nullptr(NODE) + + node = lltype.malloc(NODE) + ref1 = llop.hide_into_ptr32(llmemory.HiddenGcRef32, node) + + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform From commits-noreply at bitbucket.org Thu Apr 14 22:16:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:16:15 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Work in progress: support HiddenGcRef32 in some of the Descrs, Message-ID: <20110414201615.2BCC12A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43368:8427783c233c Date: 2011-04-14 22:14 +0200 http://bitbucket.org/pypy/pypy/changeset/8427783c233c/ Log: Work in progress: support HiddenGcRef32 in some of the Descrs, hopefully enough of them. 
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -435,6 +435,7 @@ def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): arg_classes = [] for ARG in ARGS: + assert ARG != llmemory.HiddenGcRef32 kind = getkind(ARG) if kind == 'int': arg_classes.append('i') elif kind == 'ref': arg_classes.append('r') diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -44,6 +44,8 @@ return None def freeing_block(self, start, stop): pass + def is_compressed_ptr(self, size): + return False # ____________________________________________________________ @@ -576,6 +578,9 @@ self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) + self.compressptr = gcdescr.config.translation.compressptr + if self.compressptr: + assert rffi.sizeof(rffi.INT) == rffi.sizeof(llmemory.HiddenGcRef32) # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer @@ -864,6 +869,13 @@ def freeing_block(self, start, stop): self.gcrootmap.freeing_block(start, stop) + def is_compressed_ptr(self, size): + if self.compressptr: # constant-folded away + ptrsize = symbolic.get_size_of_ptr(self.translate_support_code) + return size != ptrsize + else: + return False + # ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/pypy/jit/backend/llsupport/test/test_runner.py b/pypy/jit/backend/llsupport/test/test_runner.py --- a/pypy/jit/backend/llsupport/test/test_runner.py +++ b/pypy/jit/backend/llsupport/test/test_runner.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU from pypy.jit.backend.test.runner_test import LLtypeBackendTest @@ -11,6 
+11,11 @@ def compile_loop(self, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") + class gcdescr: + @staticmethod + def is_compressed_ptr(size): + return sys.maxint > 2147483647 and size == 4 + class TestAbstractLLCPU(LLtypeBackendTest): diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -208,6 +208,11 @@ return rffi.cast(llmemory.GCREF, x) @staticmethod + def _cast_hidden_int32_to_gcref(x): + hidden = rffi.cast(llmemory.HiddenGcRef32, x) + return llop.show_from_ptr32(llmemory.GCREF, hidden) + + @staticmethod def cast_gcref_to_int(x): return rffi.cast(lltype.Signed, x) @@ -227,16 +232,11 @@ def unpack_fielddescr(self, fielddescr): assert isinstance(fielddescr, BaseFieldDescr) - return fielddescr.offset - unpack_fielddescr._always_inline_ = True - - def unpack_fielddescr_size(self, fielddescr): - assert isinstance(fielddescr, BaseFieldDescr) ofs = fielddescr.offset size = fielddescr.get_field_size(self.translate_support_code) sign = fielddescr.is_field_signed() return ofs, size, sign - unpack_fielddescr_size._always_inline_ = True + unpack_fielddescr._always_inline_ = True def arraydescrof(self, A): return get_array_descr(self.gc_ll_descr, A) @@ -376,7 +376,7 @@ @specialize.argtype(1) def _base_do_getfield_i(self, struct, fielddescr): - ofs, size, sign = self.unpack_fielddescr_size(fielddescr) + ofs, size, sign = self.unpack_fielddescr(fielddescr) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) for STYPE, UTYPE, itemsize in unroll_basic_sizes: @@ -397,17 +397,22 @@ @specialize.argtype(1) def _base_do_getfield_r(self, struct, fielddescr): - ofs = self.unpack_fielddescr(fielddescr) + ofs, size, _ = self.unpack_fielddescr(fielddescr) + icp = self.gcdescr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) - pval = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr)[0] - pval = self._cast_int_to_gcref(pval) + if 0:# icp: + pval = rffi.cast(rffi.CArrayPtr(rffi.INT), fieldptr)[0] + pval = self._cast_hidden_int32_to_gcref(pval) + else: + pval = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr)[0] + pval = self._cast_int_to_gcref(pval) # --- end of GC unsafe code --- return pval @specialize.argtype(1) def _base_do_getfield_f(self, struct, fielddescr): - ofs = self.unpack_fielddescr(fielddescr) + ofs, _, _ = self.unpack_fielddescr(fielddescr) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) fval = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), fieldptr)[0] @@ -423,7 +428,7 @@ @specialize.argtype(1) def _base_do_setfield_i(self, struct, fielddescr, newvalue): - ofs, size, sign = self.unpack_fielddescr_size(fielddescr) + ofs, size, sign = self.unpack_fielddescr(fielddescr) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) for TYPE, _, itemsize in unroll_basic_sizes: @@ -437,7 +442,7 @@ @specialize.argtype(1) def _base_do_setfield_r(self, struct, fielddescr, newvalue): - ofs = self.unpack_fielddescr(fielddescr) + ofs, _, _ = self.unpack_fielddescr(fielddescr) assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") self.gc_ll_descr.do_write_barrier(struct, newvalue) @@ -449,7 +454,7 @@ @specialize.argtype(1) def _base_do_setfield_f(self, struct, fielddescr, newvalue): - ofs = self.unpack_fielddescr(fielddescr) + ofs, _, _ = self.unpack_fielddescr(fielddescr) # --- start of GC unsafe code (no GC operation!) 
--- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) fieldptr = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), fieldptr) From commits-noreply at bitbucket.org Thu Apr 14 22:16:16 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:16:16 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: This test now passes. Phew. Message-ID: <20110414201616.00A552A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43369:1a8381f5b898 Date: 2011-04-14 13:15 -0700 http://bitbucket.org/pypy/pypy/changeset/1a8381f5b898/ Log: This test now passes. Phew. diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -401,7 +401,7 @@ icp = self.gcdescr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) - if 0:# icp: + if icp: pval = rffi.cast(rffi.CArrayPtr(rffi.INT), fieldptr)[0] pval = self._cast_hidden_int32_to_gcref(pval) else: diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -11,6 +11,7 @@ from pypy.jit.metainterp.typesystem import deref from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import llhelper from pypy.rpython.llinterp import LLException @@ -2469,6 +2470,19 @@ assert mem2 < mem1 assert mem2 == mem0 + def test_bh_hiddenptr32(self): + if sys.maxint == 2147483647: + py.test.skip("HiddenGcRef32: for 64-bit only") + cpu = self.cpu + S = lltype.GcStruct('S', ('y', llmemory.HiddenGcRef32)) + s = lltype.malloc(S) + s32 = llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + descrfld_y = 
cpu.fielddescrof(S, 'y') + s.y = s32 + x = cpu.bh_getfield_gc_r(lltype.cast_opaque_ptr(llmemory.GCREF, s), + descrfld_y) + assert lltype.cast_opaque_ptr(lltype.Ptr(S), x) == s + class OOtypeBackendTest(BaseBackendTest): From commits-noreply at bitbucket.org Thu Apr 14 22:26:42 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:26:42 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: cpu.bh_setfield_gc_r. Message-ID: <20110414202642.B1D6B2A2043@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43370:9c10154d0f68 Date: 2011-04-14 13:26 -0700 http://bitbucket.org/pypy/pypy/changeset/9c10154d0f68/ Log: cpu.bh_setfield_gc_r. diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -1311,9 +1311,11 @@ _name = "_llgcopaque32" def __init__(self, uint32): - if not isinstance(uint32, int): + if isinstance(uint32, (int, long)): + uint32 = int(uint32) + else: uint32 = int(uint32.value) - assert isinstance(uint32, int) + assert isinstance(uint32, int) self.uint32val = uint32 def __eq__(self, other): @@ -1332,8 +1334,11 @@ def __ne__(self, other): return not self == other -## def _cast_to_ptr(self, PTRTYPE): -## return force_cast(PTRTYPE, self.intval) + def _cast_to_ptr(self, PTRTYPE): + if self.uint32val == 0: + return lltype.nullptr(PTRTYPE.TO) + obj = _hiddengcref32back[self.uint32val] + return force_cast(PTRTYPE, obj._as_ptr()) # ____________________________________________________________ # errno diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -217,6 +217,11 @@ return rffi.cast(lltype.Signed, x) @staticmethod + def cast_gcref_to_hidden_uint32(x): + x = llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) + return rffi.cast(rffi.UINT, x) + + @staticmethod def 
cast_int_to_adr(x): return rffi.cast(llmemory.Address, x) @@ -402,7 +407,7 @@ # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) if icp: - pval = rffi.cast(rffi.CArrayPtr(rffi.INT), fieldptr)[0] + pval = rffi.cast(rffi.CArrayPtr(rffi.UINT), fieldptr)[0] pval = self._cast_hidden_int32_to_gcref(pval) else: pval = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr)[0] @@ -442,14 +447,19 @@ @specialize.argtype(1) def _base_do_setfield_r(self, struct, fielddescr, newvalue): - ofs, _, _ = self.unpack_fielddescr(fielddescr) + ofs, size, _ = self.unpack_fielddescr(fielddescr) assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") + icp = self.gcdescr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(struct, newvalue) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) - fieldptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr) - fieldptr[0] = self.cast_gcref_to_int(newvalue) + if icp: + fieldptr = rffi.cast(rffi.CArrayPtr(rffi.UINT), fieldptr) + fieldptr[0] = self.cast_gcref_to_hidden_uint32(newvalue) + else: + fieldptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr) + fieldptr[0] = self.cast_gcref_to_int(newvalue) # --- end of GC unsafe code --- @specialize.argtype(1) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2482,6 +2482,12 @@ x = cpu.bh_getfield_gc_r(lltype.cast_opaque_ptr(llmemory.GCREF, s), descrfld_y) assert lltype.cast_opaque_ptr(lltype.Ptr(S), x) == s + # + t = lltype.malloc(S) + cpu.bh_setfield_gc_r(lltype.cast_opaque_ptr(llmemory.GCREF, s), + descrfld_y, + lltype.cast_opaque_ptr(llmemory.GCREF, t)) + assert llop.show_from_ptr32(lltype.Ptr(S), s.y) == t class OOtypeBackendTest(BaseBackendTest): From commits-noreply at bitbucket.org Thu Apr 14 
22:32:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:32:28 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: cpu.bh_getarrayitem_gc_r. Message-ID: <20110414203228.746452A2043@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43371:6cd51cf49802 Date: 2011-04-14 13:29 -0700 http://bitbucket.org/pypy/pypy/changeset/6cd51cf49802/ Log: cpu.bh_getarrayitem_gc_r. diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -248,16 +248,11 @@ def unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) - return arraydescr.get_base_size(self.translate_support_code) - unpack_arraydescr._always_inline_ = True - - def unpack_arraydescr_size(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_base_size(self.translate_support_code) size = arraydescr.get_item_size(self.translate_support_code) sign = arraydescr.is_item_signed() return ofs, size, sign - unpack_arraydescr_size._always_inline_ = True + unpack_arraydescr._always_inline_ = True def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) @@ -288,7 +283,7 @@ @specialize.argtype(2) def bh_getarrayitem_gc_i(self, arraydescr, gcref, itemindex): - ofs, size, sign = self.unpack_arraydescr_size(arraydescr) + ofs, size, sign = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) for STYPE, UTYPE, itemsize in unroll_basic_sizes: @@ -307,17 +302,22 @@ raise NotImplementedError("size = %d" % size) def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex): - ofs = self.unpack_arraydescr(arraydescr) + ofs, size, _ = self.unpack_arraydescr(arraydescr) + icp = self.gcdescr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) - items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) - pval = self._cast_int_to_gcref(items[itemindex]) + if icp: + items = rffi.cast(rffi.CArrayPtr(rffi.UINT), items) + pval = self._cast_hidden_int32_to_gcref(items[itemindex]) + else: + items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) + pval = self._cast_int_to_gcref(items[itemindex]) # --- end of GC unsafe code --- return pval @specialize.argtype(2) def bh_getarrayitem_gc_f(self, arraydescr, gcref, itemindex): - ofs = self.unpack_arraydescr(arraydescr) + ofs, _, _ = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items) @@ -327,7 +327,7 @@ @specialize.argtype(2) def bh_setarrayitem_gc_i(self, arraydescr, gcref, itemindex, newvalue): - ofs, size, sign = self.unpack_arraydescr_size(arraydescr) + ofs, size, sign = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) for TYPE, _, itemsize in unroll_basic_sizes: @@ -340,7 +340,7 @@ raise NotImplementedError("size = %d" % size) def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): - ofs = self.unpack_arraydescr(arraydescr) + ofs, size, _ = self.unpack_arraydescr(arraydescr) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) @@ -350,7 +350,7 @@ @specialize.argtype(2) def bh_setarrayitem_gc_f(self, arraydescr, gcref, itemindex, newvalue): - ofs = self.unpack_arraydescr(arraydescr) + ofs, _, _ = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2488,6 +2488,15 @@ descrfld_y, lltype.cast_opaque_ptr(llmemory.GCREF, t)) assert llop.show_from_ptr32(lltype.Ptr(S), s.y) == t + # + A = lltype.GcArray(llmemory.HiddenGcRef32) + a = lltype.malloc(A, 10) + descrarray = cpu.arraydescrof(A) + a[4] = s32 + x = cpu.bh_getarrayitem_gc_r(descrarray, + lltype.cast_opaque_ptr(llmemory.GCREF, a), + 4) + assert lltype.cast_opaque_ptr(lltype.Ptr(S), x) == s class OOtypeBackendTest(BaseBackendTest): From commits-noreply at bitbucket.org Thu Apr 14 22:32:29 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:32:29 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: cpu.bh_setarrayitem_gc_r. Message-ID: <20110414203229.5EA582A2043@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43372:57af0689bf34 Date: 2011-04-14 13:31 -0700 http://bitbucket.org/pypy/pypy/changeset/57af0689bf34/ Log: cpu.bh_setarrayitem_gc_r. diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -341,11 +341,16 @@ def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): ofs, size, _ = self.unpack_arraydescr(arraydescr) + icp = self.gcdescr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) - items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) - items[itemindex] = self.cast_gcref_to_int(newvalue) + if icp: + items = rffi.cast(rffi.CArrayPtr(rffi.UINT), items) + items[itemindex] = self.cast_gcref_to_hidden_uint32(newvalue) + else: + items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) + items[itemindex] = self.cast_gcref_to_int(newvalue) # --- end of GC unsafe code --- @specialize.argtype(2) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2497,6 +2497,12 @@ lltype.cast_opaque_ptr(llmemory.GCREF, a), 4) assert lltype.cast_opaque_ptr(lltype.Ptr(S), x) == s + # + cpu.bh_setarrayitem_gc_r(descrarray, + lltype.cast_opaque_ptr(llmemory.GCREF, a), + 4, + lltype.cast_opaque_ptr(llmemory.GCREF, t)) + assert llop.show_from_ptr32(lltype.Ptr(S), a[4]) == t class OOtypeBackendTest(BaseBackendTest): From commits-noreply at bitbucket.org Thu Apr 14 22:32:34 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:32:34 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: cpu.bh_getarrayitem_gc_r. Message-ID: <20110414203234.4C4502A2049@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43371:6cd51cf49802 Date: 2011-04-14 13:29 -0700 http://bitbucket.org/pypy/pypy/changeset/6cd51cf49802/ Log: cpu.bh_getarrayitem_gc_r. 
diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -248,16 +248,11 @@ def unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) - return arraydescr.get_base_size(self.translate_support_code) - unpack_arraydescr._always_inline_ = True - - def unpack_arraydescr_size(self, arraydescr): - assert isinstance(arraydescr, BaseArrayDescr) ofs = arraydescr.get_base_size(self.translate_support_code) size = arraydescr.get_item_size(self.translate_support_code) sign = arraydescr.is_item_signed() return ofs, size, sign - unpack_arraydescr_size._always_inline_ = True + unpack_arraydescr._always_inline_ = True def calldescrof(self, FUNC, ARGS, RESULT, extrainfo=None): return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo) @@ -288,7 +283,7 @@ @specialize.argtype(2) def bh_getarrayitem_gc_i(self, arraydescr, gcref, itemindex): - ofs, size, sign = self.unpack_arraydescr_size(arraydescr) + ofs, size, sign = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) for STYPE, UTYPE, itemsize in unroll_basic_sizes: @@ -307,17 +302,22 @@ raise NotImplementedError("size = %d" % size) def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex): - ofs = self.unpack_arraydescr(arraydescr) + ofs, size, _ = self.unpack_arraydescr(arraydescr) + icp = self.gcdescr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) - items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) - pval = self._cast_int_to_gcref(items[itemindex]) + if icp: + items = rffi.cast(rffi.CArrayPtr(rffi.UINT), items) + pval = self._cast_hidden_int32_to_gcref(items[itemindex]) + else: + items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) + pval = self._cast_int_to_gcref(items[itemindex]) # --- end of GC unsafe code --- return pval @specialize.argtype(2) def bh_getarrayitem_gc_f(self, arraydescr, gcref, itemindex): - ofs = self.unpack_arraydescr(arraydescr) + ofs, _, _ = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items) @@ -327,7 +327,7 @@ @specialize.argtype(2) def bh_setarrayitem_gc_i(self, arraydescr, gcref, itemindex, newvalue): - ofs, size, sign = self.unpack_arraydescr_size(arraydescr) + ofs, size, sign = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) for TYPE, _, itemsize in unroll_basic_sizes: @@ -340,7 +340,7 @@ raise NotImplementedError("size = %d" % size) def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): - ofs = self.unpack_arraydescr(arraydescr) + ofs, size, _ = self.unpack_arraydescr(arraydescr) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) @@ -350,7 +350,7 @@ @specialize.argtype(2) def bh_setarrayitem_gc_f(self, arraydescr, gcref, itemindex, newvalue): - ofs = self.unpack_arraydescr(arraydescr) + ofs, _, _ = self.unpack_arraydescr(arraydescr) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2488,6 +2488,15 @@ descrfld_y, lltype.cast_opaque_ptr(llmemory.GCREF, t)) assert llop.show_from_ptr32(lltype.Ptr(S), s.y) == t + # + A = lltype.GcArray(llmemory.HiddenGcRef32) + a = lltype.malloc(A, 10) + descrarray = cpu.arraydescrof(A) + a[4] = s32 + x = cpu.bh_getarrayitem_gc_r(descrarray, + lltype.cast_opaque_ptr(llmemory.GCREF, a), + 4) + assert lltype.cast_opaque_ptr(lltype.Ptr(S), x) == s class OOtypeBackendTest(BaseBackendTest): From commits-noreply at bitbucket.org Thu Apr 14 22:32:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 22:32:35 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: cpu.bh_setarrayitem_gc_r. Message-ID: <20110414203235.360B02A2049@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43372:57af0689bf34 Date: 2011-04-14 13:31 -0700 http://bitbucket.org/pypy/pypy/changeset/57af0689bf34/ Log: cpu.bh_setarrayitem_gc_r. diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -341,11 +341,16 @@ def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): ofs, size, _ = self.unpack_arraydescr(arraydescr) + icp = self.gcdescr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) - items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) - items[itemindex] = self.cast_gcref_to_int(newvalue) + if icp: + items = rffi.cast(rffi.CArrayPtr(rffi.UINT), items) + items[itemindex] = self.cast_gcref_to_hidden_uint32(newvalue) + else: + items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) + items[itemindex] = self.cast_gcref_to_int(newvalue) # --- end of GC unsafe code --- @specialize.argtype(2) diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -2497,6 +2497,12 @@ lltype.cast_opaque_ptr(llmemory.GCREF, a), 4) assert lltype.cast_opaque_ptr(lltype.Ptr(S), x) == s + # + cpu.bh_setarrayitem_gc_r(descrarray, + lltype.cast_opaque_ptr(llmemory.GCREF, a), + 4, + lltype.cast_opaque_ptr(llmemory.GCREF, t)) + assert llop.show_from_ptr32(lltype.Ptr(S), a[4]) == t class OOtypeBackendTest(BaseBackendTest): From commits-noreply at bitbucket.org Thu Apr 14 23:06:33 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 23:06:33 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add a test file, and so minor fixes until it runs (and fails). Message-ID: <20110414210633.0C77C2A2042@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43373:c509250e054a Date: 2011-04-14 13:40 -0700 http://bitbucket.org/pypy/pypy/changeset/c509250e054a/ Log: Add a test file, and so minor fixes until it runs (and fails). 
diff --git a/pypy/jit/backend/llsupport/test/test_runner.py b/pypy/jit/backend/llsupport/test/test_runner.py --- a/pypy/jit/backend/llsupport/test/test_runner.py +++ b/pypy/jit/backend/llsupport/test/test_runner.py @@ -8,14 +8,15 @@ class MyLLCPU(AbstractLLCPU): supports_floats = True + + def __init__(self, *args, **kwds): + super(MyLLCPU, self).__init__(*args, **kwds) + self.gc_ll_descr.is_compressed_ptr = ( + lambda size: sys.maxint > 2147483647 and size == 4) + def compile_loop(self, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") - class gcdescr: - @staticmethod - def is_compressed_ptr(size): - return sys.maxint > 2147483647 and size == 4 - class TestAbstractLLCPU(LLtypeBackendTest): diff --git a/pypy/jit/backend/x86/test/test_rcompressed.py b/pypy/jit/backend/x86/test/test_rcompressed.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_rcompressed.py @@ -0,0 +1,8 @@ + +from pypy.jit.metainterp.test import test_rcompressed +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestRCompressed(Jit386Mixin, test_rcompressed.TestRCompressed): + # for the individual tests see + # ====> ../../../metainterp/test/test_rcompressed.py + pass diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -303,7 +303,7 @@ def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) if icp: @@ -341,7 +341,7 @@ def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) @@ -408,7 +408,7 @@ @specialize.argtype(1) def _base_do_getfield_r(self, struct, fielddescr): ofs, size, _ = self.unpack_fielddescr(fielddescr) - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) if icp: @@ -455,7 +455,7 @@ ofs, size, _ = self.unpack_fielddescr(fielddescr) assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(struct, newvalue) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) From commits-noreply at bitbucket.org Thu Apr 14 23:06:37 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 14 Apr 2011 23:06:37 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add a test file, and so minor fixes until it runs (and fails). Message-ID: <20110414210637.A5B2E2A2045@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43373:c509250e054a Date: 2011-04-14 13:40 -0700 http://bitbucket.org/pypy/pypy/changeset/c509250e054a/ Log: Add a test file, and so minor fixes until it runs (and fails). 
diff --git a/pypy/jit/backend/llsupport/test/test_runner.py b/pypy/jit/backend/llsupport/test/test_runner.py --- a/pypy/jit/backend/llsupport/test/test_runner.py +++ b/pypy/jit/backend/llsupport/test/test_runner.py @@ -8,14 +8,15 @@ class MyLLCPU(AbstractLLCPU): supports_floats = True + + def __init__(self, *args, **kwds): + super(MyLLCPU, self).__init__(*args, **kwds) + self.gc_ll_descr.is_compressed_ptr = ( + lambda size: sys.maxint > 2147483647 and size == 4) + def compile_loop(self, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") - class gcdescr: - @staticmethod - def is_compressed_ptr(size): - return sys.maxint > 2147483647 and size == 4 - class TestAbstractLLCPU(LLtypeBackendTest): diff --git a/pypy/jit/backend/x86/test/test_rcompressed.py b/pypy/jit/backend/x86/test/test_rcompressed.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_rcompressed.py @@ -0,0 +1,8 @@ + +from pypy.jit.metainterp.test import test_rcompressed +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestRCompressed(Jit386Mixin, test_rcompressed.TestRCompressed): + # for the individual tests see + # ====> ../../../metainterp/test/test_rcompressed.py + pass diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -303,7 +303,7 @@ def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) if icp: @@ -341,7 +341,7 @@ def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) @@ -408,7 +408,7 @@ @specialize.argtype(1) def _base_do_getfield_r(self, struct, fielddescr): ofs, size, _ = self.unpack_fielddescr(fielddescr) - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) if icp: @@ -455,7 +455,7 @@ ofs, size, _ = self.unpack_fielddescr(fielddescr) assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") - icp = self.gcdescr.is_compressed_ptr(size) + icp = self.gc_ll_descr.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(struct, newvalue) # --- start of GC unsafe code (no GC operation!) 
--- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) From commits-noreply at bitbucket.org Thu Apr 14 23:14:17 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 14 Apr 2011 23:14:17 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Finish the quasi immutable fields for x86 backend Message-ID: <20110414211417.DC7EA2A2042@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43374:6d53858dbd37 Date: 2011-04-14 23:14 +0200 http://bitbucket.org/pypy/pypy/changeset/6d53858dbd37/ Log: Finish the quasi immutable fields for x86 backend diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -451,7 +451,8 @@ else: # guard not invalidate, patch where it jumps pos, _ = clt.invalidate_positions[inv_counter] - clt.invalidate_positions[inv_counter] = pos, relative_target + clt.invalidate_positions[inv_counter] = (pos + rawstart, + relative_target) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1457,7 +1458,7 @@ def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, locs, ign_2): - pos = self.mc.get_relative_pos() + pos = self.mc.get_relative_pos() + 1 # after jmp guard_token.pos_jump_offset = pos self.current_clt.invalidate_positions.append((pos, 0)) self.pending_guard_tokens.append(guard_token) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -147,10 +147,11 @@ def invalidate_loop(self, looptoken): from pypy.jit.backend.x86 import codebuf + for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(tgt) - mc.copy_to_raw_memory(addr) + mc.JMP_l(tgt) + mc.copy_to_raw_memory(addr - 1) class CPU386(AbstractX86CPU): WORD = 4 From commits-noreply at bitbucket.org Thu Apr 14 23:14:21 2011 From: 
commits-noreply at bitbucket.org (fijal) Date: Thu, 14 Apr 2011 23:14:21 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Finish the quasi immutable fields for x86 backend Message-ID: <20110414211421.98A122A2048@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43374:6d53858dbd37 Date: 2011-04-14 23:14 +0200 http://bitbucket.org/pypy/pypy/changeset/6d53858dbd37/ Log: Finish the quasi immutable fields for x86 backend diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -451,7 +451,8 @@ else: # guard not invalidate, patch where it jumps pos, _ = clt.invalidate_positions[inv_counter] - clt.invalidate_positions[inv_counter] = pos, relative_target + clt.invalidate_positions[inv_counter] = (pos + rawstart, + relative_target) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1457,7 +1458,7 @@ def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, locs, ign_2): - pos = self.mc.get_relative_pos() + pos = self.mc.get_relative_pos() + 1 # after jmp guard_token.pos_jump_offset = pos self.current_clt.invalidate_positions.append((pos, 0)) self.pending_guard_tokens.append(guard_token) diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -147,10 +147,11 @@ def invalidate_loop(self, looptoken): from pypy.jit.backend.x86 import codebuf + for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(tgt) - mc.copy_to_raw_memory(addr) + mc.JMP_l(tgt) + mc.copy_to_raw_memory(addr - 1) class CPU386(AbstractX86CPU): WORD = 4 From commits-noreply at bitbucket.org Fri Apr 15 00:36:31 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 15 Apr 2011 00:36:31 +0200 (CEST) Subject: [pypy-svn] pypy default: Issue652 
(kleptog) new lltype.Typedef, which allows to give different Message-ID: <20110414223631.AC8062A2042@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43375:d381cb2c140b Date: 2011-04-15 00:36 +0200 http://bitbucket.org/pypy/pypy/changeset/d381cb2c140b/ Log: Issue652 (kleptog) new lltype.Typedef, which allows to give different names (in the C backend) to the same type. Use it in cpyext to have "real" Py_ssize_t parameters diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. 
""" + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." + at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -758,6 +761,8 @@ """ if T is lltype.Void: return None + if isinstance(T, 
lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def 
__repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + 
py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): From commits-noreply at bitbucket.org Fri Apr 15 00:36:37 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 15 Apr 2011 00:36:37 +0200 (CEST) Subject: [pypy-svn] pypy default: Issue652 (kleptog) new lltype.Typedef, which allows to give different Message-ID: <20110414223637.BB4732A2045@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43375:d381cb2c140b Date: 2011-04-15 00:36 +0200 http://bitbucket.org/pypy/pypy/changeset/d381cb2c140b/ Log: Issue652 (kleptog) new lltype.Typedef, which allows to give different names (in the C backend) to the same type. Use it in cpyext to have "real" Py_ssize_t parameters diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other 
platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. """ + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." + at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -758,6 +761,8 @@ """ if T is lltype.Void: return None + if isinstance(T, 
lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def 
__repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + 
py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): From commits-noreply at bitbucket.org Fri Apr 15 10:35:16 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 10:35:16 +0200 (CEST) Subject: [pypy-svn] pypy default: move the comments Message-ID: <20110415083516.B58CF2A2046@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43376:335ce641785d Date: 2011-04-15 09:56 +0200 http://bitbucket.org/pypy/pypy/changeset/335ce641785d/ Log: move the comments diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1354,6 +1354,12 @@ log = self.run(main, [], threshold=200) assert log.result == 9895050.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i20 = int_ge(i18, i8) @@ -1378,9 +1384,6 @@ f39 = getarrayitem_raw(i13, i36, descr=...) ... """) - # XXX: what do we want to check here? - # We want to make sure that the overloaded __getitem__ - # not introduceds double array bound checks def test_circular(self): def main(): @@ -1404,6 +1407,11 @@ log = self.run(main, [], threshold=200) assert log.result == 1239690.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... 
i17 = int_and(i14, 255) @@ -1425,7 +1433,3 @@ f37 = getarrayitem_raw(i8, i36, descr=...) ... """) - # XXX: what do we want to check here? - # We want to check that the array bound checks are removed, - # so it's this part of the trace. However we dont care about - # the force_token()'s. Can they be ignored? From commits-noreply at bitbucket.org Fri Apr 15 10:35:22 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 10:35:22 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_min_max to test_pypy_c_new Message-ID: <20110415083522.B162A2A2049@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43377:7b8591c5ef70 Date: 2011-04-15 10:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7b8591c5ef70/ Log: port test_min_max to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1433,3 +1433,24 @@ f37 = getarrayitem_raw(i8, i36, descr=...) ... """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) 
+ i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,16 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) def test_silly_max(self): self.run_source(''' From commits-noreply at bitbucket.org Fri Apr 15 10:35:24 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 10:35:24 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_silly_max and test_iter_max to test_pypy_c_new; we still need to understand what we want to check, though Message-ID: <20110415083524.1A0152A204B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43378:2e5bd737be0c Date: 2011-04-15 10:10 +0200 http://bitbucket.org/pypy/pypy/changeset/2e5bd737be0c/ Log: port test_silly_max and test_iter_max to test_pypy_c_new; we still need to understand what we want to check, though diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1454,3 +1454,29 @@ --TICK-- jump(p0, p1, p2, p3, i11, i9, descr=) """) + + def test_silly_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(*range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? 
+ + def test_iter_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,29 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) From commits-noreply at bitbucket.org Fri Apr 15 10:35:28 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 10:35:28 +0200 (CEST) Subject: [pypy-svn] pypy default: move the comments Message-ID: <20110415083528.9BEF72A204A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43376:335ce641785d Date: 2011-04-15 09:56 +0200 http://bitbucket.org/pypy/pypy/changeset/335ce641785d/ Log: move the comments diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1354,6 +1354,12 @@ log = self.run(main, [], threshold=200) assert log.result == 9895050.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. 
+ # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i20 = int_ge(i18, i8) @@ -1378,9 +1384,6 @@ f39 = getarrayitem_raw(i13, i36, descr=...) ... """) - # XXX: what do we want to check here? - # We want to make sure that the overloaded __getitem__ - # not introduceds double array bound checks def test_circular(self): def main(): @@ -1404,6 +1407,11 @@ log = self.run(main, [], threshold=200) assert log.result == 1239690.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i17 = int_and(i14, 255) @@ -1425,7 +1433,3 @@ f37 = getarrayitem_raw(i8, i36, descr=...) ... """) - # XXX: what do we want to check here? - # We want to check that the array bound checks are removed, - # so it's this part of the trace. However we dont care about - # the force_token()'s. Can they be ignored? From commits-noreply at bitbucket.org Fri Apr 15 10:35:29 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 10:35:29 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_min_max to test_pypy_c_new Message-ID: <20110415083529.6ED4A2A2047@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43377:7b8591c5ef70 Date: 2011-04-15 10:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7b8591c5ef70/ Log: port test_min_max to test_pypy_c_new diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1433,3 +1433,24 @@ f37 = getarrayitem_raw(i8, i36, descr=...) ... 
""") + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) + i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,16 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) def test_silly_max(self): self.run_source(''' From commits-noreply at bitbucket.org Fri Apr 15 10:35:30 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 10:35:30 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_silly_max and test_iter_max to test_pypy_c_new; we still need to understand what we want to check, though Message-ID: <20110415083530.7086E2A204D@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43378:2e5bd737be0c Date: 2011-04-15 10:10 +0200 http://bitbucket.org/pypy/pypy/changeset/2e5bd737be0c/ Log: port test_silly_max and test_iter_max to test_pypy_c_new; we still need to understand what we want to check, though diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1454,3 +1454,29 @@ --TICK-- jump(p0, p1, p2, p3, i11, i9, descr=) """) + + def test_silly_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(*range(i)) + i+=1 + return 
sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? + + def test_iter_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,29 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) From commits-noreply at bitbucket.org Fri Apr 15 11:13:55 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 15 Apr 2011 11:13:55 +0200 (CEST) Subject: [pypy-svn] pypy default: Add some decorators about some constructor functions Message-ID: <20110415091355.704342A2046@codespeak.net> Author: Armin Rigo Branch: Changeset: r43379:bfa61b4d5577 Date: 2011-04-15 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/bfa61b4d5577/ Log: Add some decorators about some constructor functions being pure. It should avoid seeing in the jit traces lines like call(ConstClass(fromint), 1). 
diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. 
return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 From commits-noreply at bitbucket.org Fri Apr 15 11:14:02 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 15 Apr 2011 11:14:02 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110415091402.3A0D42A2047@codespeak.net> Author: Armin Rigo Branch: Changeset: r43380:556eef428694 Date: 2011-04-15 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/556eef428694/ Log: merge heads diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1354,6 +1354,12 @@ log = self.run(main, [], threshold=200) assert log.result == 9895050.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i20 = int_ge(i18, i8) @@ -1378,9 +1384,6 @@ f39 = getarrayitem_raw(i13, i36, descr=...) ... """) - # XXX: what do we want to check here? 
- # We want to make sure that the overloaded __getitem__ - # not introduceds double array bound checks def test_circular(self): def main(): @@ -1404,6 +1407,11 @@ log = self.run(main, [], threshold=200) assert log.result == 1239690.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i17 = int_and(i14, 255) @@ -1425,7 +1433,50 @@ f37 = getarrayitem_raw(i8, i36, descr=...) ... """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) + i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(*range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) # XXX: what do we want to check here? - # We want to check that the array bound checks are removed, - # so it's this part of the trace. However we dont care about - # the force_token()'s. Can they be ignored? + + def test_iter_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? 
diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. 
""" + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,39 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." + at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -758,6 +761,8 @@ """ if T is lltype.Void: return None + if isinstance(T, 
lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. / x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -95,9 +94,9 @@ # are awesome. return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + # Use a bitwise OR so the JIT doesn't produce 2 different guards. 
+ return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,8 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. */ -int _pypy_math_isinf(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,12 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): From commits-noreply at bitbucket.org Fri Apr 15 11:14:05 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 15 Apr 2011 11:14:05 +0200 (CEST) 
Subject: [pypy-svn] pypy default: Add some decorators about some constructor functions Message-ID: <20110415091405.EBBC72A204C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43379:bfa61b4d5577 Date: 2011-04-15 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/bfa61b4d5577/ Log: Add some decorators about some constructor functions being pure. It should avoid seeing in the jit traces lines like call(ConstClass(fromint), 1). diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. 
if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. 
return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 From commits-noreply at bitbucket.org Fri Apr 15 11:14:11 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 15 Apr 2011 11:14:11 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110415091411.C54DE2A204D@codespeak.net> Author: Armin Rigo Branch: Changeset: r43380:556eef428694 Date: 2011-04-15 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/556eef428694/ Log: merge heads diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1354,6 +1354,12 @@ log = self.run(main, [], threshold=200) assert log.result == 9895050.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i20 = int_ge(i18, i8) @@ -1378,9 +1384,6 @@ f39 = getarrayitem_raw(i13, i36, descr=...) ... """) - # XXX: what do we want to check here? 
- # We want to make sure that the overloaded __getitem__ - # not introduceds double array bound checks def test_circular(self): def main(): @@ -1404,6 +1407,11 @@ log = self.run(main, [], threshold=200) assert log.result == 1239690.0 loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless assert loop.match(""" ... i17 = int_and(i14, 255) @@ -1425,7 +1433,50 @@ f37 = getarrayitem_raw(i8, i36, descr=...) ... """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) + i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(*range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) # XXX: what do we want to check here? - # We want to check that the array bound checks are removed, - # so it's this part of the trace. However we dont care about - # the force_token()'s. Can they be ignored? + + def test_iter_max(self): + def main(): + i=2 + sa=0 + while i < 300: + sa+=max(range(i)) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? 
diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. 
""" + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,39 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." + at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -758,6 +761,8 @@ """ if T is lltype.Void: return None + if isinstance(T, 
lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. / x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -95,9 +94,9 @@ # are awesome. return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + # Use a bitwise OR so the JIT doesn't produce 2 different guards. 
+ return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,8 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. */ -int _pypy_math_isinf(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,12 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): From commits-noreply at bitbucket.org Fri Apr 15 11:45:22 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 15 Apr 2011 11:45:22 +0200 (CEST) 
Subject: [pypy-svn] pypy 32ptr-on-64bit: Phew. More and more cases, until the test half-passes. Message-ID: <20110415094522.393312A2046@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43381:dd9738a16aec Date: 2011-04-15 11:44 +0200 http://bitbucket.org/pypy/pypy/changeset/dd9738a16aec/ Log: Phew. More and more cases, until the test half-passes. diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -30,8 +30,9 @@ ('c', lltype.Char)) SPTR = lltype.Ptr(S) @jit.dont_look_inside - def escape(p): - return p + def escape(ptr): + assert llop.show_from_ptr32(SPTR, ptr.p).n == 42 + return ptr def f(n): y = lltype.malloc(S) y.n = n diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -24,6 +24,7 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 get_malloc_slowpath_addr = None + supports_compressed_ptrs = False def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) @@ -44,8 +45,6 @@ return None def freeing_block(self, start, stop): pass - def is_compressed_ptr(self, size): - return False # ____________________________________________________________ @@ -578,9 +577,9 @@ self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) - self.compressptr = gcdescr.config.translation.compressptr - if self.compressptr: - assert rffi.sizeof(rffi.INT) == rffi.sizeof(llmemory.HiddenGcRef32) + self.supports_compressed_ptrs = gcdescr.config.translation.compressptr + if self.supports_compressed_ptrs: + assert rffi.sizeof(rffi.UINT)==rffi.sizeof(llmemory.HiddenGcRef32) # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer @@ 
-869,13 +868,6 @@ def freeing_block(self, start, stop): self.gcrootmap.freeing_block(start, stop) - def is_compressed_ptr(self, size): - if self.compressptr: # constant-folded away - ptrsize = symbolic.get_size_of_ptr(self.translate_support_code) - return size != ptrsize - else: - return False - # ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -1271,11 +1271,14 @@ def _cast_to_ptr(self, PTRTYPE): return force_cast(PTRTYPE, self.intval) -## def _cast_to_int(self): -## return self.intval - -## def _cast_to_adr(self): -## return _lladdress(self.intval) + def _cast_to_hiddengcref32(self): + try: + result = _hiddengcref32[self] + except KeyError: + result = 1000 + len(_hiddengcref32) + _hiddengcref32[self] = result + _hiddengcref32back[result] = self + return _llgcopaque32(result) def cast_adr_to_int(addr): if isinstance(addr, llmemory.fakeaddress): @@ -1335,11 +1338,20 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): + # this is supposed to be cast back to GcStruct, not to a plain + # Struct or to any kind of Array, because HiddenGcRef32s are + # supposed to be taken only to GcStructs. 
+ assert isinstance(PTRTYPE.TO, lltype.GcStruct) if self.uint32val == 0: return lltype.nullptr(PTRTYPE.TO) obj = _hiddengcref32back[self.uint32val] return force_cast(PTRTYPE, obj._as_ptr()) + def _cast_to_gcref(self): + obj = _hiddengcref32back[self.uint32val] + assert isinstance(obj, _llgcopaque) + return obj + # ____________________________________________________________ # errno diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -579,15 +579,21 @@ return False def op_hide_into_ptr32(ptr): + if not ptr: + return lltype.nullptr(llmemory.HiddenGcRef32.TO) if lltype.typeOf(ptr) == llmemory.Address: - if not ptr: - return lltype.nullptr(llmemory.HiddenGcRef32.TO) ptr = ptr.ptr if isinstance(lltype.typeOf(ptr).TO, lltype.GcOpaqueType): - ptr = ptr._obj.container._as_ptr() + try: + ptr = ptr._obj.container._as_ptr() + except AttributeError: + # for _llgcopaque objects + return ptr._obj._cast_to_hiddengcref32()._as_ptr() return lltype.cast_opaque_ptr(llmemory.HiddenGcRef32, ptr) def op_show_from_ptr32(RESTYPE, ptr32): + if not ptr32: + return lltype.nullptr(RESTYPE.TO) if RESTYPE == llmemory.Address: if not ptr32: return llmemory.NULL @@ -595,7 +601,11 @@ ptr = lltype.cast_opaque_ptr(PTRTYPE, ptr32) return llmemory.cast_ptr_to_adr(ptr) if isinstance(RESTYPE.TO, lltype.GcOpaqueType): - ptr32 = ptr32._obj.container._as_ptr() + try: + ptr32 = ptr32._obj.container._as_ptr() + except AttributeError: + # for _llgcopaque32 objects + return ptr32._obj._cast_to_gcref()._as_ptr() return lltype.cast_opaque_ptr(RESTYPE, ptr32) op_show_from_ptr32.need_result_type = True diff --git a/pypy/jit/backend/llsupport/test/test_runner.py b/pypy/jit/backend/llsupport/test/test_runner.py --- a/pypy/jit/backend/llsupport/test/test_runner.py +++ b/pypy/jit/backend/llsupport/test/test_runner.py @@ -9,11 +9,6 @@ class MyLLCPU(AbstractLLCPU): supports_floats = True - def 
__init__(self, *args, **kwds): - super(MyLLCPU, self).__init__(*args, **kwds) - self.gc_ll_descr.is_compressed_ptr = ( - lambda size: sys.maxint > 2147483647 and size == 4) - def compile_loop(self, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -221,6 +221,12 @@ x = llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) return rffi.cast(rffi.UINT, x) + def is_compressed_ptr(self, size): + if we_are_translated(): + if not self.gc_ll_descr.supports_compressed_ptrs: + return False # nicely constant-foldable + return WORD == 8 and size == 4 + @staticmethod def cast_int_to_adr(x): return rffi.cast(llmemory.Address, x) @@ -303,7 +309,7 @@ def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) if icp: @@ -341,7 +347,7 @@ def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) @@ -408,7 +414,7 @@ @specialize.argtype(1) def _base_do_getfield_r(self, struct, fielddescr): ofs, size, _ = self.unpack_fielddescr(fielddescr) - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) if icp: @@ -455,7 +461,7 @@ ofs, size, _ = self.unpack_fielddescr(fielddescr) assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(struct, newvalue) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) From commits-noreply at bitbucket.org Fri Apr 15 11:45:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 15 Apr 2011 11:45:28 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Phew. More and more cases, until the test half-passes. Message-ID: <20110415094528.DD99C2A2049@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43381:dd9738a16aec Date: 2011-04-15 11:44 +0200 http://bitbucket.org/pypy/pypy/changeset/dd9738a16aec/ Log: Phew. More and more cases, until the test half-passes. 
diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -30,8 +30,9 @@ ('c', lltype.Char)) SPTR = lltype.Ptr(S) @jit.dont_look_inside - def escape(p): - return p + def escape(ptr): + assert llop.show_from_ptr32(SPTR, ptr.p).n == 42 + return ptr def f(n): y = lltype.malloc(S) y.n = n diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -24,6 +24,7 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 get_malloc_slowpath_addr = None + supports_compressed_ptrs = False def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) @@ -44,8 +45,6 @@ return None def freeing_block(self, start, stop): pass - def is_compressed_ptr(self, size): - return False # ____________________________________________________________ @@ -578,9 +577,9 @@ self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) - self.compressptr = gcdescr.config.translation.compressptr - if self.compressptr: - assert rffi.sizeof(rffi.INT) == rffi.sizeof(llmemory.HiddenGcRef32) + self.supports_compressed_ptrs = gcdescr.config.translation.compressptr + if self.supports_compressed_ptrs: + assert rffi.sizeof(rffi.UINT)==rffi.sizeof(llmemory.HiddenGcRef32) # make a TransformerLayoutBuilder and save it on the translator # where it can be fished and reused by the FrameworkGCTransformer @@ -869,13 +868,6 @@ def freeing_block(self, start, stop): self.gcrootmap.freeing_block(start, stop) - def is_compressed_ptr(self, size): - if self.compressptr: # constant-folded away - ptrsize = symbolic.get_size_of_ptr(self.translate_support_code) - return size != ptrsize - else: - return False - # ____________________________________________________________ def 
get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -1271,11 +1271,14 @@ def _cast_to_ptr(self, PTRTYPE): return force_cast(PTRTYPE, self.intval) -## def _cast_to_int(self): -## return self.intval - -## def _cast_to_adr(self): -## return _lladdress(self.intval) + def _cast_to_hiddengcref32(self): + try: + result = _hiddengcref32[self] + except KeyError: + result = 1000 + len(_hiddengcref32) + _hiddengcref32[self] = result + _hiddengcref32back[result] = self + return _llgcopaque32(result) def cast_adr_to_int(addr): if isinstance(addr, llmemory.fakeaddress): @@ -1335,11 +1338,20 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): + # this is supposed to be cast back to GcStruct, not to a plain + # Struct or to any kind of Array, because HiddenGcRef32s are + # supposed to be taken only to GcStructs. 
+ assert isinstance(PTRTYPE.TO, lltype.GcStruct) if self.uint32val == 0: return lltype.nullptr(PTRTYPE.TO) obj = _hiddengcref32back[self.uint32val] return force_cast(PTRTYPE, obj._as_ptr()) + def _cast_to_gcref(self): + obj = _hiddengcref32back[self.uint32val] + assert isinstance(obj, _llgcopaque) + return obj + # ____________________________________________________________ # errno diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -579,15 +579,21 @@ return False def op_hide_into_ptr32(ptr): + if not ptr: + return lltype.nullptr(llmemory.HiddenGcRef32.TO) if lltype.typeOf(ptr) == llmemory.Address: - if not ptr: - return lltype.nullptr(llmemory.HiddenGcRef32.TO) ptr = ptr.ptr if isinstance(lltype.typeOf(ptr).TO, lltype.GcOpaqueType): - ptr = ptr._obj.container._as_ptr() + try: + ptr = ptr._obj.container._as_ptr() + except AttributeError: + # for _llgcopaque objects + return ptr._obj._cast_to_hiddengcref32()._as_ptr() return lltype.cast_opaque_ptr(llmemory.HiddenGcRef32, ptr) def op_show_from_ptr32(RESTYPE, ptr32): + if not ptr32: + return lltype.nullptr(RESTYPE.TO) if RESTYPE == llmemory.Address: if not ptr32: return llmemory.NULL @@ -595,7 +601,11 @@ ptr = lltype.cast_opaque_ptr(PTRTYPE, ptr32) return llmemory.cast_ptr_to_adr(ptr) if isinstance(RESTYPE.TO, lltype.GcOpaqueType): - ptr32 = ptr32._obj.container._as_ptr() + try: + ptr32 = ptr32._obj.container._as_ptr() + except AttributeError: + # for _llgcopaque32 objects + return ptr32._obj._cast_to_gcref()._as_ptr() return lltype.cast_opaque_ptr(RESTYPE, ptr32) op_show_from_ptr32.need_result_type = True diff --git a/pypy/jit/backend/llsupport/test/test_runner.py b/pypy/jit/backend/llsupport/test/test_runner.py --- a/pypy/jit/backend/llsupport/test/test_runner.py +++ b/pypy/jit/backend/llsupport/test/test_runner.py @@ -9,11 +9,6 @@ class MyLLCPU(AbstractLLCPU): supports_floats = True - def 
__init__(self, *args, **kwds): - super(MyLLCPU, self).__init__(*args, **kwds) - self.gc_ll_descr.is_compressed_ptr = ( - lambda size: sys.maxint > 2147483647 and size == 4) - def compile_loop(self, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -221,6 +221,12 @@ x = llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) return rffi.cast(rffi.UINT, x) + def is_compressed_ptr(self, size): + if we_are_translated(): + if not self.gc_ll_descr.supports_compressed_ptrs: + return False # nicely constant-foldable + return WORD == 8 and size == 4 + @staticmethod def cast_int_to_adr(x): return rffi.cast(llmemory.Address, x) @@ -303,7 +309,7 @@ def bh_getarrayitem_gc_r(self, arraydescr, gcref, itemindex): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) if icp: @@ -341,7 +347,7 @@ def bh_setarrayitem_gc_r(self, arraydescr, gcref, itemindex, newvalue): ofs, size, _ = self.unpack_arraydescr(arraydescr) - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) @@ -408,7 +414,7 @@ @specialize.argtype(1) def _base_do_getfield_r(self, struct, fielddescr): ofs, size, _ = self.unpack_fielddescr(fielddescr) - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) # --- start of GC unsafe code (no GC operation!) 
--- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) if icp: @@ -455,7 +461,7 @@ ofs, size, _ = self.unpack_fielddescr(fielddescr) assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") - icp = self.gc_ll_descr.is_compressed_ptr(size) + icp = self.is_compressed_ptr(size) self.gc_ll_descr.do_write_barrier(struct, newvalue) # --- start of GC unsafe code (no GC operation!) --- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) From commits-noreply at bitbucket.org Fri Apr 15 11:54:38 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 11:54:38 +0200 (CEST) Subject: [pypy-svn] pypy default: add the possibility to completely skip certain ops during the match Message-ID: <20110415095438.CAE732A2046@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43382:d3939c552848 Date: 2011-04-15 11:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d3939c552848/ Log: add the possibility to completely skip certain ops during the match diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -314,7 +314,7 @@ # it matched! The '...' operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. 
it matches all the operations until it finds one that matches @@ -333,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -348,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -357,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -100,11 +100,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ -234,6 +234,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) 
+ """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): From commits-noreply at bitbucket.org Fri Apr 15 11:54:39 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 11:54:39 +0200 (CEST) Subject: [pypy-svn] pypy default: ignore force_tokens in this two tests Message-ID: <20110415095439.A30BE2A2046@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43383:02ea09544fc9 Date: 2011-04-15 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/02ea09544fc9/ Log: ignore force_tokens in this two tests diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1360,31 +1360,29 @@ # # The force_token()s are still there, but will be eliminated by the # backend regalloc, so they are harmless - assert loop.match(""" + assert loop.match(ignore_ops=['force_token'], + expected_src=""" ... i20 = int_ge(i18, i8) guard_false(i20, descr=...) f21 = getarrayitem_raw(i13, i18, descr=...) - i22 = force_token() f23 = getarrayitem_raw(i13, i14, descr=...) f24 = float_add(f21, f23) - i25 = force_token() f26 = getarrayitem_raw(i13, i6, descr=...) 
f27 = float_add(f24, f26) i29 = int_add(i6, 1) - i30 = force_token() i31 = int_ge(i29, i8) guard_false(i31, descr=...) f33 = getarrayitem_raw(i13, i29, descr=...) f34 = float_add(f27, f33) i36 = int_add(i6, 2) - i37 = force_token() i38 = int_ge(i36, i8) guard_false(i38, descr=...) f39 = getarrayitem_raw(i13, i36, descr=...) ... """) + def test_circular(self): def main(): from array import array @@ -1412,23 +1410,20 @@ # # The force_token()s are still there, but will be eliminated by the # backend regalloc, so they are harmless - assert loop.match(""" + assert loop.match(ignore_ops=['force_token'], + expected_src=""" ... i17 = int_and(i14, 255) f18 = getarrayitem_raw(i8, i17, descr=...) - i19 = force_token() f20 = getarrayitem_raw(i8, i9, descr=...) f21 = float_add(f18, f20) - i22 = force_token() f23 = getarrayitem_raw(i8, i10, descr=...) f24 = float_add(f21, f23) i26 = int_add(i6, 1) - i27 = force_token() i29 = int_and(i26, 255) f30 = getarrayitem_raw(i8, i29, descr=...) f31 = float_add(f24, f30) i33 = int_add(i6, 2) - i34 = force_token() i36 = int_and(i33, 255) f37 = getarrayitem_raw(i8, i36, descr=...) ... From commits-noreply at bitbucket.org Fri Apr 15 11:54:45 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 11:54:45 +0200 (CEST) Subject: [pypy-svn] pypy default: add the possibility to completely skip certain ops during the match Message-ID: <20110415095445.0293E2A2047@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43382:d3939c552848 Date: 2011-04-15 11:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d3939c552848/ Log: add the possibility to completely skip certain ops during the match diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -314,7 +314,7 @@ # it matched! The '...' 
operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. it matches all the operations until it finds one that matches @@ -333,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -348,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -357,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -100,11 +100,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ -234,6 +234,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) 
+ """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): From commits-noreply at bitbucket.org Fri Apr 15 11:54:45 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 11:54:45 +0200 (CEST) Subject: [pypy-svn] pypy default: ignore force_tokens in this two tests Message-ID: <20110415095445.EECC52A2047@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43383:02ea09544fc9 Date: 2011-04-15 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/02ea09544fc9/ Log: ignore force_tokens in this two tests diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1360,31 +1360,29 @@ # # The force_token()s are still there, but will be eliminated by the # backend regalloc, so they are harmless - assert loop.match(""" + assert loop.match(ignore_ops=['force_token'], + expected_src=""" ... i20 = int_ge(i18, i8) guard_false(i20, descr=...) f21 = getarrayitem_raw(i13, i18, descr=...) - i22 = force_token() f23 = getarrayitem_raw(i13, i14, descr=...) f24 = float_add(f21, f23) - i25 = force_token() f26 = getarrayitem_raw(i13, i6, descr=...) 
f27 = float_add(f24, f26) i29 = int_add(i6, 1) - i30 = force_token() i31 = int_ge(i29, i8) guard_false(i31, descr=...) f33 = getarrayitem_raw(i13, i29, descr=...) f34 = float_add(f27, f33) i36 = int_add(i6, 2) - i37 = force_token() i38 = int_ge(i36, i8) guard_false(i38, descr=...) f39 = getarrayitem_raw(i13, i36, descr=...) ... """) + def test_circular(self): def main(): from array import array @@ -1412,23 +1410,20 @@ # # The force_token()s are still there, but will be eliminated by the # backend regalloc, so they are harmless - assert loop.match(""" + assert loop.match(ignore_ops=['force_token'], + expected_src=""" ... i17 = int_and(i14, 255) f18 = getarrayitem_raw(i8, i17, descr=...) - i19 = force_token() f20 = getarrayitem_raw(i8, i9, descr=...) f21 = float_add(f18, f20) - i22 = force_token() f23 = getarrayitem_raw(i8, i10, descr=...) f24 = float_add(f21, f23) i26 = int_add(i6, 1) - i27 = force_token() i29 = int_and(i26, 255) f30 = getarrayitem_raw(i8, i29, descr=...) f31 = float_add(f24, f30) i33 = int_add(i6, 2) - i34 = force_token() i36 = int_and(i33, 255) f37 = getarrayitem_raw(i8, i36, descr=...) ... 
From commits-noreply at bitbucket.org Fri Apr 15 11:59:51 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 11:59:51 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: some typos and problems I found while reading again Message-ID: <20110415095951.215E82A2046@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3516:0cc360571fd3 Date: 2011-04-15 11:59 +0200 http://bitbucket.org/pypy/extradoc/changeset/0cc360571fd3/ Log: some typos and problems I found while reading again diff --git a/talk/icooolps2011/code/trace2.tex b/talk/icooolps2011/code/trace2.tex --- a/talk/icooolps2011/code/trace2.tex +++ b/talk/icooolps2011/code/trace2.tex @@ -15,7 +15,7 @@ |{\color{gray}guard($index_2$ == -1)}| $cls_1$ = $inst_1$.cls $methods_1$ = $cls_1$.methods -$result_2$ = dict.get($methods_1$, "b") +$result_2$ = dict.get($methods_1$, "b", None) guard($result_2$ is not None) $v_2$ = $result_1$ + $result_2$ @@ -26,7 +26,7 @@ |{\color{gray}guard($index_3$ == -1)}| |{\color{gray}$cls_2$ = $inst_1$.cls}| |{\color{gray}$methods_2$ = $cls_2$.methods}| -$result_3$ = dict.get($methods_2$, "c") +$result_3$ = dict.get($methods_2$, "c", None) guard($result_3$ is not None) $v_4$ = $v_2$ + $result_3$ diff --git a/talk/icooolps2011/code/interpreter-slow.tex b/talk/icooolps2011/code/interpreter-slow.tex --- a/talk/icooolps2011/code/interpreter-slow.tex +++ b/talk/icooolps2011/code/interpreter-slow.tex @@ -1,5 +1,5 @@ {\noop -\begin{lstlisting}[mathescape,basicstyle=\ttfamily,numbers = right] +\begin{lstlisting}[mathescape,basicstyle=\ttfamily,numbers = right,numberblanklines=false] class Class(object): def __init__(self, name): self.name = name diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -17,7 +17,8 @@ @elidable def _find_method(self, name, version): - return self.methods.get(name) + assert version is self.version + return 
self.methods.get(name, None) def write_method(self, name, value): self.methods[name] = value diff --git a/talk/icooolps2011/code/trace3.tex b/talk/icooolps2011/code/trace3.tex --- a/talk/icooolps2011/code/trace3.tex +++ b/talk/icooolps2011/code/trace3.tex @@ -8,14 +8,14 @@ # $inst_1$.getattr("b") $cls_1$ = $inst_1$.cls $methods_1$ = $cls_1$.methods -$result_2$ = dict.get($methods_1$, "b") +$result_2$ = dict.get($methods_1$, "b", None) guard($result_2$ is not None) $v_2$ = $result_1$ + $result_2$ # $inst_1$.getattr("c") $cls_2$ = $inst_1$.cls $methods_2$ = $cls_2$.methods -$result_3$ = dict.get($methods_2$, "c") +$result_3$ = dict.get($methods_2$, "c", None) guard($result_3$ is not None) $v_4$ = $v_2$ + $result_3$ diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 63d41d94fd08a4192b987890c47be5ad8ec856e0..ea5b9ae43afc701b979e8445bb6246eb337e3f88 GIT binary patch [cut] diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -154,8 +154,8 @@ of the object model. Conceptually, the significant speed-ups that can be achieved with -dynamic compilation depend on feeding into compilation and exploiting -values observed at runtime. In particular, if +dynamic compilation depend on feeding into compilation values observed at +runtime and exploiting them. In particular, if there are values which vary very slowly, it is possible to compile multiple specialized versions of the same code, one for each actual value. To exploit the runtime feedback, the implementation code and data structures need to be @@ -176,7 +176,7 @@ \item A worked-out example of a simple object model of a dynamic language and how it can be improved using these hints. \item This example also exemplifies general techniques for refactoring code to - expose likely runtime constants constant folding opportunities. + expose constant folding opportunities of likely runtime constants. 
\end{itemize} The paper is structured as follows: Section~\ref{sec:Background} gives an @@ -201,7 +201,7 @@ A number of languages have been implemented with PyPy, most importantly a full Python implementation, but also a Prolog interpreter -\cite{carl_friedrich_bolz_towards_2010}. +\cite{carl_friedrich_bolz_towards_2010} and some less mature experiments. The translation of the interpreter to C code adds a number of implementation details into the final executable that are not present in the interpreter implementation, such as @@ -221,8 +221,8 @@ \label{sub:tracing} A recently popular approach to JIT compilers is that of tracing JITs. Tracing -JITs have their origin in the Dynamo project, which used one of them for dynamic -assembler optimization \cite{bala_dynamo:_2000}. Later they were used to implement +JITs have their origin in the Dynamo project, which used the technique for dynamic +machine code optimization \cite{bala_dynamo:_2000}. Later they were used to implement a lightweight JIT for Java \cite{gal_hotpathvm:_2006} and for dynamic languages such as JavaScript \cite{gal_trace-based_2009}. @@ -243,7 +243,7 @@ To be able to do this recording, VMs with a tracing JIT typically contain an interpreter. After a user program is started the interpreter is used; only the most frequently executed paths through the user -program are turned into machine code. The interpreter is also used when a guard +program are traced and turned into machine code. The interpreter is also used when a guard fails to continue the execution from the failing guard. Since PyPy wants to be a general framework, we want to reuse our tracer for @@ -259,7 +259,7 @@ the tracer, its optimizers and backends reusable for a variety of languages. The language semantics do not need to be encoded into the JIT. Instead the tracer just picks them up from the interpreter. This also means that the JIT by -construction supports the full language. 
+construction supports the full language as correctly as the interpreter. While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the @@ -307,7 +307,7 @@ object model that just supports classes and instances, without any inheritance or other advanced features. In the model classes contain methods. Instances have a class. Instances have their own attributes (or fields). When looking up an -attribute on an instance, the instances attributes are searched. If the +attribute on an instance, the instance's attributes are searched. If the attribute is not found there, the class' methods are searched. \begin{figure} @@ -371,7 +371,7 @@ \section{Hints for Controlling Optimization} \label{sec:hints} -In this section we will describe how to add two hints that allow the +In this section we will describe two hints that allow the interpreter author to increase the optimization opportunities for constant folding. If applied correctly these techniques can give really big speedups by pre-computing parts of what happens at runtime. On the other @@ -400,7 +400,7 @@ However, the optimizer can statically know the value of a variable even if it is not a constant in the original source code. For example, consider the following fragment of RPython code on the left. If the fragment is traced with -$x_1$ being \texttt{4}, the trace on the left is produced: +\texttt{x} being \texttt{4}, the trace on the right is produced: \begin{minipage}[b]{0.5\linewidth} @@ -424,10 +424,10 @@ \end{minipage} -In the trace, the value of $x_1$ is statically known after the guard. -Remember that a guard is a runtime check. The above trace will run to -completion when $x_1$ \texttt{== 4}. If the check fails, execution of the trace is -stopped and the interpreter continues to run. +A guard is a runtime check. The above trace will run to completion when $x_1$ +\texttt{== 4}. 
If the check fails, execution of the trace is stopped and the +interpreter continues to run. Therefore, the value of $x_1$ is statically known +to be \texttt{4} after the guard. There are cases in which it is useful to turn an arbitrary variable into a constant value. This process is called \emph{promotion} and it is an old idea @@ -440,9 +440,10 @@ optimization opportunities, even though it could have different values in practice. In such a place, promotion can be used. The typical reason to do that is if there is -a lot of computation depending on the value of that variable. +a lot of computation depending on the value of one variable. -Let's make this more concrete. If we trace a call to the function on the left, we get the trace on the right: +Let's make this more concrete. If we trace a call to the function (written in +RPython) on the left, we get the trace on the right: \begin{minipage}[b]{0.5\linewidth} \centering @@ -468,7 +469,7 @@ \end{minipage} Observe how the first two operations could be constant-folded if the value of -$x_1$ were known. Let's assume that the value of \texttt{x} in the Python code can vary, but does so +$x_1$ were known. Let's assume that the value of \texttt{x} in the RPython code can vary, but does so rarely, i.e. only takes a few different values at runtime. If this is the case, we can add a hint to promote \texttt{x}, like this: @@ -509,12 +510,12 @@ operation at the beginning. The promotion is turned into a \texttt{guard} operation in the trace. The guard -captures the value of $x_1$ as it was during tracing. Thus the runtime value of -\texttt{x} is being made available to the compiler to exploit. The introduced +captures the runtime value of \texttt{x} as it was during tracing, which can +then be exploited by the compiler. The introduced guard specializes the trace, because it only works if the value of $x_1$ is \texttt{4}. 
From the point of view of the optimizer, this guard is not any different than the one produced by the \texttt{if} -statement in the example above. After the guard, the rest of the trace can +statement in the first example. After the guard, the rest of the trace can assume that $x_1$ is equal to \texttt{4}, meaning that the optimizer will turn this trace into: @@ -565,9 +566,9 @@ In the previous section we saw a way to turn arbitrary variables into constants. All foldable operations on these constants can be constant-folded. This works well for -constant folding of simple types, e.g. integers. Unfortunately, in the context of an +constant folding of primitive types, e.g. integers. Unfortunately, in the context of an interpreter for a dynamic -language, most operations actually manipulate objects, not simple types. The +language, most operations actually manipulate objects, not primitive types. The operations on objects are often not foldable and might even have side-effects. If one reads a field out of a constant reference to an object this cannot necessarily be folded away because the object can be mutated. Therefore, another @@ -581,7 +582,7 @@ is less strict than that of a pure function, because it is only about actual calls during execution. All pure functions are trace-elidable though.}. From this definition follows that a call to an trace-elidable function with -constant arguments in a trace can be replaced with the result of the call. +constant arguments in a trace can be replaced with the result of the call seen during tracing. As an example, take the class on the left. Tracing the call \texttt{a.f(10)} of some instance of \texttt{A} yields the trace on the right (note how the call to @@ -621,7 +622,7 @@ which lets the interpreter author communicate invariants to the optimizer. In this case, she could decide that the \texttt{x} field of instances of \texttt{A} is immutable, and therefore \texttt{c} -is an trace-elidable function. 
To communicate this, there is a \texttt{elidable} decorator. +is an trace-elidable function. To communicate this, there is an \texttt{@elidable} decorator. If the code in \texttt{c} should be constant-folded away, we would change the class as follows: @@ -648,18 +649,18 @@ \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($a_1$ == 0xb73984a8) -$v_1$ = c($a_1$) +$v_1$ = A.c($a_1$) $v_2$ = $v_1$ + $val_1$ $a_1$.y = $v_2$ \end{lstlisting} \end{minipage} Here, \texttt{0xb73984a8} is the address of the instance of \texttt{A} that was used -during tracing. The call to \texttt{c} is not inlined, so that the optimizer -has a chance to see it. Since the \texttt{c} function is marked as trace-elidable, and its +during tracing. The call to \texttt{A.c} is not inlined, so that the optimizer +has a chance to see it. Since the \texttt{A.c} method is marked as trace-elidable, and its argument is a constant reference, the call will be removed by the optimizer. The final -trace looks like this: +trace looks like this (assuming that the \texttt{x} field's value is \texttt{4}): % {\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -669,14 +670,12 @@ \end{lstlisting} } -(assuming that the \texttt{x} field's value is \texttt{4}). - -On the one hand, the \texttt{elidable} annotation is very powerful. It can be +On the one hand, the \texttt{@elidable} annotation is very powerful. It can be used to constant-fold arbitrary parts of the computation in the interpreter. However, the annotation also gives the interpreter author ample opportunity to introduce bugs. If a function is annotated to be trace-elidable, but is not really, the optimizer can produce subtly wrong code. Therefore, a lot of care has to be taken when using this -annotation\footnote{The most common use case of the \texttt{elidable} +annotation\footnote{The most common use case of the \texttt{@elidable} annotation is indeed to declare the immutability of fields. 
Because it is so common, we have special syntactic sugar for it.}. We hope to introduce a debugging mode which would (slowly) check whether the annotation is applied @@ -726,28 +725,32 @@ In this implementation instances no longer use dictionaries to store their fields. Instead, they have a reference to a map, which maps field names to indexes into a storage list. The -storage list contains the actual field values. Therefore they have to be immutable, which means +storage list contains the actual field values. Maps are shared between +different instances, therefore they have to be immutable, which means that their \texttt{getindex} method is an trace-elidable function. When a new attribute is added to an instance, a new map needs to be chosen, which is done with the \texttt{add\_attribute} method on the previous map. This function is also trace-elidable, because it caches all new instances of \texttt{Map} that it creates, to make -sure that objects with the same layout have the same map. Now that we have +sure that objects with the same layout have the same map, which makes its side +effects idempotent. Now that we have introduced maps, it is safe to promote the map everywhere, because we assume that the number of different instance layouts is small. -With this changed instance implementation, the trace we had above changes to the -following that of see Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the +With this changed instance implementation, the trace we saw in Section~\ref{sub:running} changes to the +that of Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the memory address of the \texttt{Map} instance that has been promoted. Operations -that can be optimized away are grayed out, their results will be replaced by +that can be optimized away are grayed out, their results will be replaced with fixed values by the constant folding. 
The calls to \texttt{Map.getindex} can be optimized away, because they are calls to -a trace-elidable function and they have constant arguments. That means that \texttt{index1/2/3} +a trace-elidable function and they have constant arguments. That means that $index_{1/2/3}$ are constant and the guards on them can be removed. All but the first guard on the map will be optimized away too, because the map cannot have changed in between. This trace is already much better than the original one. Now we are down from five dictionary lookups to just two. +XXX separation of fast and slow-changing parts + \begin{figure} \input{code/trace2.tex} \caption{Unoptimized Trace After the Introduction of Maps} @@ -761,13 +764,13 @@ \subsection{Versioning of Classes} -Instances were optimized making the assumption that the total number of +Instances were optimized by making the assumption that the total number of different instance layouts is small compared to the number of instances. For classes we will make an even stronger assumption. We simply assume that it is rare for classes to change at all. This is not totally reasonable (sometimes classes contain counters or similar things) but for this simple example it is good -enough.\footnote{There is a more complex variant of class versions that can -accommodate class fields that change a lot better.} +enough.\footnote{There is a more complex variant of the presented technique that can +accommodate quick-changing class fields a lot better.} What we would really like is if the \texttt{Class.find\_method} method were trace-elidable. But it cannot be, because it is always possible to change the class itself. @@ -778,8 +781,8 @@ class gets changed (i.e., the \texttt{methods} dictionary changes). This means that the result of calls to \texttt{methods.get()} for a given \texttt{(name, version)} pair will always be the same, i.e. it is a trace-elidable operation. 
To help -the JIT to detect this case, we factor it out in a helper method which is -explicitly marked as \texttt{@elidable}. The refactored \texttt{Class} can +the JIT to detect this case, we factor it out in a helper method \texttt{\_find\_method} which is +marked as \texttt{@elidable}. The refactored \texttt{Class} can be seen in Figure~\ref{fig:version} \begin{figure} @@ -811,7 +814,7 @@ \label{fig:trace5} \end{figure} -The index \texttt{0} that is used to read out of the \texttt{storage} array is the result +The index \texttt{0} that is used to read out of the \texttt{storage} list is the result of the constant-folded \texttt{getindex} call. The constants \texttt{41} and \texttt{17} are the results of the folding of the \texttt{\_find\_method} calls. This final trace is now very good. It no longer performs any @@ -843,9 +846,9 @@ Another optimization is that in practice the shape of an instance is correlated with its class. In our code above, we allow both to vary independently. -Therefore we store the class of an instance on the map in PyPy's Python -interpreter. This means that we get one fewer promotion (and thus one fewer -guard) in the trace, because the class doesn't need to be promoted after the +In PyPy's Python interpreter we store the class of an instance on its map. This +means that we get one fewer promotion and thus one fewer +guard in the trace, because the class doesn't need to be promoted after the map has been. @@ -856,7 +859,7 @@ %The techniques we used above to make instance and class lookups faster are %applicable in more general cases than the one we developed them for. A more %abstract view of maps is that of splitting a data-structure into an immutable part (\eg the map) -%and a part that changes (\eg the storage array). All the computation on the +%and a part that changes (\eg the storage list). 
All the computation on the %immutable part is trace-elidable so that only the manipulation of the quick-changing %part remains in the trace after optimization. % @@ -878,7 +881,7 @@ framework\footnote{\texttt{http://www.djangoproject.com/}}; a Monte-Carlo Go AI\footnote{\texttt{http://shed-skin.blogspot.com/2009/07/ disco-elegant-python-go-player.html}}; a BZ2 decoder; a port of the classical -Richards benchmark in Python; a Python version of the Telco decimal +Richards benchmark to Python; a Python version of the Telco decimal benchmark\footnote{\texttt{http://speleotrove.com/decimal/telco.html}}, using a pure Python decimal floating point implementation. The results we see in these benchmarks seem to repeat themselves in other benchmarks using object-oriented @@ -901,7 +904,7 @@ reported in Figure~\ref{fig:times}, together with the same numbers normalized to those of the full JIT. -The optimizations give a speedup between 80\% and almost 20 times. The Richards +The optimizations give a speedup between 80\% and almost 20 times. The Richards benchmark is a particularly good case for the optimizations as it makes heavy uses of object-oriented features. Pyflate uses mostly imperative code, so does not benefit as much. Together with the optimization, PyPy outperforms CPython in @@ -948,7 +951,7 @@ PyPy uses for the same reasons \cite{bolz_tracing_2009}. Their approach suffers mostly from the low abstraction level that machine code provides. -Yermolovich et. al. describe the use of the Tamarin JavaScript tracing JIT as a +Yermolovich et. al. \cite{yermolovich_optimization_2009} describe the use of the Tamarin JavaScript tracing JIT as a meta-tracer for a Lua interpreter. They compile the normal Lua interpreter in C to ActionScript bytecode. Again, the interpreter is annotated with some hints that indicate the main interpreter loop to the tracer. 
No further hints are @@ -970,6 +973,8 @@ interpreters into compilers using the second futamura projection \cite{futamura_partial_1999}. Given that classical partial evaluation works strictly ahead of time, it inherently cannot support runtime feedback. +Some partial evaluator work at runtime, such as DyC \cite{grant_dyc:_2000}, +which also supports a concept similar to promotion (called dynamic-to-static promotion). An early attempt at building a general environment for implementing languages efficiently is described by Wolczko et. al. \cite{mario_wolczko_towards_1999}. @@ -988,8 +993,7 @@ \cite{bolz_towards_2009}. Promotion is also heavily used by Psyco \cite{rigo_representation-based_2004} (promotion is called "unlifting" in this paper) a method-based JIT compiler for Python written by -one of the authors. Promotion was also used in DyC \cite{grant_dyc:_2000}, a -runtime partial evaluator for C. Promotion is quite similar to +one of the authors. Promotion is quite similar to (polymorphic) inline caching and runtime type feedback techniques which were first used in Smalltalk \cite{deutsch_efficient_1984} and SELF \cite{hoelzle_optimizing_1991,hoelzle_optimizing_1994} implementations. @@ -1018,6 +1022,8 @@ The authors would like to thank Peng Wu, David Edelsohn and Laura Creighton for encouragement, fruitful discussions and feedback during the writing of this paper. +This research was partially supported by the BMBF funded project PyJIT (nr. 01QE0913B; +Eureka Eurostars). 
\bibliographystyle{abbrv} \bibliography{paper} diff --git a/talk/icooolps2011/code/trace1.tex b/talk/icooolps2011/code/trace1.tex --- a/talk/icooolps2011/code/trace1.tex +++ b/talk/icooolps2011/code/trace1.tex @@ -7,21 +7,21 @@ # $inst_1$.getattr("b") |\setcounter{lstnumber}{21}| $attributes_2$ = $inst_1$.attributes |\setcounter{lstnumber}{21}| -$v_1$ = dict.get($attributes_2$, "b") |\setcounter{lstnumber}{28}| +$v_1$ = dict.get($attributes_2$, "b", None) |\setcounter{lstnumber}{28}| guard($v_1$ is None) |\setcounter{lstnumber}{29}| $cls_1$ = $inst_1$.cls |\setcounter{lstnumber}{9}| $methods_1$ = cls.methods |\setcounter{lstnumber}{9}| -$result_2$ = dict.get($methods_1$, "b") |\setcounter{lstnumber}{30}| +$result_2$ = dict.get($methods_1$, "b", None) |\setcounter{lstnumber}{30}| guard($result_2$ is not None) |\setcounter{lstnumber}{-2}| $v_2$ = $result_1$ + $result_2$ |\setcounter{lstnumber}{25}| # $inst_1$.getattr("c") |\setcounter{lstnumber}{21}| $attributes_3$ = $inst_1$.attributes |\setcounter{lstnumber}{21}| -$v_3$ = dict.get($attributes_3$, "c") |\setcounter{lstnumber}{28}| +$v_3$ = dict.get($attributes_3$, "c", None) |\setcounter{lstnumber}{28}| guard($v_3$ is None) |\setcounter{lstnumber}{29}| $cls_1$ = $inst_1$.cls |\setcounter{lstnumber}{9}| $methods_2$ = cls.methods |\setcounter{lstnumber}{9}| -$result_3$ = dict.get($methods_2$, "c") |\setcounter{lstnumber}{30}| +$result_3$ = dict.get($methods_2$, "c", None) |\setcounter{lstnumber}{30}| guard($result_3$ is not None) |\setcounter{lstnumber}{-3}| $v_4$ = $v_2$ + $result_3$ |\setcounter{lstnumber}{-2}| From commits-noreply at bitbucket.org Fri Apr 15 11:59:56 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 11:59:56 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: some typos and problems I found while reading again Message-ID: <20110415095956.5789B2A2046@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3516:0cc360571fd3 Date: 2011-04-15 11:59 +0200 
http://bitbucket.org/pypy/extradoc/changeset/0cc360571fd3/ Log: some typos and problems I found while reading again diff --git a/talk/icooolps2011/code/trace2.tex b/talk/icooolps2011/code/trace2.tex --- a/talk/icooolps2011/code/trace2.tex +++ b/talk/icooolps2011/code/trace2.tex @@ -15,7 +15,7 @@ |{\color{gray}guard($index_2$ == -1)}| $cls_1$ = $inst_1$.cls $methods_1$ = $cls_1$.methods -$result_2$ = dict.get($methods_1$, "b") +$result_2$ = dict.get($methods_1$, "b", None) guard($result_2$ is not None) $v_2$ = $result_1$ + $result_2$ @@ -26,7 +26,7 @@ |{\color{gray}guard($index_3$ == -1)}| |{\color{gray}$cls_2$ = $inst_1$.cls}| |{\color{gray}$methods_2$ = $cls_2$.methods}| -$result_3$ = dict.get($methods_2$, "c") +$result_3$ = dict.get($methods_2$, "c", None) guard($result_3$ is not None) $v_4$ = $v_2$ + $result_3$ diff --git a/talk/icooolps2011/code/interpreter-slow.tex b/talk/icooolps2011/code/interpreter-slow.tex --- a/talk/icooolps2011/code/interpreter-slow.tex +++ b/talk/icooolps2011/code/interpreter-slow.tex @@ -1,5 +1,5 @@ {\noop -\begin{lstlisting}[mathescape,basicstyle=\ttfamily,numbers = right] +\begin{lstlisting}[mathescape,basicstyle=\ttfamily,numbers = right,numberblanklines=false] class Class(object): def __init__(self, name): self.name = name diff --git a/talk/icooolps2011/code/version.tex b/talk/icooolps2011/code/version.tex --- a/talk/icooolps2011/code/version.tex +++ b/talk/icooolps2011/code/version.tex @@ -17,7 +17,8 @@ @elidable def _find_method(self, name, version): - return self.methods.get(name) + assert version is self.version + return self.methods.get(name, None) def write_method(self, name, value): self.methods[name] = value diff --git a/talk/icooolps2011/code/trace3.tex b/talk/icooolps2011/code/trace3.tex --- a/talk/icooolps2011/code/trace3.tex +++ b/talk/icooolps2011/code/trace3.tex @@ -8,14 +8,14 @@ # $inst_1$.getattr("b") $cls_1$ = $inst_1$.cls $methods_1$ = $cls_1$.methods -$result_2$ = dict.get($methods_1$, "b") +$result_2$ = 
dict.get($methods_1$, "b", None) guard($result_2$ is not None) $v_2$ = $result_1$ + $result_2$ # $inst_1$.getattr("c") $cls_2$ = $inst_1$.cls $methods_2$ = $cls_2$.methods -$result_3$ = dict.get($methods_2$, "c") +$result_3$ = dict.get($methods_2$, "c", None) guard($result_3$ is not None) $v_4$ = $v_2$ + $result_3$ diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 63d41d94fd08a4192b987890c47be5ad8ec856e0..ea5b9ae43afc701b979e8445bb6246eb337e3f88 GIT binary patch [cut] diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -154,8 +154,8 @@ of the object model. Conceptually, the significant speed-ups that can be achieved with -dynamic compilation depend on feeding into compilation and exploiting -values observed at runtime. In particular, if +dynamic compilation depend on feeding into compilation values observed at +runtime and exploiting them. In particular, if there are values which vary very slowly, it is possible to compile multiple specialized versions of the same code, one for each actual value. To exploit the runtime feedback, the implementation code and data structures need to be @@ -176,7 +176,7 @@ \item A worked-out example of a simple object model of a dynamic language and how it can be improved using these hints. \item This example also exemplifies general techniques for refactoring code to - expose likely runtime constants constant folding opportunities. + expose constant folding opportunities of likely runtime constants. \end{itemize} The paper is structured as follows: Section~\ref{sec:Background} gives an @@ -201,7 +201,7 @@ A number of languages have been implemented with PyPy, most importantly a full Python implementation, but also a Prolog interpreter -\cite{carl_friedrich_bolz_towards_2010}. +\cite{carl_friedrich_bolz_towards_2010} and some less mature experiments. 
The translation of the interpreter to C code adds a number of implementation details into the final executable that are not present in the interpreter implementation, such as @@ -221,8 +221,8 @@ \label{sub:tracing} A recently popular approach to JIT compilers is that of tracing JITs. Tracing -JITs have their origin in the Dynamo project, which used one of them for dynamic -assembler optimization \cite{bala_dynamo:_2000}. Later they were used to implement +JITs have their origin in the Dynamo project, which used the technique for dynamic +machine code optimization \cite{bala_dynamo:_2000}. Later they were used to implement a lightweight JIT for Java \cite{gal_hotpathvm:_2006} and for dynamic languages such as JavaScript \cite{gal_trace-based_2009}. @@ -243,7 +243,7 @@ To be able to do this recording, VMs with a tracing JIT typically contain an interpreter. After a user program is started the interpreter is used; only the most frequently executed paths through the user -program are turned into machine code. The interpreter is also used when a guard +program are traced and turned into machine code. The interpreter is also used when a guard fails to continue the execution from the failing guard. Since PyPy wants to be a general framework, we want to reuse our tracer for @@ -259,7 +259,7 @@ the tracer, its optimizers and backends reusable for a variety of languages. The language semantics do not need to be encoded into the JIT. Instead the tracer just picks them up from the interpreter. This also means that the JIT by -construction supports the full language. +construction supports the full language as correctly as the interpreter. While the operations in a trace are those of the interpreter, the loops that are traced by the tracer are the loops in the @@ -307,7 +307,7 @@ object model that just supports classes and instances, without any inheritance or other advanced features. In the model classes contain methods. Instances have a class. 
Instances have their own attributes (or fields). When looking up an -attribute on an instance, the instances attributes are searched. If the +attribute on an instance, the instance's attributes are searched. If the attribute is not found there, the class' methods are searched. \begin{figure} @@ -371,7 +371,7 @@ \section{Hints for Controlling Optimization} \label{sec:hints} -In this section we will describe how to add two hints that allow the +In this section we will describe two hints that allow the interpreter author to increase the optimization opportunities for constant folding. If applied correctly these techniques can give really big speedups by pre-computing parts of what happens at runtime. On the other @@ -400,7 +400,7 @@ However, the optimizer can statically know the value of a variable even if it is not a constant in the original source code. For example, consider the following fragment of RPython code on the left. If the fragment is traced with -$x_1$ being \texttt{4}, the trace on the left is produced: +\texttt{x} being \texttt{4}, the trace on the right is produced: \begin{minipage}[b]{0.5\linewidth} @@ -424,10 +424,10 @@ \end{minipage} -In the trace, the value of $x_1$ is statically known after the guard. -Remember that a guard is a runtime check. The above trace will run to -completion when $x_1$ \texttt{== 4}. If the check fails, execution of the trace is -stopped and the interpreter continues to run. +A guard is a runtime check. The above trace will run to completion when $x_1$ +\texttt{== 4}. If the check fails, execution of the trace is stopped and the +interpreter continues to run. Therefore, the value of $x_1$ is statically known +to be \texttt{4} after the guard. There are cases in which it is useful to turn an arbitrary variable into a constant value. This process is called \emph{promotion} and it is an old idea @@ -440,9 +440,10 @@ optimization opportunities, even though it could have different values in practice. 
In such a place, promotion can be used. The typical reason to do that is if there is -a lot of computation depending on the value of that variable. +a lot of computation depending on the value of one variable. -Let's make this more concrete. If we trace a call to the function on the left, we get the trace on the right: +Let's make this more concrete. If we trace a call to the function (written in +RPython) on the left, we get the trace on the right: \begin{minipage}[b]{0.5\linewidth} \centering @@ -468,7 +469,7 @@ \end{minipage} Observe how the first two operations could be constant-folded if the value of -$x_1$ were known. Let's assume that the value of \texttt{x} in the Python code can vary, but does so +$x_1$ were known. Let's assume that the value of \texttt{x} in the RPython code can vary, but does so rarely, i.e. only takes a few different values at runtime. If this is the case, we can add a hint to promote \texttt{x}, like this: @@ -509,12 +510,12 @@ operation at the beginning. The promotion is turned into a \texttt{guard} operation in the trace. The guard -captures the value of $x_1$ as it was during tracing. Thus the runtime value of -\texttt{x} is being made available to the compiler to exploit. The introduced +captures the runtime value of \texttt{x} as it was during tracing, which can +then be exploited by the compiler. The introduced guard specializes the trace, because it only works if the value of $x_1$ is \texttt{4}. From the point of view of the optimizer, this guard is not any different than the one produced by the \texttt{if} -statement in the example above. After the guard, the rest of the trace can +statement in the first example. After the guard, the rest of the trace can assume that $x_1$ is equal to \texttt{4}, meaning that the optimizer will turn this trace into: @@ -565,9 +566,9 @@ In the previous section we saw a way to turn arbitrary variables into constants. All foldable operations on these constants can be constant-folded. 
This works well for -constant folding of simple types, e.g. integers. Unfortunately, in the context of an +constant folding of primitive types, e.g. integers. Unfortunately, in the context of an interpreter for a dynamic -language, most operations actually manipulate objects, not simple types. The +language, most operations actually manipulate objects, not primitive types. The operations on objects are often not foldable and might even have side-effects. If one reads a field out of a constant reference to an object this cannot necessarily be folded away because the object can be mutated. Therefore, another @@ -581,7 +582,7 @@ is less strict than that of a pure function, because it is only about actual calls during execution. All pure functions are trace-elidable though.}. From this definition follows that a call to an trace-elidable function with -constant arguments in a trace can be replaced with the result of the call. +constant arguments in a trace can be replaced with the result of the call seen during tracing. As an example, take the class on the left. Tracing the call \texttt{a.f(10)} of some instance of \texttt{A} yields the trace on the right (note how the call to @@ -621,7 +622,7 @@ which lets the interpreter author communicate invariants to the optimizer. In this case, she could decide that the \texttt{x} field of instances of \texttt{A} is immutable, and therefore \texttt{c} -is an trace-elidable function. To communicate this, there is a \texttt{elidable} decorator. +is an trace-elidable function. To communicate this, there is an \texttt{@elidable} decorator. 
If the code in \texttt{c} should be constant-folded away, we would change the class as follows: @@ -648,18 +649,18 @@ \begin{lstlisting}[mathescape,basicstyle=\ttfamily] guard($a_1$ == 0xb73984a8) -$v_1$ = c($a_1$) +$v_1$ = A.c($a_1$) $v_2$ = $v_1$ + $val_1$ $a_1$.y = $v_2$ \end{lstlisting} \end{minipage} Here, \texttt{0xb73984a8} is the address of the instance of \texttt{A} that was used -during tracing. The call to \texttt{c} is not inlined, so that the optimizer -has a chance to see it. Since the \texttt{c} function is marked as trace-elidable, and its +during tracing. The call to \texttt{A.c} is not inlined, so that the optimizer +has a chance to see it. Since the \texttt{A.c} method is marked as trace-elidable, and its argument is a constant reference, the call will be removed by the optimizer. The final -trace looks like this: +trace looks like this (assuming that the \texttt{x} field's value is \texttt{4}): % {\noop \begin{lstlisting}[mathescape,basicstyle=\ttfamily] @@ -669,14 +670,12 @@ \end{lstlisting} } -(assuming that the \texttt{x} field's value is \texttt{4}). - -On the one hand, the \texttt{elidable} annotation is very powerful. It can be +On the one hand, the \texttt{@elidable} annotation is very powerful. It can be used to constant-fold arbitrary parts of the computation in the interpreter. However, the annotation also gives the interpreter author ample opportunity to introduce bugs. If a function is annotated to be trace-elidable, but is not really, the optimizer can produce subtly wrong code. Therefore, a lot of care has to be taken when using this -annotation\footnote{The most common use case of the \texttt{elidable} +annotation\footnote{The most common use case of the \texttt{@elidable} annotation is indeed to declare the immutability of fields. Because it is so common, we have special syntactic sugar for it.}. 
We hope to introduce a debugging mode which would (slowly) check whether the annotation is applied @@ -726,28 +725,32 @@ In this implementation instances no longer use dictionaries to store their fields. Instead, they have a reference to a map, which maps field names to indexes into a storage list. The -storage list contains the actual field values. Therefore they have to be immutable, which means +storage list contains the actual field values. Maps are shared between +different instances, therefore they have to be immutable, which means that their \texttt{getindex} method is an trace-elidable function. When a new attribute is added to an instance, a new map needs to be chosen, which is done with the \texttt{add\_attribute} method on the previous map. This function is also trace-elidable, because it caches all new instances of \texttt{Map} that it creates, to make -sure that objects with the same layout have the same map. Now that we have +sure that objects with the same layout have the same map, which makes its side +effects idempotent. Now that we have introduced maps, it is safe to promote the map everywhere, because we assume that the number of different instance layouts is small. -With this changed instance implementation, the trace we had above changes to the -following that of see Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the +With this changed instance implementation, the trace we saw in Section~\ref{sub:running} changes to the +that of Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the memory address of the \texttt{Map} instance that has been promoted. Operations -that can be optimized away are grayed out, their results will be replaced by +that can be optimized away are grayed out, their results will be replaced with fixed values by the constant folding. The calls to \texttt{Map.getindex} can be optimized away, because they are calls to -a trace-elidable function and they have constant arguments. 
That means that \texttt{index1/2/3} +a trace-elidable function and they have constant arguments. That means that $index_{1/2/3}$ are constant and the guards on them can be removed. All but the first guard on the map will be optimized away too, because the map cannot have changed in between. This trace is already much better than the original one. Now we are down from five dictionary lookups to just two. +XXX separation of fast and slow-changing parts + \begin{figure} \input{code/trace2.tex} \caption{Unoptimized Trace After the Introduction of Maps} @@ -761,13 +764,13 @@ \subsection{Versioning of Classes} -Instances were optimized making the assumption that the total number of +Instances were optimized by making the assumption that the total number of different instance layouts is small compared to the number of instances. For classes we will make an even stronger assumption. We simply assume that it is rare for classes to change at all. This is not totally reasonable (sometimes classes contain counters or similar things) but for this simple example it is good -enough.\footnote{There is a more complex variant of class versions that can -accommodate class fields that change a lot better.} +enough.\footnote{There is a more complex variant of the presented technique that can +accommodate quick-changing class fields a lot better.} What we would really like is if the \texttt{Class.find\_method} method were trace-elidable. But it cannot be, because it is always possible to change the class itself. @@ -778,8 +781,8 @@ class gets changed (i.e., the \texttt{methods} dictionary changes). This means that the result of calls to \texttt{methods.get()} for a given \texttt{(name, version)} pair will always be the same, i.e. it is a trace-elidable operation. To help -the JIT to detect this case, we factor it out in a helper method which is -explicitly marked as \texttt{@elidable}. 
The refactored \texttt{Class} can +the JIT to detect this case, we factor it out in a helper method \texttt{\_find\_method} which is +marked as \texttt{@elidable}. The refactored \texttt{Class} can be seen in Figure~\ref{fig:version} \begin{figure} @@ -811,7 +814,7 @@ \label{fig:trace5} \end{figure} -The index \texttt{0} that is used to read out of the \texttt{storage} array is the result +The index \texttt{0} that is used to read out of the \texttt{storage} list is the result of the constant-folded \texttt{getindex} call. The constants \texttt{41} and \texttt{17} are the results of the folding of the \texttt{\_find\_method} calls. This final trace is now very good. It no longer performs any @@ -843,9 +846,9 @@ Another optimization is that in practice the shape of an instance is correlated with its class. In our code above, we allow both to vary independently. -Therefore we store the class of an instance on the map in PyPy's Python -interpreter. This means that we get one fewer promotion (and thus one fewer -guard) in the trace, because the class doesn't need to be promoted after the +In PyPy's Python interpreter we store the class of an instance on its map. This +means that we get one fewer promotion and thus one fewer +guard in the trace, because the class doesn't need to be promoted after the map has been. @@ -856,7 +859,7 @@ %The techniques we used above to make instance and class lookups faster are %applicable in more general cases than the one we developed them for. A more %abstract view of maps is that of splitting a data-structure into an immutable part (\eg the map) -%and a part that changes (\eg the storage array). All the computation on the +%and a part that changes (\eg the storage list). All the computation on the %immutable part is trace-elidable so that only the manipulation of the quick-changing %part remains in the trace after optimization. 
% @@ -878,7 +881,7 @@ framework\footnote{\texttt{http://www.djangoproject.com/}}; a Monte-Carlo Go AI\footnote{\texttt{http://shed-skin.blogspot.com/2009/07/ disco-elegant-python-go-player.html}}; a BZ2 decoder; a port of the classical -Richards benchmark in Python; a Python version of the Telco decimal +Richards benchmark to Python; a Python version of the Telco decimal benchmark\footnote{\texttt{http://speleotrove.com/decimal/telco.html}}, using a pure Python decimal floating point implementation. The results we see in these benchmarks seem to repeat themselves in other benchmarks using object-oriented @@ -901,7 +904,7 @@ reported in Figure~\ref{fig:times}, together with the same numbers normalized to those of the full JIT. -The optimizations give a speedup between 80\% and almost 20 times. The Richards +The optimizations give a speedup between 80\% and almost 20 times. The Richards benchmark is a particularly good case for the optimizations as it makes heavy uses of object-oriented features. Pyflate uses mostly imperative code, so does not benefit as much. Together with the optimization, PyPy outperforms CPython in @@ -948,7 +951,7 @@ PyPy uses for the same reasons \cite{bolz_tracing_2009}. Their approach suffers mostly from the low abstraction level that machine code provides. -Yermolovich et. al. describe the use of the Tamarin JavaScript tracing JIT as a +Yermolovich et. al. \cite{yermolovich_optimization_2009} describe the use of the Tamarin JavaScript tracing JIT as a meta-tracer for a Lua interpreter. They compile the normal Lua interpreter in C to ActionScript bytecode. Again, the interpreter is annotated with some hints that indicate the main interpreter loop to the tracer. No further hints are @@ -970,6 +973,8 @@ interpreters into compilers using the second futamura projection \cite{futamura_partial_1999}. Given that classical partial evaluation works strictly ahead of time, it inherently cannot support runtime feedback. 
+Some partial evaluator work at runtime, such as DyC \cite{grant_dyc:_2000}, +which also supports a concept similar to promotion (called dynamic-to-static promotion). An early attempt at building a general environment for implementing languages efficiently is described by Wolczko et. al. \cite{mario_wolczko_towards_1999}. @@ -988,8 +993,7 @@ \cite{bolz_towards_2009}. Promotion is also heavily used by Psyco \cite{rigo_representation-based_2004} (promotion is called "unlifting" in this paper) a method-based JIT compiler for Python written by -one of the authors. Promotion was also used in DyC \cite{grant_dyc:_2000}, a -runtime partial evaluator for C. Promotion is quite similar to +one of the authors. Promotion is quite similar to (polymorphic) inline caching and runtime type feedback techniques which were first used in Smalltalk \cite{deutsch_efficient_1984} and SELF \cite{hoelzle_optimizing_1991,hoelzle_optimizing_1994} implementations. @@ -1018,6 +1022,8 @@ The authors would like to thank Peng Wu, David Edelsohn and Laura Creighton for encouragement, fruitful discussions and feedback during the writing of this paper. +This research was partially supported by the BMBF funded project PyJIT (nr. 01QE0913B; +Eureka Eurostars). 
\bibliographystyle{abbrv} \bibliography{paper} diff --git a/talk/icooolps2011/code/trace1.tex b/talk/icooolps2011/code/trace1.tex --- a/talk/icooolps2011/code/trace1.tex +++ b/talk/icooolps2011/code/trace1.tex @@ -7,21 +7,21 @@ # $inst_1$.getattr("b") |\setcounter{lstnumber}{21}| $attributes_2$ = $inst_1$.attributes |\setcounter{lstnumber}{21}| -$v_1$ = dict.get($attributes_2$, "b") |\setcounter{lstnumber}{28}| +$v_1$ = dict.get($attributes_2$, "b", None) |\setcounter{lstnumber}{28}| guard($v_1$ is None) |\setcounter{lstnumber}{29}| $cls_1$ = $inst_1$.cls |\setcounter{lstnumber}{9}| $methods_1$ = cls.methods |\setcounter{lstnumber}{9}| -$result_2$ = dict.get($methods_1$, "b") |\setcounter{lstnumber}{30}| +$result_2$ = dict.get($methods_1$, "b", None) |\setcounter{lstnumber}{30}| guard($result_2$ is not None) |\setcounter{lstnumber}{-2}| $v_2$ = $result_1$ + $result_2$ |\setcounter{lstnumber}{25}| # $inst_1$.getattr("c") |\setcounter{lstnumber}{21}| $attributes_3$ = $inst_1$.attributes |\setcounter{lstnumber}{21}| -$v_3$ = dict.get($attributes_3$, "c") |\setcounter{lstnumber}{28}| +$v_3$ = dict.get($attributes_3$, "c", None) |\setcounter{lstnumber}{28}| guard($v_3$ is None) |\setcounter{lstnumber}{29}| $cls_1$ = $inst_1$.cls |\setcounter{lstnumber}{9}| $methods_2$ = cls.methods |\setcounter{lstnumber}{9}| -$result_3$ = dict.get($methods_2$, "c") |\setcounter{lstnumber}{30}| +$result_3$ = dict.get($methods_2$, "c", None) |\setcounter{lstnumber}{30}| guard($result_3$ is not None) |\setcounter{lstnumber}{-3}| $v_4$ = $v_2$ + $result_3$ |\setcounter{lstnumber}{-2}| From commits-noreply at bitbucket.org Fri Apr 15 12:19:08 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 12:19:08 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: :c Message-ID: <20110415101908.3855D2A2048@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3517:c4d5e49c0b81 Date: 2011-04-15 12:18 +0200 
http://bitbucket.org/pypy/extradoc/changeset/c4d5e49c0b81/ Log: :c diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -698,7 +698,7 @@ \subsection{Making Instance Attributes Faster Using Maps} The first step in making \texttt{getattr} faster in our object model is to optimize -away the dictionary lookups on the instances. The hints we have looked at in the +away the dictionary lookups on the instances. The hints we have introduced in the two previous sections don't seem to help with the current object model. There is no trace-elidable function to be seen, and the instance is not a candidate for promotion, because there tend to be many instances. @@ -736,7 +736,7 @@ introduced maps, it is safe to promote the map everywhere, because we assume that the number of different instance layouts is small. -With this changed instance implementation, the trace we saw in Section~\ref{sub:running} changes to the +With this adapted instance implementation, the trace we saw in Section~\ref{sub:running} changes to the that of Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the memory address of the \texttt{Map} instance that has been promoted. Operations that can be optimized away are grayed out, their results will be replaced with @@ -764,10 +764,10 @@ \subsection{Versioning of Classes} -Instances were optimized by making the assumption that the total number of +Above we assumed that the total number of different instance layouts is small compared to the number of instances. For classes we will make an even stronger assumption. We simply assume that it is rare for -classes to change at all. This is not totally reasonable (sometimes classes contain +classes to change at all. 
This is not always reasonable (sometimes classes contain counters or similar things) but for this simple example it is good enough.\footnote{There is a more complex variant of the presented technique that can accommodate quick-changing class fields a lot better.} @@ -945,10 +945,9 @@ The very first meta-tracer is described by Sullivan et. al. \cite{sullivan_dynamic_2003}. They used Dynamo RIO, the successor of Dynamo \cite{bala_dynamo:_2000} to trace through a small synthetic interpreter. As in Dynamo, tracing -happens on the machine code level. The tracer is instructed by some hints in the -tiny interpreter where the main interpreter loop is and for how long to trace to -match loops in the user-level functions. These hints are comparable to the one -PyPy uses for the same reasons \cite{bolz_tracing_2009}. Their approach suffers +happens on the machine code level. The system needs some hints to mark the main +interpreter loop and where the backward jumps in user programs are. +PyPy uses similar hints to achieve this \cite{bolz_tracing_2009}. Their approach suffers mostly from the low abstraction level that machine code provides. Yermolovich et. al. \cite{yermolovich_optimization_2009} describe the use of the Tamarin JavaScript tracing JIT as a @@ -973,7 +972,7 @@ interpreters into compilers using the second futamura projection \cite{futamura_partial_1999}. Given that classical partial evaluation works strictly ahead of time, it inherently cannot support runtime feedback. -Some partial evaluator work at runtime, such as DyC \cite{grant_dyc:_2000}, +Some partial evaluators work at runtime, such as DyC \cite{grant_dyc:_2000}, which also supports a concept similar to promotion (called dynamic-to-static promotion). 
An early attempt at building a general environment for implementing languages @@ -986,7 +985,7 @@ Somewhat relatedly, the proposed ``invokedynamic'' bytecode \cite{rose_bytecodes_2009} that will be added to the JVM is supposed to make the -implementation of dynamic languages on top of JVMs easier. The bytecode gives access to user accessible generalized inline cache. It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. +implementation of dynamic languages on top of JVMs easier. The bytecode gives the user access to generalized inline caches. It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. We already explored promotion in other context, such as earlier versions of PyPy's JIT as well as a Prolog partial evaluator diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index ea5b9ae43afc701b979e8445bb6246eb337e3f88..f8a740101f3b8f4b2e95a30938567c5ae6d11c34 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 12:19:11 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 12:19:11 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: :c Message-ID: <20110415101911.773D92A204C@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3517:c4d5e49c0b81 Date: 2011-04-15 12:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/c4d5e49c0b81/ Log: :c diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -698,7 +698,7 @@ \subsection{Making Instance Attributes Faster Using Maps} The first step in making \texttt{getattr} faster in our object model is to optimize -away the dictionary lookups on the instances. The hints we have looked at in the +away the dictionary lookups on the instances. 
The hints we have introduced in the two previous sections don't seem to help with the current object model. There is no trace-elidable function to be seen, and the instance is not a candidate for promotion, because there tend to be many instances. @@ -736,7 +736,7 @@ introduced maps, it is safe to promote the map everywhere, because we assume that the number of different instance layouts is small. -With this changed instance implementation, the trace we saw in Section~\ref{sub:running} changes to the +With this adapted instance implementation, the trace we saw in Section~\ref{sub:running} changes to the that of Figure~\ref{fig:trace2}. There \texttt{0xb74af4a8} is the memory address of the \texttt{Map} instance that has been promoted. Operations that can be optimized away are grayed out, their results will be replaced with @@ -764,10 +764,10 @@ \subsection{Versioning of Classes} -Instances were optimized by making the assumption that the total number of +Above we assumed that the total number of different instance layouts is small compared to the number of instances. For classes we will make an even stronger assumption. We simply assume that it is rare for -classes to change at all. This is not totally reasonable (sometimes classes contain +classes to change at all. This is not always reasonable (sometimes classes contain counters or similar things) but for this simple example it is good enough.\footnote{There is a more complex variant of the presented technique that can accommodate quick-changing class fields a lot better.} @@ -945,10 +945,9 @@ The very first meta-tracer is described by Sullivan et. al. \cite{sullivan_dynamic_2003}. They used Dynamo RIO, the successor of Dynamo \cite{bala_dynamo:_2000} to trace through a small synthetic interpreter. As in Dynamo, tracing -happens on the machine code level. 
The tracer is instructed by some hints in the -tiny interpreter where the main interpreter loop is and for how long to trace to -match loops in the user-level functions. These hints are comparable to the one -PyPy uses for the same reasons \cite{bolz_tracing_2009}. Their approach suffers +happens on the machine code level. The system needs some hints to mark the main +interpreter loop and where the backward jumps in user programs are. +PyPy uses similar hints to achieve this \cite{bolz_tracing_2009}. Their approach suffers mostly from the low abstraction level that machine code provides. Yermolovich et. al. \cite{yermolovich_optimization_2009} describe the use of the Tamarin JavaScript tracing JIT as a @@ -973,7 +972,7 @@ interpreters into compilers using the second futamura projection \cite{futamura_partial_1999}. Given that classical partial evaluation works strictly ahead of time, it inherently cannot support runtime feedback. -Some partial evaluator work at runtime, such as DyC \cite{grant_dyc:_2000}, +Some partial evaluators work at runtime, such as DyC \cite{grant_dyc:_2000}, which also supports a concept similar to promotion (called dynamic-to-static promotion). An early attempt at building a general environment for implementing languages @@ -986,7 +985,7 @@ Somewhat relatedly, the proposed ``invokedynamic'' bytecode \cite{rose_bytecodes_2009} that will be added to the JVM is supposed to make the -implementation of dynamic languages on top of JVMs easier. The bytecode gives access to user accessible generalized inline cache. It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. +implementation of dynamic languages on top of JVMs easier. The bytecode gives the user access to generalized inline caches. 
It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. We already explored promotion in other context, such as earlier versions of PyPy's JIT as well as a Prolog partial evaluator diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index ea5b9ae43afc701b979e8445bb6246eb337e3f88..f8a740101f3b8f4b2e95a30938567c5ae6d11c34 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 13:09:19 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 15 Apr 2011 13:09:19 +0200 (CEST) Subject: [pypy-svn] pypy default: We dont want too many guards, but a residual call to min_max_loop Message-ID: <20110415110919.CDEE32A2047@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43384:fc7a4da5aac7 Date: 2011-04-15 13:08 +0200 http://bitbucket.org/pypy/pypy/changeset/fc7a4da5aac7/ Log: We dont want too many guards, but a residual call to min_max_loop diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1452,26 +1452,41 @@ def test_silly_max(self): def main(): - i=2 - sa=0 - while i < 300: - sa+=max(*range(i)) - i+=1 + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # We dont want too many guards, but a residual call to min_max_loop + assert len([n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')]) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), p73, ConstPtr(ptr75), descr=...) + ... 
+ """) + def test_iter_max(self): def main(): - i=2 - sa=0 + i = 2 + sa = 0 while i < 300: - sa+=max(range(i)) - i+=1 + lst = range(i) + sa += max(lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # We dont want too many guards, but a residual call to min_max_loop + assert len([n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')]) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), p73, ConstPtr(ptr75), descr=...) + ... + """) From commits-noreply at bitbucket.org Fri Apr 15 13:09:23 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 15 Apr 2011 13:09:23 +0200 (CEST) Subject: [pypy-svn] pypy default: We dont want too many guards, but a residual call to min_max_loop Message-ID: <20110415110923.4560B2A204A@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43384:fc7a4da5aac7 Date: 2011-04-15 13:08 +0200 http://bitbucket.org/pypy/pypy/changeset/fc7a4da5aac7/ Log: We dont want too many guards, but a residual call to min_max_loop diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1452,26 +1452,41 @@ def test_silly_max(self): def main(): - i=2 - sa=0 - while i < 300: - sa+=max(*range(i)) - i+=1 + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # We dont want too many guards, but a residual call to min_max_loop + assert len([n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')]) < 20 + assert loop.match_by_id('max',""" + ... 
+ p76 = call_may_force(ConstClass(min_max_loop__max), p73, ConstPtr(ptr75), descr=...) + ... + """) + def test_iter_max(self): def main(): - i=2 - sa=0 + i = 2 + sa = 0 while i < 300: - sa+=max(range(i)) - i+=1 + lst = range(i) + sa += max(lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # We dont want too many guards, but a residual call to min_max_loop + assert len([n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')]) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), p73, ConstPtr(ptr75), descr=...) + ... + """) From commits-noreply at bitbucket.org Fri Apr 15 13:29:36 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:36 +0200 (CEST) Subject: [pypy-svn] pypy default: actually check something in these loops Message-ID: <20110415112936.46E0F2A2047@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43385:9f2c65d1f7ef Date: 2011-04-15 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/9f2c65d1f7ef/ Log: actually check something in these loops diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1455,23 +1455,35 @@ i=2 sa=0 while i < 300: - sa+=max(*range(i)) + sa+=max(*range(i)) # ID: max i+=1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # check that max() is turned into a residual call and not into 300 + # individual guards + assert loop.match_by_id("max", """ + ... + p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
+ """) def test_iter_max(self): def main(): i=2 sa=0 while i < 300: - sa+=max(range(i)) + sa+=max(range(i)) # ID: max i+=1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # check that max() is turned into a residual call and not into 300 + # individual guards + assert loop.match_by_id("max", """ + ... + p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) From commits-noreply at bitbucket.org Fri Apr 15 13:29:36 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:36 +0200 (CEST) Subject: [pypy-svn] pypy default: make it possible for the subprocess to skip a test Message-ID: <20110415112936.CEB192A2047@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43386:03c5587320a2 Date: 2011-04-15 12:13 +0200 http://bitbucket.org/pypy/pypy/changeset/03c5587320a2/ Log: make it possible for the subprocess to skip a test diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -268,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import _pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(_pytest.runner.Skipped, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 From commits-noreply at bitbucket.org Fri Apr 15 13:29:37 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:37 +0200 (CEST) Subject: [pypy-svn] pypy default: port test__ffi_call to test_pypy_c_new; actually untested until we merge it to jitypes2 Message-ID: 
<20110415112937.CE6A82A204A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43387:2d554823d1fd Date: 2011-04-15 12:20 +0200 http://bitbucket.org/pypy/pypy/changeset/2d554823d1fd/ Log: port test__ffi_call to test_pypy_c_new; actually untested until we merge it to jitypes2 diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1487,3 +1487,42 @@ p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) ... """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stdout.write('SKIP: cannot import _ffi') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 From commits-noreply at bitbucket.org Fri Apr 15 13:29:39 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:39 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads; 
apparently, I and hakan almost did the same thing :-) Message-ID: <20110415112939.3EA782A204A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43388:320b05168395 Date: 2011-04-15 13:29 +0200 http://bitbucket.org/pypy/pypy/changeset/320b05168395/ Log: merge heads; apparently, I and hakan almost did the same thing :-) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1452,40 +1452,44 @@ def test_silly_max(self): def main(): - i=2 - sa=0 - while i < 300: - sa+=max(*range(i)) # ID: max - i+=1 + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # check that max() is turned into a residual call and not into 300 - # individual guards - assert loop.match_by_id("max", """ - ... - p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) - ... + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... """) - + def test_iter_max(self): def main(): - i=2 - sa=0 + i = 2 + sa = 0 while i < 300: - sa+=max(range(i)) # ID: max - i+=1 + lst = range(i) + sa += max(lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # check that max() is turned into a residual call and not into 300 - # individual guards - assert loop.match_by_id("max", """ - ... - p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) - ... 
+ # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... """) def test__ffi_call(self): From commits-noreply at bitbucket.org Fri Apr 15 13:29:45 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:45 +0200 (CEST) Subject: [pypy-svn] pypy default: actually check something in these loops Message-ID: <20110415112945.CC1012A2048@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43385:9f2c65d1f7ef Date: 2011-04-15 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/9f2c65d1f7ef/ Log: actually check something in these loops diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1455,23 +1455,35 @@ i=2 sa=0 while i < 300: - sa+=max(*range(i)) + sa+=max(*range(i)) # ID: max i+=1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # check that max() is turned into a residual call and not into 300 + # individual guards + assert loop.match_by_id("max", """ + ... + p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) def test_iter_max(self): def main(): i=2 sa=0 while i < 300: - sa+=max(range(i)) + sa+=max(range(i)) # ID: max i+=1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # XXX: what do we want to check here? + # check that max() is turned into a residual call and not into 300 + # individual guards + assert loop.match_by_id("max", """ + ... 
+ p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) From commits-noreply at bitbucket.org Fri Apr 15 13:29:46 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:46 +0200 (CEST) Subject: [pypy-svn] pypy default: make it possible for the subprocess to skip a test Message-ID: <20110415112946.6CD602A2048@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43386:03c5587320a2 Date: 2011-04-15 12:13 +0200 http://bitbucket.org/pypy/pypy/changeset/03c5587320a2/ Log: make it possible for the subprocess to skip a test diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -268,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import _pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(_pytest.runner.Skipped, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 From commits-noreply at bitbucket.org Fri Apr 15 13:29:46 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:46 +0200 (CEST) Subject: [pypy-svn] pypy default: port test__ffi_call to test_pypy_c_new; actually untested until we merge it to jitypes2 Message-ID: <20110415112946.EBD342A2048@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43387:2d554823d1fd Date: 2011-04-15 12:20 +0200 http://bitbucket.org/pypy/pypy/changeset/2d554823d1fd/ Log: port test__ffi_call to test_pypy_c_new; actually untested until we merge it to jitypes2 diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1487,3 +1487,42 @@ p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) ... """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stdout.write('SKIP: cannot import _ffi') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 From commits-noreply at bitbucket.org Fri Apr 15 13:29:48 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 13:29:48 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads; apparently, I and hakan almost did the same thing :-) Message-ID: <20110415112948.8E7A02A204E@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43388:320b05168395 Date: 2011-04-15 13:29 +0200 http://bitbucket.org/pypy/pypy/changeset/320b05168395/ Log: merge heads; apparently, I and hakan almost did the same thing :-) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1452,40 +1452,44 @@ def test_silly_max(self): def main(): - i=2 - sa=0 - while i < 300: - sa+=max(*range(i)) # ID: max - i+=1 + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # check that max() is turned into a residual call and not into 300 - # individual guards - assert loop.match_by_id("max", """ - ... - p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) - ... + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... """) - + def test_iter_max(self): def main(): - i=2 - sa=0 + i = 2 + sa = 0 while i < 300: - sa+=max(range(i)) # ID: max - i+=1 + lst = range(i) + sa += max(lst) # ID: max + i += 1 return sa log = self.run(main, [], threshold=200) assert log.result == main() loop, = log.loops_by_filename(self.filepath) - # check that max() is turned into a residual call and not into 300 - # individual guards - assert loop.match_by_id("max", """ - ... - p58 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) - ... + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
""") def test__ffi_call(self): From commits-noreply at bitbucket.org Fri Apr 15 13:40:48 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 13:40:48 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: replace one itemize by a sentence, which gives me enough space to resurrect the Message-ID: <20110415114048.028EF2A2047@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3518:0e0627c8d4c1 Date: 2011-04-15 13:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/0e0627c8d4c1/ Log: replace one itemize by a sentence, which gives me enough space to resurrect the meta-paragraph. diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -378,15 +378,10 @@ hand, if applied incorrectly they might lead to code bloat, thus making the resulting program actually slower. -For constant folding to work, two conditions need to be met: - -\begin{itemize} - \item the arguments of an operation actually need to all be constant, - i.e. statically known by the optimizer - \item the operation needs to be \emph{constant-foldable}, i.e. always yield - the same result given the same arguments. -\end{itemize} - +For constant folding to work, two conditions need to be met: the arguments of +an operation actually need to all be constant, i.e. statically known by the +optimizer and the operation needs to be \emph{constant-foldable}, i.e. always +yield the same result given the same arguments. There is one kind of hint for both of these conditions. @@ -395,7 +390,6 @@ It is worth clarifying what a ``constant'' is in this context. A variable of the trace is said to be constant if its value is statically known by the optimizer. - The simplest example of constants are literal values, such as \texttt{1}. However, the optimizer can statically know the value of a variable even if it is not a constant in the original source code. 
For example, consider the @@ -698,8 +692,8 @@ \subsection{Making Instance Attributes Faster Using Maps} The first step in making \texttt{getattr} faster in our object model is to optimize -away the dictionary lookups on the instances. The hints we have introduced in the -two previous sections don't seem to help with the current object model. There is +away the dictionary lookups on the instances. The hints of the previous section +don't seem to help with the current object model. There is no trace-elidable function to be seen, and the instance is not a candidate for promotion, because there tend to be many instances. @@ -749,7 +743,12 @@ between. This trace is already much better than the original one. Now we are down from five dictionary lookups to just two. -XXX separation of fast and slow-changing parts +The technique to make instance lookups faster is applicable in more general +cases. A more abstract view of maps is that of splitting a data-structure into +an immutable part (\eg the map) and a part that changes (\eg the storage list). +All the computation on the immutable part is trace-elidable so that only the +manipulation of the quick-changing part remains in the trace after +optimization. \begin{figure} \input{code/trace2.tex} @@ -856,12 +855,6 @@ %\subsection{More General Patterns} % -%The techniques we used above to make instance and class lookups faster are -%applicable in more general cases than the one we developed them for. A more -%abstract view of maps is that of splitting a data-structure into an immutable part (\eg the map) -%and a part that changes (\eg the storage list). All the computation on the -%immutable part is trace-elidable so that only the manipulation of the quick-changing -%part remains in the trace after optimization. % %Similarly, versions can be used to constant-fold arbitrary functions of large data %structures. 
The version needs to be updated carefully every time the result of diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index f8a740101f3b8f4b2e95a30938567c5ae6d11c34..295a4bb35ae37f83812c8d212381354e07d8d44d GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 13:40:52 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 13:40:52 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: replace one itemize by a sentence, which gives me enough space to resurrect the Message-ID: <20110415114052.CF4392A204C@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3518:0e0627c8d4c1 Date: 2011-04-15 13:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/0e0627c8d4c1/ Log: replace one itemize by a sentence, which gives me enough space to resurrect the meta-paragraph. diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -378,15 +378,10 @@ hand, if applied incorrectly they might lead to code bloat, thus making the resulting program actually slower. -For constant folding to work, two conditions need to be met: - -\begin{itemize} - \item the arguments of an operation actually need to all be constant, - i.e. statically known by the optimizer - \item the operation needs to be \emph{constant-foldable}, i.e. always yield - the same result given the same arguments. -\end{itemize} - +For constant folding to work, two conditions need to be met: the arguments of +an operation actually need to all be constant, i.e. statically known by the +optimizer and the operation needs to be \emph{constant-foldable}, i.e. always +yield the same result given the same arguments. There is one kind of hint for both of these conditions. @@ -395,7 +390,6 @@ It is worth clarifying what a ``constant'' is in this context. A variable of the trace is said to be constant if its value is statically known by the optimizer. 
- The simplest example of constants are literal values, such as \texttt{1}. However, the optimizer can statically know the value of a variable even if it is not a constant in the original source code. For example, consider the @@ -698,8 +692,8 @@ \subsection{Making Instance Attributes Faster Using Maps} The first step in making \texttt{getattr} faster in our object model is to optimize -away the dictionary lookups on the instances. The hints we have introduced in the -two previous sections don't seem to help with the current object model. There is +away the dictionary lookups on the instances. The hints of the previous section +don't seem to help with the current object model. There is no trace-elidable function to be seen, and the instance is not a candidate for promotion, because there tend to be many instances. @@ -749,7 +743,12 @@ between. This trace is already much better than the original one. Now we are down from five dictionary lookups to just two. -XXX separation of fast and slow-changing parts +The technique to make instance lookups faster is applicable in more general +cases. A more abstract view of maps is that of splitting a data-structure into +an immutable part (\eg the map) and a part that changes (\eg the storage list). +All the computation on the immutable part is trace-elidable so that only the +manipulation of the quick-changing part remains in the trace after +optimization. \begin{figure} \input{code/trace2.tex} @@ -856,12 +855,6 @@ %\subsection{More General Patterns} % -%The techniques we used above to make instance and class lookups faster are -%applicable in more general cases than the one we developed them for. A more -%abstract view of maps is that of splitting a data-structure into an immutable part (\eg the map) -%and a part that changes (\eg the storage list). All the computation on the -%immutable part is trace-elidable so that only the manipulation of the quick-changing -%part remains in the trace after optimization. 
% %Similarly, versions can be used to constant-fold arbitrary functions of large data %structures. The version needs to be updated carefully every time the result of diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index f8a740101f3b8f4b2e95a30938567c5ae6d11c34..295a4bb35ae37f83812c8d212381354e07d8d44d GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 14:04:53 2011 From: commits-noreply at bitbucket.org (fijal) Date: Fri, 15 Apr 2011 14:04:53 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: use an out of line guard Message-ID: <20110415120453.2419D2A2047@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43389:82bddec49cab Date: 2011-04-15 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/82bddec49cab/ Log: use an out of line guard diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,6 +85,7 @@ 'nslots', 'instancetypedef', 'terminator', + '_version_tag?', ] # for config.objspace.std.getattributeshortcut From commits-noreply at bitbucket.org Fri Apr 15 14:05:16 2011 From: commits-noreply at bitbucket.org (fijal) Date: Fri, 15 Apr 2011 14:05:16 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: merge default Message-ID: <20110415120516.D6A3B2A2047@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43390:58c9e01d357e Date: 2011-04-15 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/58c9e01d357e/ Log: merge default diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ 
-45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -167,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - # string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. 
There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. 
+ halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' 
in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for 
translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -119,13 +119,16 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. -License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -158,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. 
- CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. - UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ListTests: diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_ajit.py copy from pypy/jit/metainterp/test/test_basic.py copy to pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -4,269 +4,17 @@ from pypy.rlib.jit import loop_invariant from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong from pypy import conftest from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory from 
pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" 
- - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class BasicTests: diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass = JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', 
jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), 
jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but 
we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside from pypy.rlib.rarithmetic import intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.rclass import FieldListAccessor from pypy.jit.metainterp.warmspot import get_stats, get_translator from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class WrongResult(Exception): pass diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, -1, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,6 +1010,7 @@ """) def test_func_defaults(self): + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1061,7 +1063,7 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, i25) diff --git a/pypy/jit/metainterp/test/test_dlist.py b/pypy/jit/metainterp/test/test_dlist.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_dlist.py +++ /dev/null @@ -1,165 +0,0 @@ - -import py -from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin -py.test.skip("Disabled") - -class ListTests: - def test_basic(self): - myjitdriver = 
JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - n -= 1 - return l[0] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(getarrayitem_gc=0, setarrayitem_gc=1) -# XXX fix codewriter -# guard_exception=0, -# guard_no_exception=1) - - def test_list_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=2, getarrayitem_gc=0) - - def test_list_escapes_but_getitem_goes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - x = l[2] - y = l[1] + l[2] - l[1] = x + y - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=3, getarrayitem_gc=0) - - def test_list_of_ptrs(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - class A(object): - def __init__(self, x): - self.x = x - - def f(n): - l = [A(3)] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0].x + 1 - l[0] = A(x) - n -= 1 - return l[0].x - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=1, getarrayitem_gc=0, - new_with_vtable=1) # A should escape - - def test_list_checklength(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 
3: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [10, 13], listops=True) - assert res == f(10, 13) - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_list_checklength_run(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) > n: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [50, 13], listops=True) - assert res == 42 - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_checklength_cannot_go_away(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n): - l = [0] * n - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return len(l) - l = [0] * n - n -= 1 - return 0 - - res = self.meta_interp(f, [10], listops=True) - assert res == 2 - self.check_loops(arraylen_gc=1) - - def test_list_indexerror(self): - # this is an example where IndexError is raised before - # even getting to the JIT - py.test.skip("I suspect bug somewhere outside of the JIT") - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - l[n] = n - n -= 1 - return l[3] - - def g(n): - try: - f(n) - return 0 - except IndexError: - return 42 - - res = self.meta_interp(g, [10]) - assert res == 42 - self.check_loops(setitem=2) - -class TestLLtype(ListTests, LLJitMixin): - pass diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res 
diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from pypy.rlib.jit import JitDriver, unroll_safe -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -2757,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2791,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2828,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver, hint, purefunction from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class SendTests(object): diff --git 
a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_basic.py +++ /dev/null @@ -1,2411 +0,0 @@ -import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong -from pypy import conftest -from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper -from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - 
num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, 
jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - - -class BasicTests: - - def test_basic(self): - def f(x, y): - return x + y - res = self.interp_operations(f, [40, 2]) - assert res == 42 - - def test_basic_inst(self): - class A: - pass - def f(n): - a = A() - a.x = n - return a.x - res = self.interp_operations(f, [42]) - assert res == 42 - - def test_uint_floordiv(self): - from pypy.rlib.rarithmetic import r_uint - - def f(a, b): - a = r_uint(a) - b = r_uint(b) - return a/b - - res = self.interp_operations(f, [-4, 3]) - assert res == long(r_uint(-4)) // 3 - - def test_direct_call(self): - def g(n): - return n + 2 - def f(a, b): - return g(a) + g(b) - 
res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_direct_call_with_guard(self): - def g(n): - if n < 0: - return 0 - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - if self.basic: - found = 0 - for op in get_stats().loops[0]._all_operations(): - if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) - found += 1 - assert found == 1 - - def test_loop_invariant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loop_invariant_mul_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - b = y * 2 - res += ovfcheck(x * x) + b - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) - - def test_loop_invariant_mul_bridge1(self): - myjitdriver = 
JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - x += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 3427 - self.check_loop_count(3) - - def test_loop_invariant_mul_bridge_maintaining1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - res += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1167 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - - def test_loop_invariant_mul_bridge_maintaining2(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - z = x * x - res += z - if y<16: - res += z - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1692 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - def test_loop_invariant_mul_bridge_maintaining3(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) - def f(x, y, m): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m) - myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m) - z = x * x - res += z - if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x.intval * x.intval - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 
'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loops_are_transient(self): - import gc, weakref - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - if y%2: - res *= 2 - y -= 1 - return res - wr_loops = [] - old_init = history.TreeLoop.__init__.im_func - try: - def track_init(self, name): - old_init(self, name) - wr_loops.append(weakref.ref(self)) - history.TreeLoop.__init__ = track_init - res = self.meta_interp(f, [6, 15], no_stats=True) - finally: - history.TreeLoop.__init__ = old_init - - assert res == f(6, 15) - gc.collect() - - #assert not [wr for wr in wr_loops if wr()] - for loop in [wr for wr in wr_loops if wr()]: - assert loop().name == 'short preamble' - - def test_string(self): - def f(n): - bytecode = 'adlfkj' + chr(n) - if n < len(bytecode): - return bytecode[n] - else: - return "?" - res = self.interp_operations(f, [1]) - assert res == ord("d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord("?") - - def test_chr2str(self): - def f(n): - s = chr(n) - return s[0] - res = self.interp_operations(f, [3]) - assert res == 3 - - def test_unicode(self): - def f(n): - bytecode = u'adlfkj' + unichr(n) - if n < len(bytecode): - return bytecode[n] - else: - return u"?" 
- res = self.interp_operations(f, [1]) - assert res == ord(u"d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord(u"?") - - def test_residual_call(self): - @dont_look_inside - def externfn(x, y): - return x * y - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) - - def test_residual_call_pure(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - n = hint(n, promote=True) - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is not recorded in the history if all-constant args - self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure=0) - - def test_residual_call_pure_1(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is recorded in the history if not-all-constant args - self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure=1) - - def test_residual_call_pure_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def externfn(x): - return x - 1 - externfn._pure_function_ = True - def f(n): - while n > 0: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - n = externfn(n) - return n - res = self.meta_interp(f, [7]) - assert res == 0 - # CALL_PURE is recorded in the history, but turned into a CALL - # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) - - def test_constfold_call_pure(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - n -= externfn(m) - return n - res = 
self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_constfold_call_pure_2(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - class V: - def __init__(self, value): - self.value = value - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - v = V(m) - n -= externfn(v.value) - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_pure_function_returning_object(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - class V: - def __init__(self, x): - self.x = x - v1 = V(1) - v2 = V(2) - def externfn(x): - if x: - return v1 - else: - return v2 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - m = V(m).x - n -= externfn(m).x + externfn(m + m - m).x - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) - - def test_constant_across_mp(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - class X(object): - pass - def f(n): - while n > -100: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - x = X() - x.arg = 5 - if n <= 0: break - n -= x.arg - x.arg = 6 # prevents 'x.arg' from being annotated as constant - return n - res = self.meta_interp(f, [31]) - assert res == -4 - - def test_stopatxpolicy(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def internfn(y): - return y * 3 - def externfn(y): - return y % 4 - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if y & 
7: - f = internfn - else: - f = externfn - f(y) - y -= 1 - return 42 - policy = StopAtXPolicy(externfn) - res = self.meta_interp(f, [31], policy=policy) - assert res == 42 - self.check_loops(int_mul=1, int_mod=0) - - def test_we_are_jitted(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if we_are_jitted(): - x = 1 - else: - x = 10 - y -= x - return y - assert f(55) == -5 - res = self.meta_interp(f, [55]) - assert res == -1 - - def test_confirm_enter_jit(self): - def confirm_enter_jit(x, y): - return x <= 5 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - confirm_enter_jit = confirm_enter_jit) - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - y -= x - return y - # - res = self.meta_interp(f, [10, 84]) - assert res == -6 - self.check_loop_count(0) - # - res = self.meta_interp(f, [3, 19]) - assert res == -2 - self.check_loop_count(1) - - def test_can_never_inline(self): - def can_never_inline(x): - return x > 50 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - can_never_inline = can_never_inline) - @dont_look_inside - def marker(): - pass - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - x += 1 - if x == 4 or x == 61: - marker() - y -= x - return y - # - res = self.meta_interp(f, [3, 6], repeat=7) - assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle - # - res = self.meta_interp(f, [60, 84], repeat=7) - assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately - - def test_format(self): - def f(n): - return len("<%d>" % n) - res = self.interp_operations(f, [421]) - assert res == 5 - - def test_switch(self): - def f(n): - if n == -5: return 12 - elif n == 2: return 51 - elif n == 7: return 1212 - else: return 42 - res = self.interp_operations(f, 
[7]) - assert res == 1212 - res = self.interp_operations(f, [12311]) - assert res == 42 - - def test_r_uint(self): - from pypy.rlib.rarithmetic import r_uint - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - y = r_uint(y) - while y > 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - y -= 1 - return y - res = self.meta_interp(f, [10]) - assert res == 0 - - def test_uint_operations(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - return ((r_uint(n) - 123) >> 1) <= r_uint(456) - res = self.interp_operations(f, [50]) - assert res == False - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_uint_condition(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - if ((r_uint(n) - 123) >> 1) <= r_uint(456): - return 24 - else: - return 12 - res = self.interp_operations(f, [50]) - assert res == 12 - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_int_between(self): - # - def check(arg1, arg2, arg3, expect_result, **expect_operations): - from pypy.rpython.lltypesystem import lltype - from pypy.rpython.lltypesystem.lloperation import llop - loc = locals().copy() - exec py.code.Source(""" - def f(n, m, p): - arg1 = %(arg1)s - arg2 = %(arg2)s - arg3 = %(arg3)s - return llop.int_between(lltype.Bool, arg1, arg2, arg3) - """ % locals()).compile() in loc - res = self.interp_operations(loc['f'], [5, 6, 7]) - assert res == expect_result - self.check_operations_history(expect_operations) - # - check('n', 'm', 'p', True, int_sub=2, uint_lt=1) - check('n', 'p', 'm', False, int_sub=2, uint_lt=1) - # - check('n', 'm', 6, False, int_sub=2, uint_lt=1) - # - check('n', 4, 'p', False, int_sub=2, uint_lt=1) - check('n', 5, 'p', True, int_sub=2, uint_lt=1) - check('n', 8, 'p', False, int_sub=2, uint_lt=1) - # - check('n', 6, 7, True, int_sub=2, uint_lt=1) - # - check(-2, 'n', 'p', True, int_sub=2, uint_lt=1) - 
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1) - check(-2, 'p', 'm', False, int_sub=2, uint_lt=1) - #check(0, 'n', 'p', True, uint_lt=1) xxx implement me - #check(0, 'm', 'p', True, uint_lt=1) - #check(0, 'p', 'm', False, uint_lt=1) - # - check(2, 'n', 6, True, int_sub=1, uint_lt=1) - check(2, 'm', 6, False, int_sub=1, uint_lt=1) - check(2, 'p', 6, False, int_sub=1, uint_lt=1) - check(5, 'n', 6, True, int_eq=1) # 6 == 5+1 - check(5, 'm', 6, False, int_eq=1) # 6 == 5+1 - # - check(2, 6, 'm', False, int_sub=1, uint_lt=1) - check(2, 6, 'p', True, int_sub=1, uint_lt=1) - # - check(2, 40, 6, False) - check(2, 40, 60, True) - - def test_getfield(self): - class A: - pass - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=1) - - def test_getfield_immutable(self): - class A: - _immutable_ = True - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=0) - - def test_setfield_bool(self): - class A: - def __init__(self): - self.flag = True - myjitdriver = JitDriver(greens = [], reds = ['n', 'obj']) - def f(n): - obj = A() - res = False - while n > 0: - myjitdriver.can_enter_jit(n=n, obj=obj) - myjitdriver.jit_merge_point(n=n, obj=obj) - obj.flag = False - n -= 1 - return res - res = self.meta_interp(f, [7]) - assert type(res) == bool - assert not res - - def test_switch_dict(self): - def f(x): - if x == 1: return 61 - elif x == 2: return 511 - elif x == 3: return -22 - elif x == 4: return 81 - elif x == 5: return 17 - elif x == 6: return 54 - elif x == 7: return 987 - elif x == 8: return -12 - elif x == 9: return 321 - return -1 - res = self.interp_operations(f, [5]) - assert res == 17 - res = self.interp_operations(f, [15]) - assert res == -1 - 
- def test_int_add_ovf(self): - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -98 - res = self.interp_operations(f, [1, sys.maxint]) - assert res == -42 - - def test_int_sub_ovf(self): - def f(x, y): - try: - return ovfcheck(x - y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -102 - res = self.interp_operations(f, [1, -sys.maxint]) - assert res == -42 - - def test_int_mul_ovf(self): - def f(x, y): - try: - return ovfcheck(x * y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -200 - res = self.interp_operations(f, [-3, sys.maxint//2]) - assert res == -42 - - def test_mod_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y']) - def f(n, x, y): - while n > 0: - myjitdriver.can_enter_jit(x=x, y=y, n=n) - myjitdriver.jit_merge_point(x=x, y=y, n=n) - n -= ovfcheck(x % y) - return n - res = self.meta_interp(f, [20, 1, 2]) - assert res == 0 - self.check_loops(call=0) - - def test_abs(self): - myjitdriver = JitDriver(greens = [], reds = ['i', 't']) - def f(i): - t = 0 - while i < 10: - myjitdriver.can_enter_jit(i=i, t=t) - myjitdriver.jit_merge_point(i=i, t=t) - t += abs(i) - i += 1 - return t - res = self.meta_interp(f, [-5]) - assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9 - - def test_float(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - x = float(x) - y = float(y) - res = 0.0 - while y > 0.0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1.0 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42.0 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) - - def test_print(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def f(n): - while n > 0: - 
myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - print n - n -= 1 - return n - res = self.meta_interp(f, [7]) - assert res == 0 - - def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n'], greens = []) - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - n -= 1 - - self.meta_interp(f, [20], repeat=7) - self.check_tree_loop_count(2) # the loop and the entry path - # we get: - # ENTER - compile the new loop and the entry bridge - # ENTER - compile the leaving path - self.check_enter_count(2) - - def test_bridge_from_interpreter_2(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n'], greens = []) - glob = [1] - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - if n == 17 and glob[0]: - glob[0] = 0 - x = n + 1 - y = n + 2 - z = n + 3 - k = n + 4 - n -= 1 - n += x + y + z + k - n -= x + y + z + k - n -= 1 - - self.meta_interp(f, [20], repeat=7) - - def test_bridge_from_interpreter_3(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - glob.x = 1 - x = 0 - y = 0 - z = 0 - k = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k) - mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k) - x += 10 - y += 3 - z -= 15 - k += 4 - if n == 17 and glob.x: - glob.x = 0 - x += n + 1 - y += n + 2 - z += n + 3 - k += n + 4 - n -= 1 - n -= 1 - return x + 2*y + 3*z + 5*k + 13*n - - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_bridge_from_interpreter_4(self): - jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - - def f(n, k): - while n > 0: - jitdriver.can_enter_jit(n=n, k=k) - jitdriver.jit_merge_point(n=n, k=k) - if k: - n -= 2 - else: - n -= 1 - return n + k - - from pypy.rpython.test.test_llinterp import 
get_interpreter, clear_tcache - from pypy.jit.metainterp.warmspot import WarmRunnerDesc - - interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) - clear_tcache() - translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - warmrunnerdesc = WarmRunnerDesc(translator, - CPUClass=self.CPUClass) - state = warmrunnerdesc.jitdrivers_sd[0].warmstate - state.set_param_threshold(3) # for tests - state.set_param_trace_eagerness(0) # for tests - warmrunnerdesc.finish() - for n, k in [(20, 0), (20, 1)]: - interp.eval_graph(graph, [n, k]) - - def test_bridge_leaving_interpreter_5(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - x = 0 - glob.x = 1 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - glob.x += 1 - x += 3 - n -= 1 - glob.x += 100 - return glob.x + x - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_instantiate_classes(self): - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - def f(n): - if n > 5: - cls = A - else: - cls = B - return cls().foo - res = self.interp_operations(f, [3]) - assert res == 8 - res = self.interp_operations(f, [13]) - assert res == 72 - - def test_instantiate_does_not_call(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - - def f(n): - x = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - if n % 2 == 0: - cls = A - else: - cls = B - inst = cls() - x += inst.foo - n -= 1 - return x - res = self.meta_interp(f, [20], enable_opts='') - assert res == f(20) - self.check_loops(call=0) - - def test_zerodivisionerror(self): - # test the case of exception-raising operation that is not delegated - # to the backend at all: ZeroDivisionError - # - def f(n): - assert n >= 0 - try: - 
return ovfcheck(5 % n) - except ZeroDivisionError: - return -666 - except OverflowError: - return -777 - res = self.interp_operations(f, [0]) - assert res == -666 - # - def f(n): - assert n >= 0 - try: - return ovfcheck(6 // n) - except ZeroDivisionError: - return -667 - except OverflowError: - return -778 - res = self.interp_operations(f, [0]) - assert res == -667 - - def test_div_overflow(self): - import sys - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - try: - res += llop.int_floordiv_ovf(lltype.Signed, - -sys.maxint-1, x) - x += 5 - except OverflowError: - res += 100 - y -= 1 - return res - res = self.meta_interp(f, [-41, 16]) - assert res == ((-sys.maxint-1) // (-41) + - (-sys.maxint-1) // (-36) + - (-sys.maxint-1) // (-31) + - (-sys.maxint-1) // (-26) + - (-sys.maxint-1) // (-21) + - (-sys.maxint-1) // (-16) + - (-sys.maxint-1) // (-11) + - (-sys.maxint-1) // (-6) + - 100 * 8) - - def test_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - if n: - obj = A() - else: - obj = B() - return isinstance(obj, B) - res = self.interp_operations(fn, [0]) - assert res - self.check_operations_history(guard_class=1) - res = self.interp_operations(fn, [1]) - assert not res - - def test_isinstance_2(self): - driver = JitDriver(greens = [], reds = ['n', 'sum', 'x']) - class A: - pass - class B(A): - pass - class C(B): - pass - - def main(): - return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100 - - def f(n, x): - sum = 0 - while n > 0: - driver.can_enter_jit(x=x, n=n, sum=sum) - driver.jit_merge_point(x=x, n=n, sum=sum) - if isinstance(x, B): - sum += 1 - n -= 1 - return sum - - res = self.meta_interp(main, []) - assert res == 55 - - def test_assert_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - # this should only be 
called with n != 0 - if n: - obj = B() - obj.a = n - else: - obj = A() - obj.a = 17 - assert isinstance(obj, B) - return obj.a - res = self.interp_operations(fn, [1]) - assert res == 1 - self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) - - def test_r_dict(self): - from pypy.rlib.objectmodel import r_dict - class FooError(Exception): - pass - def myeq(n, m): - return n == m - def myhash(n): - if n < 0: - raise FooError - return -n - def f(n): - d = r_dict(myeq, myhash) - for i in range(10): - d[i] = i*i - try: - return d[n] - except FooError: - return 99 - res = self.interp_operations(f, [5]) - assert res == f(5) - - def test_free_object(self): - import weakref - from pypy.rlib import rgc - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - class X(object): - pass - def main(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= x.foo - def g(n): - x = X() - x.foo = 2 - main(n, x) - x.foo = 5 - return weakref.ref(x) - def f(n): - r = g(n) - rgc.collect(); rgc.collect(); rgc.collect() - return r() is None - # - assert f(30) == 1 - res = self.meta_interp(f, [30], no_stats=True) - assert res == 1 - - def test_pass_around(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - - def call(): - pass - - def f(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - if n % 2: - call() - if n == 8: - return x - x = 3 - else: - x = 5 - n -= 1 - return 0 - - self.meta_interp(f, [40, 0]) - - def test_const_inputargs(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x']) - def f(n, x): - m = 0x7FFFFFFF - while n > 0: - myjitdriver.can_enter_jit(m=m, n=n, x=x) - myjitdriver.jit_merge_point(m=m, n=n, x=x) - x = 42 - n -= 1 - m = m >> 1 - return x - - res = self.meta_interp(f, [50, 1], enable_opts='') - assert res == 42 - - 
def test_set_param(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - def g(n): - x = 0 - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= 1 - x += n - return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) - return g(n) - - res = self.meta_interp(f, [10, 3]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(2) - - res = self.meta_interp(f, [10, 13]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(0) - - def test_dont_look_inside(self): - @dont_look_inside - def g(a, b): - return a + b - def f(a, b): - return g(a, b) - res = self.interp_operations(f, [3, 5]) - assert res == 8 - self.check_operations_history(int_add=0, call=1) - - def test_listcomp(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) - def f(x, y): - lst = [0, 0, 0] - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, lst=lst) - myjitdriver.jit_merge_point(x=x, y=y, lst=lst) - lst = [i+x for i in lst if i >=0] - y -= 1 - return lst[0] - res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True) - # XXX: the loop looks inefficient - assert res == 42 - - def test_tuple_immutable(self): - def new(a, b): - return a, b - def f(a, b): - tup = new(a, b) - return tup[1] - res = self.interp_operations(f, [3, 5]) - assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=1) - - def test_oosend_look_inside_only_one(self): - class A: - pass - class B(A): - def g(self): - return 123 - class C(A): - @dont_look_inside - def g(self): - return 456 - def f(n): - if n > 3: - x = B() - else: - x = C() - return x.g() + x.g() - res = self.interp_operations(f, [10]) - assert res == 123 * 2 - res = self.interp_operations(f, [-10]) - assert res == 456 * 2 - - def test_residual_external_call(self): - import math - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - def f(x, y): - x = float(x) - 
res = 0.0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) - res += ipart - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops(call=1) - - def test_merge_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 5 - class B(A): - def g(self, y): - return y - 3 - - a1 = A() - a2 = A() - b = B() - def f(x): - l = [a1] * 100 + [a2] * 100 + [b] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - x = a.g(x) - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) - - def test_merge_guardnonnull_guardclass(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class 
B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [b1] * 100 + [None] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) - - def test_merge_guardnonnull_guardvalue_2(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - a2 = A() - b1 = B() - def f(x): - l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [399], listops=True) - assert res == f(399) - self.check_loops(guard_class=0, 
guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_residual_call_doesnt_lose_info(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) - - class A(object): - pass - - globall = [""] - @dont_look_inside - def g(x): - globall[0] = str(x) - return x - - def f(x): - y = A() - y.v = x - l = [0] - while y.v > 0: - myjitdriver.can_enter_jit(x=x, y=y, l=l) - myjitdriver.jit_merge_point(x=x, y=y, l=l) - l[0] = y.v - lc = l[0] - y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 - return y.v - res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) - - def test_guard_isnull_nonnull(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - - @dont_look_inside - def create(x): - if x >= -40: - return A() - return None - - def f(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - obj = create(x-1) - if obj is not None: - res += 1 - obj2 = create(x-1000) - if obj2 is None: - res += 1 - x -= 1 - return res - res = self.meta_interp(f, [21]) - assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) - - def test_loop_invariant1(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - a = A() - a.current_a = A() - a.current_a.x = 1 - @loop_invariant - def f(): - return a.current_a - - def g(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - res += f().x - res += f().x - res += f().x - x -= 1 - a.current_a = A() - a.current_a.x = 2 - return res - res = self.meta_interp(g, [21]) - assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) - - def 
test_bug_optimizeopt_mutates_ops(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) - class A(object): - pass - class B(A): - pass - - glob = A() - glob.a = None - def f(x): - res = 0 - a = A() - a.x = 0 - glob.a = A() - const = 2 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const) - myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const) - if type(glob.a) is B: - res += 1 - if a is None: - a = A() - a.x = x - glob.a = B() - const = 2 - else: - const = hint(const, promote=True) - x -= const - res += a.x - a = None - glob.a = A() - const = 1 - return res - res = self.meta_interp(f, [21]) - assert res == f(21) - - def test_getitem_indexerror(self): - lst = [10, 4, 9, 16] - def f(n): - try: - return lst[n] - except IndexError: - return -2 - res = self.interp_operations(f, [2]) - assert res == 9 - res = self.interp_operations(f, [4]) - assert res == -2 - res = self.interp_operations(f, [-4]) - assert res == 10 - res = self.interp_operations(f, [-5]) - assert res == -2 - - def test_guard_always_changing_value(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - a = A() - hint(a, promote=True) - x -= 1 - self.meta_interp(f, [50]) - self.check_loop_count(1) - # this checks that the logic triggered by make_a_counter_per_value() - # works and prevents generating tons of bridges - - def test_swap_values(self): - def f(x, y): - if x > 5: - x, y = y, x - return x - y - res = self.interp_operations(f, [10, 2]) - assert res == -8 - res = self.interp_operations(f, [3, 2]) - assert res == 1 - - def test_raw_malloc_and_access(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Signed) - - def f(n): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = n - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10]) - assert res == 10 - - def 
test_raw_malloc_and_access_float(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Float) - - def f(n, f): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = f - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10, 3.5]) - assert res == 3.5 - - def test_jit_debug(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - jit_debug("hi there:", x) - jit_debug("foobar") - x -= 1 - return x - res = self.meta_interp(f, [8]) - assert res == 0 - self.check_loops(jit_debug=2) - - def test_assert_green(self): - def f(x, promote): - if promote: - x = hint(x, promote=True) - assert_green(x) - return x - res = self.interp_operations(f, [8, 1]) - assert res == 8 - py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0]) - - def test_multiple_specialied_versions1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 7]) - assert res == 6*8 + 6**8 - self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) - - def test_multiple_specialied_versions_array(self): - myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', - 'array']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - 
def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val - other.val) - def f(x, y): - res = x - array = [1, 2, 3] - array[1] = 7 - idx = 0 - while y > 0: - myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res, - array=array) - myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res, - array=array) - res = res.binop(x) - res.val += array[idx] + array[1] - if y < 7: - idx = 2 - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) - - def test_multiple_specialied_versions_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - b1 = f(B(x), y, B(x)) - b2 = f(B(x), y, B(x)) - assert b1.val == b2.val - c1 = f(B(x), y, A(x)) - c2 = f(B(x), y, A(x)) - assert c1.val == c2.val - d1 = f(A(x), y, B(x)) - d2 = f(A(x), y, B(x)) - assert d1.val == d2.val - return a1.val + b1.val + c1.val + d1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_failing_inlined_guard(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val 
- class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 8: - x = z - return res - def g(x, y): - c1 = f(A(x), y, B(x)) - c2 = f(A(x), y, B(x)) - assert c1.val == c2.val - return c1.val - res = self.meta_interp(g, [3, 16]) - assert res == g(3, 16) - - def test_inlined_guard_in_short_preamble(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class A: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - def binop(self, other): - return A(self.getval() + other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return A(self.val + other.val) - def f(x, y): - res = A(0) - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(A(y)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_specialied_bridge_const(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return 
A(self.val + other.val) - def f(x, y): - res = A(0) - const = 7 - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const) - myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const) - const = hint(const, promote=True) - res = res.binop(A(const)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_multiple_specialied_zigzag(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - def switch(self): - return B(self.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def switch(self): - return A(self.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - if y % 4 == 0: - res = res.switch() - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [3, 23]) - assert res == 7068153 - self.check_loop_count(6) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) - - def test_dont_trace_every_iteration(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - - def main(a, b): - i = sa = 0 - #while i < 200: - while i < 200: - myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa) - myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa) - if a > 0: pass - if b < 2: pass - sa += a % b - i += 1 - return sa - def g(): - return main(10, 20) + main(-10, -20) - res = self.meta_interp(g, []) - assert res == g() - self.check_enter_count(2) - - def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = 
['x']) - @dont_look_inside - def residual(): - print "hi there" - @unroll_safe - def loop(g): - y = 0 - while y < g: - residual() - y += 1 - def f(x, g): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) - loop(g) - x -= 1 - n = current_trace_length() - return n - res = self.meta_interp(f, [5, 8]) - assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 - - def test_compute_identity_hash(self): - from pypy.rlib.objectmodel import compute_identity_hash - class A(object): - pass - def f(): - a = A() - return compute_identity_hash(a) == compute_identity_hash(a) - res = self.interp_operations(f, []) - assert res - # a "did not crash" kind of test - - def test_compute_unique_id(self): - from pypy.rlib.objectmodel import compute_unique_id - class A(object): - pass - def f(): - a1 = A() - a2 = A() - return (compute_unique_id(a1) == compute_unique_id(a1) and - compute_unique_id(a1) != compute_unique_id(a2)) - res = self.interp_operations(f, []) - assert res - - def test_wrap_around_add(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x += 1 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint-10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_mul(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x *= 2 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint>>10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_sub(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x < 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x -= 1 - n += 1 - return n - res 
= self.meta_interp(f, [10-sys.maxint]) - assert res == 12 - self.check_tree_loop_count(2) - - - -class TestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - 
enable_opts='') - assert res - -class BaseLLtypeTests(BasicTests): - - def test_identityhash(self): - A = lltype.GcStruct("A") - def f(): - obj1 = lltype.malloc(A) - obj2 = lltype.malloc(A) - return lltype.identityhash(obj1) == lltype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oops_on_nongc(self): - from pypy.rpython.lltypesystem import lltype - - TP = lltype.Struct('x') - def f(i1, i2): - p1 = prebuilt[i1] - p2 = prebuilt[i2] - a = p1 is p2 - b = p1 is not p2 - c = bool(p1) - d = not bool(p2) - return 1000*a + 100*b + 10*c + d - prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2 - expected = f(0, 1) - assert self.interp_operations(f, [0, 1]) == expected - - def test_casts(self): - py.test.skip("xxx fix or kill") - if not self.basic: - py.test.skip("test written in a style that " - "means it's frontend only") - from pypy.rpython.lltypesystem import lltype, llmemory, rffi - - TP = lltype.GcStruct('S1') - def f(p): - n = lltype.cast_ptr_to_int(p) - return n - x = lltype.malloc(TP) - xref = lltype.cast_opaque_ptr(llmemory.GCREF, x) - res = self.interp_operations(f, [xref]) - y = llmemory.cast_ptr_to_adr(x) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - # - TP = lltype.Struct('S2') - prebuilt = [lltype.malloc(TP, immortal=True), - lltype.malloc(TP, immortal=True)] - def f(x): - p = prebuilt[x] - n = lltype.cast_ptr_to_int(p) - return n - res = self.interp_operations(f, [1]) - y = llmemory.cast_ptr_to_adr(prebuilt[1]) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - - def test_collapsing_ptr_eq(self): - S = lltype.GcStruct('S') - p = lltype.malloc(S) - driver = JitDriver(greens = [], reds = ['n', 'x']) - - def f(n, x): - while n > 0: - driver.can_enter_jit(n=n, x=x) - driver.jit_merge_point(n=n, x=x) - if x: - n -= 1 - n -= 1 - - def main(): - f(10, p) - f(10, lltype.nullptr(S)) - - self.meta_interp(main, []) - 
- def test_enable_opts(self): - jitdriver = JitDriver(greens = [], reds = ['a']) - - class A(object): - def __init__(self, i): - self.i = i - - def f(): - a = A(0) - - while a.i < 10: - jitdriver.jit_merge_point(a=a) - jitdriver.can_enter_jit(a=a) - a = A(a.i + 1) - - self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) - self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) - -class TestLLtype(BaseLLtypeTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/support.py @@ -0,0 +1,261 @@ + +import py, sys +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.ootypesystem import ootype +from pypy.jit.backend.llgraph import runner +from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter import longlong + +def _get_jitcodes(testself, CPUClass, func, values, type_system, + supports_longlong=False, **kwds): + from pypy.jit.codewriter import support, codewriter + + class FakeJitCell: + __compiled_merge_points = [] + def get_compiled_merge_points(self): + return self.__compiled_merge_points[:] + def set_compiled_merge_points(self, lst): + self.__compiled_merge_points = lst + + class FakeWarmRunnerState: + def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): + pass + + def jit_cell_at_key(self, greenkey): + assert greenkey == [] + return self._cell + _cell = FakeJitCell() + + trace_limit = sys.maxint + enable_opts = ALL_OPTS_DICT + + func._jit_unroll_safe_ = True + rtyper = support.annotate(func, values, type_system=type_system) + graphs = rtyper.annotator.translator.graphs + result_kind = 
history.getkind(graphs[0].getreturnvar().concretetype)[0] + + class FakeJitDriverSD: + num_green_args = 0 + portal_graph = graphs[0] + virtualizable_info = None + greenfield_info = None + result_type = result_kind + portal_runner_ptr = "???" + + stats = history.Stats() + cpu = CPUClass(rtyper, stats, None, False) + cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) + testself.cw = cw + policy = JitPolicy() + policy.set_supports_longlong(supports_longlong) + cw.find_all_graphs(policy) + # + testself.warmrunnerstate = FakeWarmRunnerState() + testself.warmrunnerstate.cpu = cpu + FakeJitDriverSD.warmstate = testself.warmrunnerstate + if hasattr(testself, 'finish_setup_for_interp_operations'): + testself.finish_setup_for_interp_operations() + # + cw.make_jitcodes(verbose=True) + +def _run_with_blackhole(testself, args): + from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder + cw = testself.cw + blackholeinterpbuilder = BlackholeInterpBuilder(cw) + blackholeinterp = blackholeinterpbuilder.acquire_interp() + count_i = count_r = count_f = 0 + for value in args: + T = lltype.typeOf(value) + if T == lltype.Signed: + blackholeinterp.setarg_i(count_i, value) + count_i += 1 + elif T == llmemory.GCREF: + blackholeinterp.setarg_r(count_r, value) + count_r += 1 + elif T == lltype.Float: + value = longlong.getfloatstorage(value) + blackholeinterp.setarg_f(count_f, value) + count_f += 1 + else: + raise TypeError(T) + [jitdriver_sd] = cw.callcontrol.jitdrivers_sd + blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) + blackholeinterp.run() + return blackholeinterp._final_result_anytype() + +def _run_with_pyjitpl(testself, args): + + class DoneWithThisFrame(Exception): + pass + + class DoneWithThisFrameRef(DoneWithThisFrame): + def __init__(self, cpu, *args): + DoneWithThisFrame.__init__(self, *args) + + cw = testself.cw + opt = history.Options(listops=True) + metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + metainterp_sd.finish_setup(cw) + [jitdriver_sd] 
= metainterp_sd.jitdrivers_sd + metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) + metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame + metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef + metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame + testself.metainterp = metainterp + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + except DoneWithThisFrame, e: + #if conftest.option.view: + # metainterp.stats.view() + return e.args[0] + else: + raise Exception("FAILED") + +def _run_with_machine_code(testself, args): + metainterp = testself.metainterp + num_green_args = metainterp.jitdriver_sd.num_green_args + loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) + if len(loop_tokens) != 1: + return NotImplemented + # a loop was successfully created by _run_with_pyjitpl(); call it + cpu = metainterp.cpu + for i in range(len(args) - num_green_args): + x = args[num_green_args + i] + typecode = history.getkind(lltype.typeOf(x)) + set_future_value(cpu, i, x, typecode) + faildescr = cpu.execute_token(loop_tokens[0]) + assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') + if metainterp.jitdriver_sd.result_type == history.INT: + return cpu.get_latest_value_int(0) + elif metainterp.jitdriver_sd.result_type == history.REF: + return cpu.get_latest_value_ref(0) + elif metainterp.jitdriver_sd.result_type == history.FLOAT: + return cpu.get_latest_value_float(0) + else: + return None + + +class JitMixin: + basic = True + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) + def check_loop_count(self, count): + """NB. This is a hack; use check_tree_loop_count() or + check_enter_count() for the real thing. 
+ This counts as 1 every bridge in addition to every loop; and it does + not count at all the entry bridges from interpreter, although they + are TreeLoops as well.""" + assert get_stats().compiled_count == count + def check_tree_loop_count(self, count): + assert len(get_stats().loops) == count + def check_loop_count_at_most(self, count): + assert get_stats().compiled_count <= count + def check_enter_count(self, count): + assert get_stats().enter_count == count + def check_enter_count_at_most(self, count): + assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): + assert get_stats().aborted_count == count + def check_aborted_count_at_least(self, count): + assert get_stats().aborted_count >= count + + def meta_interp(self, *args, **kwds): + kwds['CPUClass'] = self.CPUClass + kwds['type_system'] = self.type_system + if "backendopt" not in kwds: + kwds["backendopt"] = False + return ll_meta_interp(*args, **kwds) + + def interp_operations(self, f, args, **kwds): + # get the JitCodes for the function f + _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + # try to run it with blackhole.py + result1 = _run_with_blackhole(self, args) + # try to run it with pyjitpl.py + result2 = _run_with_pyjitpl(self, args) + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented + # + if (longlong.supports_longlong and + isinstance(result1, longlong.r_float_storage)): + result1 = longlong.getrealfloat(result1) + return result1 + + def check_history(self, expected=None, **isns): + # this can be used after calling meta_interp + get_stats().check_history(expected, **isns) + + def check_operations_history(self, expected=None, **isns): + # this can be used after interp_operations + if expected is not None: + expected = dict(expected) + 
expected['jump'] = 1 + self.metainterp.staticdata.stats.check_history(expected, **isns) + + +class LLJitMixin(JitMixin): + type_system = 'lltype' + CPUClass = runner.LLtypeCPU + + @staticmethod + def Ptr(T): + return lltype.Ptr(T) + + @staticmethod + def GcStruct(name, *fields, **kwds): + S = lltype.GcStruct(name, *fields, **kwds) + return S + + malloc = staticmethod(lltype.malloc) + nullptr = staticmethod(lltype.nullptr) + + @staticmethod + def malloc_immortal(T): + return lltype.malloc(T, immortal=True) + + def _get_NODE(self): + NODE = lltype.GcForwardReference() + NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), + ('next', lltype.Ptr(NODE)))) + return NODE + +class OOJitMixin(JitMixin): + type_system = 'ootype' + #CPUClass = runner.OOtypeCPU + + def setup_class(cls): + py.test.skip("ootype tests skipped for now") + + @staticmethod + def Ptr(T): + return T + + @staticmethod + def GcStruct(name, *fields, **kwds): + if 'hints' in kwds: + kwds['_hints'] = kwds['hints'] + del kwds['hints'] + I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) + return I + + malloc = staticmethod(ootype.new) + nullptr = staticmethod(ootype.null) + + @staticmethod + def malloc_immortal(T): + return ootype.new(T) + + def _get_NODE(self): + NODE = ootype.Instance('NODE', ootype.ROOT, {}) + NODE._add_fields({'value': ootype.Signed, + 'next': NODE}) + return NODE diff --git a/pypy/jit/backend/cli/test/test_basic.py b/pypy/jit/backend/cli/test/test_basic.py --- a/pypy/jit/backend/cli/test/test_basic.py +++ b/pypy/jit/backend/cli/test/test_basic.py @@ -1,14 +1,14 @@ import py from pypy.jit.backend.cli.runner import CliCPU -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit -class CliJitMixin(test_basic.OOJitMixin): +class CliJitMixin(suport.OOJitMixin): CPUClass = CliCPU def setup_class(cls): from pypy.translator.cli.support import PythonNet PythonNet.System # possibly raises Skip -class TestBasic(CliJitMixin, 
test_basic.TestOOtype): +class TestBasic(CliJitMixin, test_ajit.TestOOtype): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,8 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +88,13 @@ # # Custom implementations - at jit.purefunction def ll_math_isnan(y): - return bool(math_isnan(y)) + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. 
+ return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver from pypy.rlib import objectmodel diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,3 +1,5 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype @@ -509,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.jitprof import * diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy 
import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/jit/backend/x86/test/test_basic.py b/pypy/jit/backend/x86/test/test_basic.py --- a/pypy/jit/backend/x86/test/test_basic.py +++ b/pypy/jit/backend/x86/test/test_basic.py @@ -1,18 +1,18 @@ import py from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver -class Jit386Mixin(test_basic.LLJitMixin): +class Jit386Mixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass -class TestBasic(Jit386Mixin, test_basic.BaseLLtypeTests): +class TestBasic(Jit386Mixin, test_ajit.BaseLLtypeTests): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py def test_bug(self): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- 
a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class DelTests: diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- 
a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -5,7 +5,7 @@ from pypy.rlib.libffi import ArgChain from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestFfiCall(LLJitMixin, _TestLibffiCall): diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # 
TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/translator/jvm/src/pypy/StatResult.java b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/jit/metainterp/test/test_float.py b/pypy/jit/metainterp/test/test_float.py --- a/pypy/jit/metainterp/test/test_float.py +++ b/pypy/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class FloatTests: diff --git a/pypy/jit/tl/tla/test_tla.py b/pypy/jit/tl/tla/test_tla.py --- a/pypy/jit/tl/tla/test_tla.py +++ b/pypy/jit/tl/tla/test_tla.py @@ -155,7 +155,7 @@ # ____________________________________________________________ -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): def test_loop(self): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test import test_loop -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): diff --git a/pypy/tool/jitlogparser/storage.py 
b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 @@ +import py from pypy.translator.jvm.test.runtest import JvmTest from 
pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... + s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import 
StopAtXPolicy from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py --- a/pypy/jit/metainterp/test/test_immutable.py +++ b/pypy/jit/metainterp/test/test_immutable.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ImmutableFieldsTests: diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -1,6 +1,6 @@ import py -from pypy.jit.metainterp.test.test_basic import JitMixin +from pypy.jit.metainterp.test.support import JitMixin from pypy.jit.tl.spli import interpreter, objects, serializer from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.backend.llgraph import runner diff --git 
a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -12,7 +12,7 @@ import py from pypy.jit.metainterp.memmgr import MemoryManager -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new 
InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, hint from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, 
descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -266,7 +265,7 @@ if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,14 +22,20 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - ## XXX! we would like: return jit.hint(self, promote=True).items - ## XXX! but it gives horrible performance in some cases + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. 
Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items return self.items def getitem(self, idx): @@ -46,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -622,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -6,7 +6,7 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import BoxInt -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -198,44 +198,6 @@ print print '@' * 79 - def 
test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." - i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - 
self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) - while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -783,269 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if 
i %s %f: sa += 1 - else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, 
([], res)) - - def test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = 
t - i += 1 - return 3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 def test_intbound_simple(self): diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder from pypy.jit.metainterp.blackhole import BlackholeInterpreter from pypy.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, vref_None from pypy.rlib.jit import virtual_ref, virtual_ref_finish from pypy.rlib.objectmodel import compute_unique_id -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.virtualref import VirtualRefInfo diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/jit/metainterp/test/test_string.py 
b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class StringTests: diff --git a/pypy/jit/metainterp/test/test_tlc.py b/pypy/jit/metainterp/test/test_tlc.py --- a/pypy/jit/metainterp/test/test_tlc.py +++ b/pypy/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from pypy.jit.tl import tlc -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class TLCTests: diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. */ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,7 +5,7 @@ soon as possible (at least in a simple case). 
""" -import weakref, random +import weakref import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver class ListTests(object): diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = 
op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -101,7 +101,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 @@ import py, sys -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.jit.codewriter.policy import StopAtXPolicy diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -1175,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1187,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int 
locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class ToyLanguageTests: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,7 +39,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "md5", "cStringIO", "array"])) + "struct", "_md5", "cStringIO", "array"])) working_oo_modules = default_modules.copy() working_oo_modules.update(dict.fromkeys( From commits-noreply at bitbucket.org Fri Apr 15 14:05:21 2011 From: commits-noreply at bitbucket.org (fijal) Date: Fri, 15 Apr 2011 14:05:21 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: use an out of line guard Message-ID: <20110415120521.581382A204C@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43389:82bddec49cab Date: 2011-04-15 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/82bddec49cab/ Log: use an out of line guard diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- 
a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,6 +85,7 @@ 'nslots', 'instancetypedef', 'terminator', + '_version_tag?', ] # for config.objspace.std.getattributeshortcut From commits-noreply at bitbucket.org Fri Apr 15 14:05:41 2011 From: commits-noreply at bitbucket.org (fijal) Date: Fri, 15 Apr 2011 14:05:41 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: merge default Message-ID: <20110415120541.6D3232A2048@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43390:58c9e01d357e Date: 2011-04-15 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/58c9e01d357e/ Log: merge default diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ -45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -167,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - # string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. 
+def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. 
For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. + halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert buf[endpos] == '5' - if '.' 
in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each safe from overflow, but + # 
pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -119,13 +119,16 @@ Impara, Germany Change Maker, Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. 
-License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -158,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. 
- UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ListTests: diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_ajit.py copy from pypy/jit/metainterp/test/test_basic.py copy to pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -4,269 +4,17 @@ from pypy.rlib.jit import loop_invariant from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong from pypy import conftest from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return 
self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - 
[jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return 
cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - 
result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class BasicTests: diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ 
b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass = JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) 
PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,19 +1,16 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + main(10) - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x - - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = 
self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside from pypy.rlib.rarithmetic import intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.rclass import FieldListAccessor from pypy.jit.metainterp.warmspot import get_stats, get_translator from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -1,6 +1,6 @@ import py, 
sys from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class WrongResult(Exception): pass diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, -1, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -1009,6 +1010,7 @@ """) def test_func_defaults(self): + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1061,7 +1063,7 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, 
i25) diff --git a/pypy/jit/metainterp/test/test_dlist.py b/pypy/jit/metainterp/test/test_dlist.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_dlist.py +++ /dev/null @@ -1,165 +0,0 @@ - -import py -from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin -py.test.skip("Disabled") - -class ListTests: - def test_basic(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - n -= 1 - return l[0] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(getarrayitem_gc=0, setarrayitem_gc=1) -# XXX fix codewriter -# guard_exception=0, -# guard_no_exception=1) - - def test_list_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=2, getarrayitem_gc=0) - - def test_list_escapes_but_getitem_goes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - x = l[2] - y = l[1] + l[2] - l[1] = x + y - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=3, getarrayitem_gc=0) - - def test_list_of_ptrs(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - class A(object): - def __init__(self, x): - self.x = x - - def f(n): - l = [A(3)] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0].x + 1 - l[0] = A(x) - n -= 1 - return 
l[0].x - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=1, getarrayitem_gc=0, - new_with_vtable=1) # A should escape - - def test_list_checklength(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [10, 13], listops=True) - assert res == f(10, 13) - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_list_checklength_run(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) > n: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [50, 13], listops=True) - assert res == 42 - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_checklength_cannot_go_away(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n): - l = [0] * n - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return len(l) - l = [0] * n - n -= 1 - return 0 - - res = self.meta_interp(f, [10], listops=True) - assert res == 2 - self.check_loops(arraylen_gc=1) - - def test_list_indexerror(self): - # this is an example where IndexError is raised before - # even getting to the JIT - py.test.skip("I suspect bug somewhere outside of the JIT") - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - l[n] = n - n -= 1 - return l[3] - - def g(n): - try: - f(n) - return 0 - except IndexError: - return 42 - - res = self.meta_interp(g, [10]) - assert res == 42 - self.check_loops(setitem=2) - -class TestLLtype(ListTests, LLJitMixin): - pass diff 
--git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from pypy.rlib.jit import JitDriver, unroll_safe -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -2757,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2791,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2828,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- 
a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver, hint, purefunction from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class SendTests(object): diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_basic.py +++ /dev/null @@ -1,2411 +0,0 @@ -import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong -from pypy import conftest -from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper -from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, 
greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - 
pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. 
This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. - This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be 
used after interp_operations - if expected is not None: - expected = dict(expected) - expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - - -class BasicTests: - - def test_basic(self): - def f(x, y): - return x + y - res = self.interp_operations(f, [40, 2]) - assert res == 42 - - def test_basic_inst(self): - class A: - pass - def f(n): - a = A() - a.x = n - return a.x - res = self.interp_operations(f, [42]) - assert res == 42 - - def test_uint_floordiv(self): - from pypy.rlib.rarithmetic import r_uint - - def f(a, b): - a = r_uint(a) - b = r_uint(b) - return a/b - - res = self.interp_operations(f, [-4, 3]) - assert res == long(r_uint(-4)) // 3 - - def 
test_direct_call(self): - def g(n): - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_direct_call_with_guard(self): - def g(n): - if n < 0: - return 0 - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - if self.basic: - found = 0 - for op in get_stats().loops[0]._all_operations(): - if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) - found += 1 - assert found == 1 - - def test_loop_invariant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loop_invariant_mul_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - b = y * 2 - res += ovfcheck(x * x) + b - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 
'jump': 1}) - - def test_loop_invariant_mul_bridge1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - x += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 3427 - self.check_loop_count(3) - - def test_loop_invariant_mul_bridge_maintaining1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - res += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1167 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - - def test_loop_invariant_mul_bridge_maintaining2(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - z = x * x - res += z - if y<16: - res += z - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1692 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - def test_loop_invariant_mul_bridge_maintaining3(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) - def f(x, y, m): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m) - myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m) - z = x * x - res += z - if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x.intval * x.intval - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - 
self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loops_are_transient(self): - import gc, weakref - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - if y%2: - res *= 2 - y -= 1 - return res - wr_loops = [] - old_init = history.TreeLoop.__init__.im_func - try: - def track_init(self, name): - old_init(self, name) - wr_loops.append(weakref.ref(self)) - history.TreeLoop.__init__ = track_init - res = self.meta_interp(f, [6, 15], no_stats=True) - finally: - history.TreeLoop.__init__ = old_init - - assert res == f(6, 15) - gc.collect() - - #assert not [wr for wr in wr_loops if wr()] - for loop in [wr for wr in wr_loops if wr()]: - assert loop().name == 'short preamble' - - def test_string(self): - def f(n): - bytecode = 'adlfkj' + chr(n) - if n < len(bytecode): - return bytecode[n] - else: - return "?" - res = self.interp_operations(f, [1]) - assert res == ord("d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord("?") - - def test_chr2str(self): - def f(n): - s = chr(n) - return s[0] - res = self.interp_operations(f, [3]) - assert res == 3 - - def test_unicode(self): - def f(n): - bytecode = u'adlfkj' + unichr(n) - if n < len(bytecode): - return bytecode[n] - else: - return u"?" 
- res = self.interp_operations(f, [1]) - assert res == ord(u"d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord(u"?") - - def test_residual_call(self): - @dont_look_inside - def externfn(x, y): - return x * y - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) - - def test_residual_call_pure(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - n = hint(n, promote=True) - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is not recorded in the history if all-constant args - self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure=0) - - def test_residual_call_pure_1(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is recorded in the history if not-all-constant args - self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure=1) - - def test_residual_call_pure_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def externfn(x): - return x - 1 - externfn._pure_function_ = True - def f(n): - while n > 0: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - n = externfn(n) - return n - res = self.meta_interp(f, [7]) - assert res == 0 - # CALL_PURE is recorded in the history, but turned into a CALL - # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) - - def test_constfold_call_pure(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - n -= externfn(m) - return n - res = 
self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_constfold_call_pure_2(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - class V: - def __init__(self, value): - self.value = value - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - v = V(m) - n -= externfn(v.value) - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_pure_function_returning_object(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - class V: - def __init__(self, x): - self.x = x - v1 = V(1) - v2 = V(2) - def externfn(x): - if x: - return v1 - else: - return v2 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - m = V(m).x - n -= externfn(m).x + externfn(m + m - m).x - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) - - def test_constant_across_mp(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - class X(object): - pass - def f(n): - while n > -100: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - x = X() - x.arg = 5 - if n <= 0: break - n -= x.arg - x.arg = 6 # prevents 'x.arg' from being annotated as constant - return n - res = self.meta_interp(f, [31]) - assert res == -4 - - def test_stopatxpolicy(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def internfn(y): - return y * 3 - def externfn(y): - return y % 4 - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if y & 
7: - f = internfn - else: - f = externfn - f(y) - y -= 1 - return 42 - policy = StopAtXPolicy(externfn) - res = self.meta_interp(f, [31], policy=policy) - assert res == 42 - self.check_loops(int_mul=1, int_mod=0) - - def test_we_are_jitted(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if we_are_jitted(): - x = 1 - else: - x = 10 - y -= x - return y - assert f(55) == -5 - res = self.meta_interp(f, [55]) - assert res == -1 - - def test_confirm_enter_jit(self): - def confirm_enter_jit(x, y): - return x <= 5 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - confirm_enter_jit = confirm_enter_jit) - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - y -= x - return y - # - res = self.meta_interp(f, [10, 84]) - assert res == -6 - self.check_loop_count(0) - # - res = self.meta_interp(f, [3, 19]) - assert res == -2 - self.check_loop_count(1) - - def test_can_never_inline(self): - def can_never_inline(x): - return x > 50 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - can_never_inline = can_never_inline) - @dont_look_inside - def marker(): - pass - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - x += 1 - if x == 4 or x == 61: - marker() - y -= x - return y - # - res = self.meta_interp(f, [3, 6], repeat=7) - assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle - # - res = self.meta_interp(f, [60, 84], repeat=7) - assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately - - def test_format(self): - def f(n): - return len("<%d>" % n) - res = self.interp_operations(f, [421]) - assert res == 5 - - def test_switch(self): - def f(n): - if n == -5: return 12 - elif n == 2: return 51 - elif n == 7: return 1212 - else: return 42 - res = self.interp_operations(f, 
[7]) - assert res == 1212 - res = self.interp_operations(f, [12311]) - assert res == 42 - - def test_r_uint(self): - from pypy.rlib.rarithmetic import r_uint - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - y = r_uint(y) - while y > 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - y -= 1 - return y - res = self.meta_interp(f, [10]) - assert res == 0 - - def test_uint_operations(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - return ((r_uint(n) - 123) >> 1) <= r_uint(456) - res = self.interp_operations(f, [50]) - assert res == False - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_uint_condition(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - if ((r_uint(n) - 123) >> 1) <= r_uint(456): - return 24 - else: - return 12 - res = self.interp_operations(f, [50]) - assert res == 12 - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_int_between(self): - # - def check(arg1, arg2, arg3, expect_result, **expect_operations): - from pypy.rpython.lltypesystem import lltype - from pypy.rpython.lltypesystem.lloperation import llop - loc = locals().copy() - exec py.code.Source(""" - def f(n, m, p): - arg1 = %(arg1)s - arg2 = %(arg2)s - arg3 = %(arg3)s - return llop.int_between(lltype.Bool, arg1, arg2, arg3) - """ % locals()).compile() in loc - res = self.interp_operations(loc['f'], [5, 6, 7]) - assert res == expect_result - self.check_operations_history(expect_operations) - # - check('n', 'm', 'p', True, int_sub=2, uint_lt=1) - check('n', 'p', 'm', False, int_sub=2, uint_lt=1) - # - check('n', 'm', 6, False, int_sub=2, uint_lt=1) - # - check('n', 4, 'p', False, int_sub=2, uint_lt=1) - check('n', 5, 'p', True, int_sub=2, uint_lt=1) - check('n', 8, 'p', False, int_sub=2, uint_lt=1) - # - check('n', 6, 7, True, int_sub=2, uint_lt=1) - # - check(-2, 'n', 'p', True, int_sub=2, uint_lt=1) - 
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1) - check(-2, 'p', 'm', False, int_sub=2, uint_lt=1) - #check(0, 'n', 'p', True, uint_lt=1) xxx implement me - #check(0, 'm', 'p', True, uint_lt=1) - #check(0, 'p', 'm', False, uint_lt=1) - # - check(2, 'n', 6, True, int_sub=1, uint_lt=1) - check(2, 'm', 6, False, int_sub=1, uint_lt=1) - check(2, 'p', 6, False, int_sub=1, uint_lt=1) - check(5, 'n', 6, True, int_eq=1) # 6 == 5+1 - check(5, 'm', 6, False, int_eq=1) # 6 == 5+1 - # - check(2, 6, 'm', False, int_sub=1, uint_lt=1) - check(2, 6, 'p', True, int_sub=1, uint_lt=1) - # - check(2, 40, 6, False) - check(2, 40, 60, True) - - def test_getfield(self): - class A: - pass - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=1) - - def test_getfield_immutable(self): - class A: - _immutable_ = True - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=0) - - def test_setfield_bool(self): - class A: - def __init__(self): - self.flag = True - myjitdriver = JitDriver(greens = [], reds = ['n', 'obj']) - def f(n): - obj = A() - res = False - while n > 0: - myjitdriver.can_enter_jit(n=n, obj=obj) - myjitdriver.jit_merge_point(n=n, obj=obj) - obj.flag = False - n -= 1 - return res - res = self.meta_interp(f, [7]) - assert type(res) == bool - assert not res - - def test_switch_dict(self): - def f(x): - if x == 1: return 61 - elif x == 2: return 511 - elif x == 3: return -22 - elif x == 4: return 81 - elif x == 5: return 17 - elif x == 6: return 54 - elif x == 7: return 987 - elif x == 8: return -12 - elif x == 9: return 321 - return -1 - res = self.interp_operations(f, [5]) - assert res == 17 - res = self.interp_operations(f, [15]) - assert res == -1 - 
- def test_int_add_ovf(self): - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -98 - res = self.interp_operations(f, [1, sys.maxint]) - assert res == -42 - - def test_int_sub_ovf(self): - def f(x, y): - try: - return ovfcheck(x - y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -102 - res = self.interp_operations(f, [1, -sys.maxint]) - assert res == -42 - - def test_int_mul_ovf(self): - def f(x, y): - try: - return ovfcheck(x * y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -200 - res = self.interp_operations(f, [-3, sys.maxint//2]) - assert res == -42 - - def test_mod_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y']) - def f(n, x, y): - while n > 0: - myjitdriver.can_enter_jit(x=x, y=y, n=n) - myjitdriver.jit_merge_point(x=x, y=y, n=n) - n -= ovfcheck(x % y) - return n - res = self.meta_interp(f, [20, 1, 2]) - assert res == 0 - self.check_loops(call=0) - - def test_abs(self): - myjitdriver = JitDriver(greens = [], reds = ['i', 't']) - def f(i): - t = 0 - while i < 10: - myjitdriver.can_enter_jit(i=i, t=t) - myjitdriver.jit_merge_point(i=i, t=t) - t += abs(i) - i += 1 - return t - res = self.meta_interp(f, [-5]) - assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9 - - def test_float(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - x = float(x) - y = float(y) - res = 0.0 - while y > 0.0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1.0 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42.0 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) - - def test_print(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def f(n): - while n > 0: - 
myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - print n - n -= 1 - return n - res = self.meta_interp(f, [7]) - assert res == 0 - - def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n'], greens = []) - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - n -= 1 - - self.meta_interp(f, [20], repeat=7) - self.check_tree_loop_count(2) # the loop and the entry path - # we get: - # ENTER - compile the new loop and the entry bridge - # ENTER - compile the leaving path - self.check_enter_count(2) - - def test_bridge_from_interpreter_2(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n'], greens = []) - glob = [1] - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - if n == 17 and glob[0]: - glob[0] = 0 - x = n + 1 - y = n + 2 - z = n + 3 - k = n + 4 - n -= 1 - n += x + y + z + k - n -= x + y + z + k - n -= 1 - - self.meta_interp(f, [20], repeat=7) - - def test_bridge_from_interpreter_3(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - glob.x = 1 - x = 0 - y = 0 - z = 0 - k = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k) - mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k) - x += 10 - y += 3 - z -= 15 - k += 4 - if n == 17 and glob.x: - glob.x = 0 - x += n + 1 - y += n + 2 - z += n + 3 - k += n + 4 - n -= 1 - n -= 1 - return x + 2*y + 3*z + 5*k + 13*n - - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_bridge_from_interpreter_4(self): - jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - - def f(n, k): - while n > 0: - jitdriver.can_enter_jit(n=n, k=k) - jitdriver.jit_merge_point(n=n, k=k) - if k: - n -= 2 - else: - n -= 1 - return n + k - - from pypy.rpython.test.test_llinterp import 
get_interpreter, clear_tcache - from pypy.jit.metainterp.warmspot import WarmRunnerDesc - - interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) - clear_tcache() - translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - warmrunnerdesc = WarmRunnerDesc(translator, - CPUClass=self.CPUClass) - state = warmrunnerdesc.jitdrivers_sd[0].warmstate - state.set_param_threshold(3) # for tests - state.set_param_trace_eagerness(0) # for tests - warmrunnerdesc.finish() - for n, k in [(20, 0), (20, 1)]: - interp.eval_graph(graph, [n, k]) - - def test_bridge_leaving_interpreter_5(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - x = 0 - glob.x = 1 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - glob.x += 1 - x += 3 - n -= 1 - glob.x += 100 - return glob.x + x - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_instantiate_classes(self): - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - def f(n): - if n > 5: - cls = A - else: - cls = B - return cls().foo - res = self.interp_operations(f, [3]) - assert res == 8 - res = self.interp_operations(f, [13]) - assert res == 72 - - def test_instantiate_does_not_call(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - - def f(n): - x = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - if n % 2 == 0: - cls = A - else: - cls = B - inst = cls() - x += inst.foo - n -= 1 - return x - res = self.meta_interp(f, [20], enable_opts='') - assert res == f(20) - self.check_loops(call=0) - - def test_zerodivisionerror(self): - # test the case of exception-raising operation that is not delegated - # to the backend at all: ZeroDivisionError - # - def f(n): - assert n >= 0 - try: - 
return ovfcheck(5 % n) - except ZeroDivisionError: - return -666 - except OverflowError: - return -777 - res = self.interp_operations(f, [0]) - assert res == -666 - # - def f(n): - assert n >= 0 - try: - return ovfcheck(6 // n) - except ZeroDivisionError: - return -667 - except OverflowError: - return -778 - res = self.interp_operations(f, [0]) - assert res == -667 - - def test_div_overflow(self): - import sys - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - try: - res += llop.int_floordiv_ovf(lltype.Signed, - -sys.maxint-1, x) - x += 5 - except OverflowError: - res += 100 - y -= 1 - return res - res = self.meta_interp(f, [-41, 16]) - assert res == ((-sys.maxint-1) // (-41) + - (-sys.maxint-1) // (-36) + - (-sys.maxint-1) // (-31) + - (-sys.maxint-1) // (-26) + - (-sys.maxint-1) // (-21) + - (-sys.maxint-1) // (-16) + - (-sys.maxint-1) // (-11) + - (-sys.maxint-1) // (-6) + - 100 * 8) - - def test_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - if n: - obj = A() - else: - obj = B() - return isinstance(obj, B) - res = self.interp_operations(fn, [0]) - assert res - self.check_operations_history(guard_class=1) - res = self.interp_operations(fn, [1]) - assert not res - - def test_isinstance_2(self): - driver = JitDriver(greens = [], reds = ['n', 'sum', 'x']) - class A: - pass - class B(A): - pass - class C(B): - pass - - def main(): - return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100 - - def f(n, x): - sum = 0 - while n > 0: - driver.can_enter_jit(x=x, n=n, sum=sum) - driver.jit_merge_point(x=x, n=n, sum=sum) - if isinstance(x, B): - sum += 1 - n -= 1 - return sum - - res = self.meta_interp(main, []) - assert res == 55 - - def test_assert_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - # this should only be 
called with n != 0 - if n: - obj = B() - obj.a = n - else: - obj = A() - obj.a = 17 - assert isinstance(obj, B) - return obj.a - res = self.interp_operations(fn, [1]) - assert res == 1 - self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) - - def test_r_dict(self): - from pypy.rlib.objectmodel import r_dict - class FooError(Exception): - pass - def myeq(n, m): - return n == m - def myhash(n): - if n < 0: - raise FooError - return -n - def f(n): - d = r_dict(myeq, myhash) - for i in range(10): - d[i] = i*i - try: - return d[n] - except FooError: - return 99 - res = self.interp_operations(f, [5]) - assert res == f(5) - - def test_free_object(self): - import weakref - from pypy.rlib import rgc - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - class X(object): - pass - def main(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= x.foo - def g(n): - x = X() - x.foo = 2 - main(n, x) - x.foo = 5 - return weakref.ref(x) - def f(n): - r = g(n) - rgc.collect(); rgc.collect(); rgc.collect() - return r() is None - # - assert f(30) == 1 - res = self.meta_interp(f, [30], no_stats=True) - assert res == 1 - - def test_pass_around(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - - def call(): - pass - - def f(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - if n % 2: - call() - if n == 8: - return x - x = 3 - else: - x = 5 - n -= 1 - return 0 - - self.meta_interp(f, [40, 0]) - - def test_const_inputargs(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x']) - def f(n, x): - m = 0x7FFFFFFF - while n > 0: - myjitdriver.can_enter_jit(m=m, n=n, x=x) - myjitdriver.jit_merge_point(m=m, n=n, x=x) - x = 42 - n -= 1 - m = m >> 1 - return x - - res = self.meta_interp(f, [50, 1], enable_opts='') - assert res == 42 - - 
def test_set_param(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - def g(n): - x = 0 - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= 1 - x += n - return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) - return g(n) - - res = self.meta_interp(f, [10, 3]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(2) - - res = self.meta_interp(f, [10, 13]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(0) - - def test_dont_look_inside(self): - @dont_look_inside - def g(a, b): - return a + b - def f(a, b): - return g(a, b) - res = self.interp_operations(f, [3, 5]) - assert res == 8 - self.check_operations_history(int_add=0, call=1) - - def test_listcomp(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) - def f(x, y): - lst = [0, 0, 0] - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, lst=lst) - myjitdriver.jit_merge_point(x=x, y=y, lst=lst) - lst = [i+x for i in lst if i >=0] - y -= 1 - return lst[0] - res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True) - # XXX: the loop looks inefficient - assert res == 42 - - def test_tuple_immutable(self): - def new(a, b): - return a, b - def f(a, b): - tup = new(a, b) - return tup[1] - res = self.interp_operations(f, [3, 5]) - assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=1) - - def test_oosend_look_inside_only_one(self): - class A: - pass - class B(A): - def g(self): - return 123 - class C(A): - @dont_look_inside - def g(self): - return 456 - def f(n): - if n > 3: - x = B() - else: - x = C() - return x.g() + x.g() - res = self.interp_operations(f, [10]) - assert res == 123 * 2 - res = self.interp_operations(f, [-10]) - assert res == 456 * 2 - - def test_residual_external_call(self): - import math - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - def f(x, y): - x = float(x) - 
res = 0.0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) - res += ipart - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops(call=1) - - def test_merge_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 5 - class B(A): - def g(self, y): - return y - 3 - - a1 = A() - a2 = A() - b = B() - def f(x): - l = [a1] * 100 + [a2] * 100 + [b] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - x = a.g(x) - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) - - def test_merge_guardnonnull_guardclass(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class 
B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [b1] * 100 + [None] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) - - def test_merge_guardnonnull_guardvalue_2(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - a2 = A() - b1 = B() - def f(x): - l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [399], listops=True) - assert res == f(399) - self.check_loops(guard_class=0, 
guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_residual_call_doesnt_lose_info(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) - - class A(object): - pass - - globall = [""] - @dont_look_inside - def g(x): - globall[0] = str(x) - return x - - def f(x): - y = A() - y.v = x - l = [0] - while y.v > 0: - myjitdriver.can_enter_jit(x=x, y=y, l=l) - myjitdriver.jit_merge_point(x=x, y=y, l=l) - l[0] = y.v - lc = l[0] - y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 - return y.v - res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) - - def test_guard_isnull_nonnull(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - - @dont_look_inside - def create(x): - if x >= -40: - return A() - return None - - def f(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - obj = create(x-1) - if obj is not None: - res += 1 - obj2 = create(x-1000) - if obj2 is None: - res += 1 - x -= 1 - return res - res = self.meta_interp(f, [21]) - assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) - - def test_loop_invariant1(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - a = A() - a.current_a = A() - a.current_a.x = 1 - @loop_invariant - def f(): - return a.current_a - - def g(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - res += f().x - res += f().x - res += f().x - x -= 1 - a.current_a = A() - a.current_a.x = 2 - return res - res = self.meta_interp(g, [21]) - assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) - - def 
test_bug_optimizeopt_mutates_ops(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) - class A(object): - pass - class B(A): - pass - - glob = A() - glob.a = None - def f(x): - res = 0 - a = A() - a.x = 0 - glob.a = A() - const = 2 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const) - myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const) - if type(glob.a) is B: - res += 1 - if a is None: - a = A() - a.x = x - glob.a = B() - const = 2 - else: - const = hint(const, promote=True) - x -= const - res += a.x - a = None - glob.a = A() - const = 1 - return res - res = self.meta_interp(f, [21]) - assert res == f(21) - - def test_getitem_indexerror(self): - lst = [10, 4, 9, 16] - def f(n): - try: - return lst[n] - except IndexError: - return -2 - res = self.interp_operations(f, [2]) - assert res == 9 - res = self.interp_operations(f, [4]) - assert res == -2 - res = self.interp_operations(f, [-4]) - assert res == 10 - res = self.interp_operations(f, [-5]) - assert res == -2 - - def test_guard_always_changing_value(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - a = A() - hint(a, promote=True) - x -= 1 - self.meta_interp(f, [50]) - self.check_loop_count(1) - # this checks that the logic triggered by make_a_counter_per_value() - # works and prevents generating tons of bridges - - def test_swap_values(self): - def f(x, y): - if x > 5: - x, y = y, x - return x - y - res = self.interp_operations(f, [10, 2]) - assert res == -8 - res = self.interp_operations(f, [3, 2]) - assert res == 1 - - def test_raw_malloc_and_access(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Signed) - - def f(n): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = n - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10]) - assert res == 10 - - def 
test_raw_malloc_and_access_float(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Float) - - def f(n, f): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = f - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10, 3.5]) - assert res == 3.5 - - def test_jit_debug(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - jit_debug("hi there:", x) - jit_debug("foobar") - x -= 1 - return x - res = self.meta_interp(f, [8]) - assert res == 0 - self.check_loops(jit_debug=2) - - def test_assert_green(self): - def f(x, promote): - if promote: - x = hint(x, promote=True) - assert_green(x) - return x - res = self.interp_operations(f, [8, 1]) - assert res == 8 - py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0]) - - def test_multiple_specialied_versions1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 7]) - assert res == 6*8 + 6**8 - self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) - - def test_multiple_specialied_versions_array(self): - myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', - 'array']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - 
def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val - other.val) - def f(x, y): - res = x - array = [1, 2, 3] - array[1] = 7 - idx = 0 - while y > 0: - myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res, - array=array) - myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res, - array=array) - res = res.binop(x) - res.val += array[idx] + array[1] - if y < 7: - idx = 2 - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) - - def test_multiple_specialied_versions_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - b1 = f(B(x), y, B(x)) - b2 = f(B(x), y, B(x)) - assert b1.val == b2.val - c1 = f(B(x), y, A(x)) - c2 = f(B(x), y, A(x)) - assert c1.val == c2.val - d1 = f(A(x), y, B(x)) - d2 = f(A(x), y, B(x)) - assert d1.val == d2.val - return a1.val + b1.val + c1.val + d1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_failing_inlined_guard(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val 
- class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 8: - x = z - return res - def g(x, y): - c1 = f(A(x), y, B(x)) - c2 = f(A(x), y, B(x)) - assert c1.val == c2.val - return c1.val - res = self.meta_interp(g, [3, 16]) - assert res == g(3, 16) - - def test_inlined_guard_in_short_preamble(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class A: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - def binop(self, other): - return A(self.getval() + other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return A(self.val + other.val) - def f(x, y): - res = A(0) - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(A(y)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_specialied_bridge_const(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return 
A(self.val + other.val) - def f(x, y): - res = A(0) - const = 7 - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const) - myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const) - const = hint(const, promote=True) - res = res.binop(A(const)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_multiple_specialied_zigzag(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - def switch(self): - return B(self.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def switch(self): - return A(self.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - if y % 4 == 0: - res = res.switch() - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [3, 23]) - assert res == 7068153 - self.check_loop_count(6) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) - - def test_dont_trace_every_iteration(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - - def main(a, b): - i = sa = 0 - #while i < 200: - while i < 200: - myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa) - myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa) - if a > 0: pass - if b < 2: pass - sa += a % b - i += 1 - return sa - def g(): - return main(10, 20) + main(-10, -20) - res = self.meta_interp(g, []) - assert res == g() - self.check_enter_count(2) - - def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = 
['x']) - @dont_look_inside - def residual(): - print "hi there" - @unroll_safe - def loop(g): - y = 0 - while y < g: - residual() - y += 1 - def f(x, g): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) - loop(g) - x -= 1 - n = current_trace_length() - return n - res = self.meta_interp(f, [5, 8]) - assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 - - def test_compute_identity_hash(self): - from pypy.rlib.objectmodel import compute_identity_hash - class A(object): - pass - def f(): - a = A() - return compute_identity_hash(a) == compute_identity_hash(a) - res = self.interp_operations(f, []) - assert res - # a "did not crash" kind of test - - def test_compute_unique_id(self): - from pypy.rlib.objectmodel import compute_unique_id - class A(object): - pass - def f(): - a1 = A() - a2 = A() - return (compute_unique_id(a1) == compute_unique_id(a1) and - compute_unique_id(a1) != compute_unique_id(a2)) - res = self.interp_operations(f, []) - assert res - - def test_wrap_around_add(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x += 1 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint-10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_mul(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x *= 2 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint>>10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_sub(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x < 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x -= 1 - n += 1 - return n - res 
= self.meta_interp(f, [10-sys.maxint]) - assert res == 12 - self.check_tree_loop_count(2) - - - -class TestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - 
enable_opts='') - assert res - -class BaseLLtypeTests(BasicTests): - - def test_identityhash(self): - A = lltype.GcStruct("A") - def f(): - obj1 = lltype.malloc(A) - obj2 = lltype.malloc(A) - return lltype.identityhash(obj1) == lltype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oops_on_nongc(self): - from pypy.rpython.lltypesystem import lltype - - TP = lltype.Struct('x') - def f(i1, i2): - p1 = prebuilt[i1] - p2 = prebuilt[i2] - a = p1 is p2 - b = p1 is not p2 - c = bool(p1) - d = not bool(p2) - return 1000*a + 100*b + 10*c + d - prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2 - expected = f(0, 1) - assert self.interp_operations(f, [0, 1]) == expected - - def test_casts(self): - py.test.skip("xxx fix or kill") - if not self.basic: - py.test.skip("test written in a style that " - "means it's frontend only") - from pypy.rpython.lltypesystem import lltype, llmemory, rffi - - TP = lltype.GcStruct('S1') - def f(p): - n = lltype.cast_ptr_to_int(p) - return n - x = lltype.malloc(TP) - xref = lltype.cast_opaque_ptr(llmemory.GCREF, x) - res = self.interp_operations(f, [xref]) - y = llmemory.cast_ptr_to_adr(x) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - # - TP = lltype.Struct('S2') - prebuilt = [lltype.malloc(TP, immortal=True), - lltype.malloc(TP, immortal=True)] - def f(x): - p = prebuilt[x] - n = lltype.cast_ptr_to_int(p) - return n - res = self.interp_operations(f, [1]) - y = llmemory.cast_ptr_to_adr(prebuilt[1]) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - - def test_collapsing_ptr_eq(self): - S = lltype.GcStruct('S') - p = lltype.malloc(S) - driver = JitDriver(greens = [], reds = ['n', 'x']) - - def f(n, x): - while n > 0: - driver.can_enter_jit(n=n, x=x) - driver.jit_merge_point(n=n, x=x) - if x: - n -= 1 - n -= 1 - - def main(): - f(10, p) - f(10, lltype.nullptr(S)) - - self.meta_interp(main, []) - 
- def test_enable_opts(self): - jitdriver = JitDriver(greens = [], reds = ['a']) - - class A(object): - def __init__(self, i): - self.i = i - - def f(): - a = A(0) - - while a.i < 10: - jitdriver.jit_merge_point(a=a) - jitdriver.can_enter_jit(a=a) - a = A(a.i + 1) - - self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) - self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) - -class TestLLtype(BaseLLtypeTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/support.py @@ -0,0 +1,261 @@ + +import py, sys +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.ootypesystem import ootype +from pypy.jit.backend.llgraph import runner +from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter import longlong + +def _get_jitcodes(testself, CPUClass, func, values, type_system, + supports_longlong=False, **kwds): + from pypy.jit.codewriter import support, codewriter + + class FakeJitCell: + __compiled_merge_points = [] + def get_compiled_merge_points(self): + return self.__compiled_merge_points[:] + def set_compiled_merge_points(self, lst): + self.__compiled_merge_points = lst + + class FakeWarmRunnerState: + def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): + pass + + def jit_cell_at_key(self, greenkey): + assert greenkey == [] + return self._cell + _cell = FakeJitCell() + + trace_limit = sys.maxint + enable_opts = ALL_OPTS_DICT + + func._jit_unroll_safe_ = True + rtyper = support.annotate(func, values, type_system=type_system) + graphs = rtyper.annotator.translator.graphs + result_kind = 
history.getkind(graphs[0].getreturnvar().concretetype)[0] + + class FakeJitDriverSD: + num_green_args = 0 + portal_graph = graphs[0] + virtualizable_info = None + greenfield_info = None + result_type = result_kind + portal_runner_ptr = "???" + + stats = history.Stats() + cpu = CPUClass(rtyper, stats, None, False) + cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) + testself.cw = cw + policy = JitPolicy() + policy.set_supports_longlong(supports_longlong) + cw.find_all_graphs(policy) + # + testself.warmrunnerstate = FakeWarmRunnerState() + testself.warmrunnerstate.cpu = cpu + FakeJitDriverSD.warmstate = testself.warmrunnerstate + if hasattr(testself, 'finish_setup_for_interp_operations'): + testself.finish_setup_for_interp_operations() + # + cw.make_jitcodes(verbose=True) + +def _run_with_blackhole(testself, args): + from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder + cw = testself.cw + blackholeinterpbuilder = BlackholeInterpBuilder(cw) + blackholeinterp = blackholeinterpbuilder.acquire_interp() + count_i = count_r = count_f = 0 + for value in args: + T = lltype.typeOf(value) + if T == lltype.Signed: + blackholeinterp.setarg_i(count_i, value) + count_i += 1 + elif T == llmemory.GCREF: + blackholeinterp.setarg_r(count_r, value) + count_r += 1 + elif T == lltype.Float: + value = longlong.getfloatstorage(value) + blackholeinterp.setarg_f(count_f, value) + count_f += 1 + else: + raise TypeError(T) + [jitdriver_sd] = cw.callcontrol.jitdrivers_sd + blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) + blackholeinterp.run() + return blackholeinterp._final_result_anytype() + +def _run_with_pyjitpl(testself, args): + + class DoneWithThisFrame(Exception): + pass + + class DoneWithThisFrameRef(DoneWithThisFrame): + def __init__(self, cpu, *args): + DoneWithThisFrame.__init__(self, *args) + + cw = testself.cw + opt = history.Options(listops=True) + metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + metainterp_sd.finish_setup(cw) + [jitdriver_sd] 
= metainterp_sd.jitdrivers_sd + metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) + metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame + metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef + metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame + testself.metainterp = metainterp + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + except DoneWithThisFrame, e: + #if conftest.option.view: + # metainterp.stats.view() + return e.args[0] + else: + raise Exception("FAILED") + +def _run_with_machine_code(testself, args): + metainterp = testself.metainterp + num_green_args = metainterp.jitdriver_sd.num_green_args + loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) + if len(loop_tokens) != 1: + return NotImplemented + # a loop was successfully created by _run_with_pyjitpl(); call it + cpu = metainterp.cpu + for i in range(len(args) - num_green_args): + x = args[num_green_args + i] + typecode = history.getkind(lltype.typeOf(x)) + set_future_value(cpu, i, x, typecode) + faildescr = cpu.execute_token(loop_tokens[0]) + assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') + if metainterp.jitdriver_sd.result_type == history.INT: + return cpu.get_latest_value_int(0) + elif metainterp.jitdriver_sd.result_type == history.REF: + return cpu.get_latest_value_ref(0) + elif metainterp.jitdriver_sd.result_type == history.FLOAT: + return cpu.get_latest_value_float(0) + else: + return None + + +class JitMixin: + basic = True + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) + def check_loop_count(self, count): + """NB. This is a hack; use check_tree_loop_count() or + check_enter_count() for the real thing. 
+ This counts as 1 every bridge in addition to every loop; and it does + not count at all the entry bridges from interpreter, although they + are TreeLoops as well.""" + assert get_stats().compiled_count == count + def check_tree_loop_count(self, count): + assert len(get_stats().loops) == count + def check_loop_count_at_most(self, count): + assert get_stats().compiled_count <= count + def check_enter_count(self, count): + assert get_stats().enter_count == count + def check_enter_count_at_most(self, count): + assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): + assert get_stats().aborted_count == count + def check_aborted_count_at_least(self, count): + assert get_stats().aborted_count >= count + + def meta_interp(self, *args, **kwds): + kwds['CPUClass'] = self.CPUClass + kwds['type_system'] = self.type_system + if "backendopt" not in kwds: + kwds["backendopt"] = False + return ll_meta_interp(*args, **kwds) + + def interp_operations(self, f, args, **kwds): + # get the JitCodes for the function f + _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + # try to run it with blackhole.py + result1 = _run_with_blackhole(self, args) + # try to run it with pyjitpl.py + result2 = _run_with_pyjitpl(self, args) + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented + # + if (longlong.supports_longlong and + isinstance(result1, longlong.r_float_storage)): + result1 = longlong.getrealfloat(result1) + return result1 + + def check_history(self, expected=None, **isns): + # this can be used after calling meta_interp + get_stats().check_history(expected, **isns) + + def check_operations_history(self, expected=None, **isns): + # this can be used after interp_operations + if expected is not None: + expected = dict(expected) + 
expected['jump'] = 1 + self.metainterp.staticdata.stats.check_history(expected, **isns) + + +class LLJitMixin(JitMixin): + type_system = 'lltype' + CPUClass = runner.LLtypeCPU + + @staticmethod + def Ptr(T): + return lltype.Ptr(T) + + @staticmethod + def GcStruct(name, *fields, **kwds): + S = lltype.GcStruct(name, *fields, **kwds) + return S + + malloc = staticmethod(lltype.malloc) + nullptr = staticmethod(lltype.nullptr) + + @staticmethod + def malloc_immortal(T): + return lltype.malloc(T, immortal=True) + + def _get_NODE(self): + NODE = lltype.GcForwardReference() + NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), + ('next', lltype.Ptr(NODE)))) + return NODE + +class OOJitMixin(JitMixin): + type_system = 'ootype' + #CPUClass = runner.OOtypeCPU + + def setup_class(cls): + py.test.skip("ootype tests skipped for now") + + @staticmethod + def Ptr(T): + return T + + @staticmethod + def GcStruct(name, *fields, **kwds): + if 'hints' in kwds: + kwds['_hints'] = kwds['hints'] + del kwds['hints'] + I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) + return I + + malloc = staticmethod(ootype.new) + nullptr = staticmethod(ootype.null) + + @staticmethod + def malloc_immortal(T): + return ootype.new(T) + + def _get_NODE(self): + NODE = ootype.Instance('NODE', ootype.ROOT, {}) + NODE._add_fields({'value': ootype.Signed, + 'next': NODE}) + return NODE diff --git a/pypy/jit/backend/cli/test/test_basic.py b/pypy/jit/backend/cli/test/test_basic.py --- a/pypy/jit/backend/cli/test/test_basic.py +++ b/pypy/jit/backend/cli/test/test_basic.py @@ -1,14 +1,14 @@ import py from pypy.jit.backend.cli.runner import CliCPU -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit -class CliJitMixin(test_basic.OOJitMixin): +class CliJitMixin(suport.OOJitMixin): CPUClass = CliCPU def setup_class(cls): from pypy.translator.cli.support import PythonNet PythonNet.System # possibly raises Skip -class TestBasic(CliJitMixin, 
test_basic.TestOOtype): +class TestBasic(CliJitMixin, test_ajit.TestOOtype): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -57,8 +56,6 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +88,13 @@ # # Custom implementations - at jit.purefunction def ll_math_isnan(y): - return bool(math_isnan(y)) + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. 
+ return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + return y != 0 and y * .5 == y ll_math_copysign = math_copysign diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver from pypy.rlib import objectmodel diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,3 +1,5 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype @@ -509,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.jitprof import * diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy 
import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/jit/backend/x86/test/test_basic.py b/pypy/jit/backend/x86/test/test_basic.py --- a/pypy/jit/backend/x86/test/test_basic.py +++ b/pypy/jit/backend/x86/test/test_basic.py @@ -1,18 +1,18 @@ import py from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver -class Jit386Mixin(test_basic.LLJitMixin): +class Jit386Mixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass -class TestBasic(Jit386Mixin, test_basic.BaseLLtypeTests): +class TestBasic(Jit386Mixin, test_ajit.BaseLLtypeTests): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py def test_bug(self): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- 
a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class DelTests: diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- 
a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -5,7 +5,7 @@ from pypy.rlib.libffi import ArgChain from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestFfiCall(LLJitMixin, _TestLibffiCall): diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # 
TODO 'ullong_floordiv': jvm.LDIV, # valid? 'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/translator/jvm/src/pypy/StatResult.java b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/jit/metainterp/test/test_float.py b/pypy/jit/metainterp/test/test_float.py --- a/pypy/jit/metainterp/test/test_float.py +++ b/pypy/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class FloatTests: diff --git a/pypy/jit/tl/tla/test_tla.py b/pypy/jit/tl/tla/test_tla.py --- a/pypy/jit/tl/tla/test_tla.py +++ b/pypy/jit/tl/tla/test_tla.py @@ -155,7 +155,7 @@ # ____________________________________________________________ -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): def test_loop(self): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test import test_loop -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): diff --git a/pypy/tool/jitlogparser/storage.py 
b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 @@ +import py from pypy.translator.jvm.test.runtest import JvmTest from 
pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... + s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import 
StopAtXPolicy from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import history diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py --- a/pypy/jit/metainterp/test/test_immutable.py +++ b/pypy/jit/metainterp/test/test_immutable.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ImmutableFieldsTests: diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -1,6 +1,6 @@ import py -from pypy.jit.metainterp.test.test_basic import JitMixin +from pypy.jit.metainterp.test.support import JitMixin from pypy.jit.tl.spli import interpreter, objects, serializer from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.backend.llgraph import runner diff --git 
a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -12,7 +12,7 @@ import py from pypy.jit.metainterp.memmgr import MemoryManager -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new 
InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, hint from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = getfield_raw(ticker_address, descr=) - setfield_gc(_, _, 
descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -266,7 +265,7 @@ if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,14 +22,20 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - ## XXX! we would like: return jit.hint(self, promote=True).items - ## XXX! but it gives horrible performance in some cases + # an idea - we want to promote only items that we know won't change + # too often. this is the case for builtin functions and functions + # with known constant defaults. 
Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items return self.items def getitem(self, idx): @@ -46,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -622,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -6,7 +6,7 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import BoxInt -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -198,44 +198,6 @@ print print '@' * 79 - def 
test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." - i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - 
self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) - while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -783,269 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if 
i %s %f: sa += 1 - else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, 
([], res)) - - def test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = 
t - i += 1 - return 3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 def test_intbound_simple(self): diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder from pypy.jit.metainterp.blackhole import BlackholeInterpreter from pypy.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, vref_None from pypy.rlib.jit import virtual_ref, virtual_ref_finish from pypy.rlib.objectmodel import compute_unique_id -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.virtualref import VirtualRefInfo diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -11,6 +11,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): diff --git a/pypy/jit/metainterp/test/test_string.py 
b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class StringTests: diff --git a/pypy/jit/metainterp/test/test_tlc.py b/pypy/jit/metainterp/test/test_tlc.py --- a/pypy/jit/metainterp/test/test_tlc.py +++ b/pypy/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from pypy.jit.tl import tlc -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class TLCTests: diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. */ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,7 +5,7 @@ soon as possible (at least in a simple case). 
""" -import weakref, random +import weakref import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver class ListTests(object): diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype +from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = 
op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -101,7 +101,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. 
*/ diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 @@ import py, sys -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.jit.codewriter.policy import StopAtXPolicy diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -1175,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1187,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int 
locale_islower(int chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class ToyLanguageTests: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,7 +39,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "md5", "cStringIO", "array"])) + "struct", "_md5", "cStringIO", "array"])) working_oo_modules = default_modules.copy() working_oo_modules.update(dict.fromkeys( From commits-noreply at bitbucket.org Fri Apr 15 16:51:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 16:51:09 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: small tweaks Message-ID: <20110415145109.063D92A2049@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3519:2619562575c7 Date: 2011-04-15 16:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/2619562575c7/ Log: small tweaks diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ 
b/talk/icooolps2011/paper.tex @@ -213,7 +213,7 @@ and less error prone than manually writing a JIT compiler. Similarly, writing in a high level language such as RPython is easier than writing in C. -We call the code that runs on top of an interpreter implemented with PyPy the +We call the code that runs on top of a VM implemented with PyPy the \emph{user code} or \emph{user program}. %___________________________________________________________________________ @@ -385,7 +385,7 @@ There is one kind of hint for both of these conditions. -\subsection{Where Do All the Constants Come From} +\subsection{Where Do All the Constants Come From?} It is worth clarifying what a ``constant'' is in this context. A variable of the trace is said to be constant if its value is statically known by the @@ -683,7 +683,7 @@ In this section we describe how the simple object model from Section~\ref{sub:running} can be made efficient using the hints described in the -previous the section. The object model there is typical for many current +previous section. The object model there is typical for many current dynamic languages (such as Python, Ruby and JavaScript) as it relies heavily on hash-maps to implement its objects. 
diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 295a4bb35ae37f83812c8d212381354e07d8d44d..81df7e949fcd805cf5223526d3df281863c5f517 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 16:51:13 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 15 Apr 2011 16:51:13 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: small tweaks Message-ID: <20110415145113.DF15D2A204C@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3519:2619562575c7 Date: 2011-04-15 16:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/2619562575c7/ Log: small tweaks diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -213,7 +213,7 @@ and less error prone than manually writing a JIT compiler. Similarly, writing in a high level language such as RPython is easier than writing in C. -We call the code that runs on top of an interpreter implemented with PyPy the +We call the code that runs on top of a VM implemented with PyPy the \emph{user code} or \emph{user program}. %___________________________________________________________________________ @@ -385,7 +385,7 @@ There is one kind of hint for both of these conditions. -\subsection{Where Do All the Constants Come From} +\subsection{Where Do All the Constants Come From?} It is worth clarifying what a ``constant'' is in this context. A variable of the trace is said to be constant if its value is statically known by the @@ -683,7 +683,7 @@ In this section we describe how the simple object model from Section~\ref{sub:running} can be made efficient using the hints described in the -previous the section. The object model there is typical for many current +previous section. The object model there is typical for many current dynamic languages (such as Python, Ruby and JavaScript) as it relies heavily on hash-maps to implement its objects. 
diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 295a4bb35ae37f83812c8d212381354e07d8d44d..81df7e949fcd805cf5223526d3df281863c5f517 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 17:35:56 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 17:35:56 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: submitted version Message-ID: <20110415153556.BB70E2A2049@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3520:9865a1dc7115 Date: 2011-04-15 17:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/9865a1dc7115/ Log: submitted version diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -102,6 +102,11 @@ \category{D.3.4}{Programming Languages}{Processors}[code generation, incremental compilers, interpreters, run-time environments] +\terms +Languages, Performance, Experimentation + +\keywords{Tracing JIT, Runtime Feedback, Interpreter, Code Generation, Meta-Programming} + \begin{abstract} Meta-tracing JIT compilers can be applied to a variety of different @@ -981,8 +986,9 @@ implementation of dynamic languages on top of JVMs easier. The bytecode gives the user access to generalized inline caches. It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. We already explored promotion in other context, such as earlier versions of -PyPy's JIT as well as a Prolog partial evaluator -\cite{bolz_towards_2009}. Promotion is also heavily +PyPy's JIT. +%as well as a Prolog partial evaluator \cite{bolz_towards_2009} +Promotion is also heavily used by Psyco \cite{rigo_representation-based_2004} (promotion is called "unlifting" in this paper) a method-based JIT compiler for Python written by one of the authors. 
Promotion is quite similar to diff --git a/talk/icooolps2011/bolz-hints.pdf b/talk/icooolps2011/bolz-hints.pdf new file mode 100644 index 0000000000000000000000000000000000000000..96937d926b2c6aa2adc790667a4992a22446dce0 GIT binary patch [cut] diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 81df7e949fcd805cf5223526d3df281863c5f517..96937d926b2c6aa2adc790667a4992a22446dce0 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 17:36:01 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 17:36:01 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: submitted version Message-ID: <20110415153601.132E92A204F@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3520:9865a1dc7115 Date: 2011-04-15 17:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/9865a1dc7115/ Log: submitted version diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -102,6 +102,11 @@ \category{D.3.4}{Programming Languages}{Processors}[code generation, incremental compilers, interpreters, run-time environments] +\terms +Languages, Performance, Experimentation + +\keywords{Tracing JIT, Runtime Feedback, Interpreter, Code Generation, Meta-Programming} + \begin{abstract} Meta-tracing JIT compilers can be applied to a variety of different @@ -981,8 +986,9 @@ implementation of dynamic languages on top of JVMs easier. The bytecode gives the user access to generalized inline caches. It requires of course compilation to JVM bytecode instead of simply writing an interpreter, predictability of performance across JVMs is also an open question. We already explored promotion in other context, such as earlier versions of -PyPy's JIT as well as a Prolog partial evaluator -\cite{bolz_towards_2009}. Promotion is also heavily +PyPy's JIT. 
+%as well as a Prolog partial evaluator \cite{bolz_towards_2009} +Promotion is also heavily used by Psyco \cite{rigo_representation-based_2004} (promotion is called "unlifting" in this paper) a method-based JIT compiler for Python written by one of the authors. Promotion is quite similar to diff --git a/talk/icooolps2011/bolz-hints.pdf b/talk/icooolps2011/bolz-hints.pdf new file mode 100644 index 0000000000000000000000000000000000000000..96937d926b2c6aa2adc790667a4992a22446dce0 GIT binary patch [cut] diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 81df7e949fcd805cf5223526d3df281863c5f517..96937d926b2c6aa2adc790667a4992a22446dce0 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 17:57:25 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 17:57:25 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: make anto have two affiliations Message-ID: <20110415155725.245062A2049@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3521:d542fdd7d1b2 Date: 2011-04-15 17:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/d542fdd7d1b2/ Log: make anto have two affiliations diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -82,7 +82,7 @@ \title{Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages} -\authorinfo{Carl Friedrich Bolz$^a$ \and Antonio Cuni$^a$ \and Maciej Fijałkowski$^b$ \and Michael Leuschel$^a$ \and \\ +\authorinfo{Carl Friedrich Bolz$^a$ \and Antonio Cuni$^{a, c}$ \and Maciej Fijałkowski$^b$ \and Michael Leuschel$^a$ \and \\ Samuele Pedroni$^c$ \and Armin Rigo$^a$} {$^a$Heinrich-Heine-Universität Düsseldorf, STUPS Group, Germany diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 96937d926b2c6aa2adc790667a4992a22446dce0..8f565e168273b6727e94e7d51edd71a0674852a7 GIT binary patch [cut] From commits-noreply at bitbucket.org 
Fri Apr 15 17:57:28 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 15 Apr 2011 17:57:28 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: make anto have two affiliations Message-ID: <20110415155728.B09FF2A204D@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3521:d542fdd7d1b2 Date: 2011-04-15 17:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/d542fdd7d1b2/ Log: make anto have two affiliations diff --git a/talk/icooolps2011/paper.tex b/talk/icooolps2011/paper.tex --- a/talk/icooolps2011/paper.tex +++ b/talk/icooolps2011/paper.tex @@ -82,7 +82,7 @@ \title{Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages} -\authorinfo{Carl Friedrich Bolz$^a$ \and Antonio Cuni$^a$ \and Maciej Fijałkowski$^b$ \and Michael Leuschel$^a$ \and \\ +\authorinfo{Carl Friedrich Bolz$^a$ \and Antonio Cuni$^{a, c}$ \and Maciej Fijałkowski$^b$ \and Michael Leuschel$^a$ \and \\ Samuele Pedroni$^c$ \and Armin Rigo$^a$} {$^a$Heinrich-Heine-Universität Düsseldorf, STUPS Group, Germany diff --git a/talk/icooolps2011/jit-hints.pdf b/talk/icooolps2011/jit-hints.pdf index 96937d926b2c6aa2adc790667a4992a22446dce0..8f565e168273b6727e94e7d51edd71a0674852a7 GIT binary patch [cut] From commits-noreply at bitbucket.org Fri Apr 15 22:47:31 2011 From: commits-noreply at bitbucket.org (hpk42) Date: Fri, 15 Apr 2011 22:47:31 +0200 (CEST) Subject: [pypy-svn] pypy default: no need to use internal pytest imports here Message-ID: <20110415204731.64F2B2A2047@codespeak.net> Author: holger krekel Branch: Changeset: r43391:7dca9ebbc108 Date: 2011-04-15 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/7dca9ebbc108/ Log: no need to use internal pytest imports here diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -271,12 +271,12 @@ assert log.result == 42 def test_skip(self): - import 
_pytest + import pytest def f(): import sys print >> sys.stderr, 'SKIP: foobar' # - raises(_pytest.runner.Skipped, "self.run(f, [])") + raises(pytest.skip.Exception, "self.run(f, [])") def test_parse_jitlog(self): def f(): From commits-noreply at bitbucket.org Fri Apr 15 22:47:35 2011 From: commits-noreply at bitbucket.org (hpk42) Date: Fri, 15 Apr 2011 22:47:35 +0200 (CEST) Subject: [pypy-svn] pypy default: no need to use internal pytest imports here Message-ID: <20110415204735.0459D2A204B@codespeak.net> Author: holger krekel Branch: Changeset: r43391:7dca9ebbc108 Date: 2011-04-15 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/7dca9ebbc108/ Log: no need to use internal pytest imports here diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -271,12 +271,12 @@ assert log.result == 42 def test_skip(self): - import _pytest + import pytest def f(): import sys print >> sys.stderr, 'SKIP: foobar' # - raises(_pytest.runner.Skipped, "self.run(f, [])") + raises(pytest.skip.Exception, "self.run(f, [])") def test_parse_jitlog(self): def f(): From commits-noreply at bitbucket.org Fri Apr 15 22:55:40 2011 From: commits-noreply at bitbucket.org (fijal) Date: Fri, 15 Apr 2011 22:55:40 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: fix virtualizables Message-ID: <20110415205540.148ED2A2047@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43392:720bc31147bc Date: 2011-04-15 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/720bc31147bc/ Log: fix virtualizables diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,6 +2,7 @@ from pypy.rpython.extregistry import ExtRegistryEntry from 
pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -45,7 +46,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : "", 'inst_node' : ""}) + XY, {'inst_x' : IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -210,7 +211,8 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) + XY2, {'inst_x' : IR_IMMUTABLE, + 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,6 +1,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -10,7 +11,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo: +class VirtualizableInfo(object): TOKEN_NONE = 0 # must be 0 -- see also 
x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -33,11 +34,13 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, suffix in all_fields.iteritems(): - if suffix == '[*]': + for name, tp in all_fields.iteritems(): + if tp == IR_ARRAY_IMMUTABLE: array_fields.append(name) + elif tp == IR_IMMUTABLE: + static_fields.append(name) else: - static_fields.append(name) + raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # From commits-noreply at bitbucket.org Fri Apr 15 22:55:36 2011 From: commits-noreply at bitbucket.org (fijal) Date: Fri, 15 Apr 2011 22:55:36 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: fix virtualizables Message-ID: <20110415205536.EB30B2A2047@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43392:720bc31147bc Date: 2011-04-15 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/720bc31147bc/ Log: fix virtualizables diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,6 +2,7 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -45,7 +46,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : "", 'inst_node' : ""}) + XY, {'inst_x' : IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 
'XY') @@ -210,7 +211,8 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) + XY2, {'inst_x' : IR_IMMUTABLE, + 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,6 +1,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -10,7 +11,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo: +class VirtualizableInfo(object): TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -33,11 +34,13 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, suffix in all_fields.iteritems(): - if suffix == '[*]': + for name, tp in all_fields.iteritems(): + if tp == IR_ARRAY_IMMUTABLE: array_fields.append(name) + elif tp == IR_IMMUTABLE: + static_fields.append(name) else: - static_fields.append(name) + raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # From commits-noreply at bitbucket.org Sat Apr 16 11:09:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 11:09:28 +0200 (CEST) Subject: [pypy-svn] pypy default: I think that "SKIP:" is supposed to be 
printed to stderr, not stdout. Message-ID: <20110416090928.5FF79282B90@codespeak.net> Author: Armin Rigo Branch: Changeset: r43393:7bcedc5f2e61 Date: 2011-04-16 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/7bcedc5f2e61/ Log: I think that "SKIP:" is supposed to be printed to stderr, not stdout. diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1498,7 +1498,7 @@ try: from _ffi import CDLL, types except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') + sys.stderr.write('SKIP: cannot import _ffi\n') return 0 libm = CDLL(libm_name) From commits-noreply at bitbucket.org Sat Apr 16 11:09:31 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 11:09:31 +0200 (CEST) Subject: [pypy-svn] pypy default: I think that "SKIP:" is supposed to be printed to stderr, not stdout. Message-ID: <20110416090931.BFDC5282BAD@codespeak.net> Author: Armin Rigo Branch: Changeset: r43393:7bcedc5f2e61 Date: 2011-04-16 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/7bcedc5f2e61/ Log: I think that "SKIP:" is supposed to be printed to stderr, not stdout. 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1498,7 +1498,7 @@ try: from _ffi import CDLL, types except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') + sys.stderr.write('SKIP: cannot import _ffi\n') return 0 libm = CDLL(libm_name) From commits-noreply at bitbucket.org Sat Apr 16 11:20:39 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 11:20:39 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: Improve the comment documenting other invariant Message-ID: <20110416092039.5D01A282B90@codespeak.net> Author: Maciej Fijalkowski Branch: jit-short_from_state Changeset: r43394:87b741d57cc2 Date: 2011-04-16 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/87b741d57cc2/ Log: Improve the comment documenting other invariant diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -13,6 +13,8 @@ the same (same numbers or same pointers) (2) it's fine to remove the call completely if we can guess the result according to rule 1 + (3) the function call can be moved around by optimizer, + but only so it'll be called earlier and not later. Most importantly it doesn't mean that pure function has no observable side effect, but those side effects can be ommited (ie caching). 
From commits-noreply at bitbucket.org Sat Apr 16 11:20:42 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 11:20:42 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: Improve the comment documenting other invariant Message-ID: <20110416092042.C064A282BAD@codespeak.net> Author: Maciej Fijalkowski Branch: jit-short_from_state Changeset: r43394:87b741d57cc2 Date: 2011-04-16 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/87b741d57cc2/ Log: Improve the comment documenting other invariant diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -13,6 +13,8 @@ the same (same numbers or same pointers) (2) it's fine to remove the call completely if we can guess the result according to rule 1 + (3) the function call can be moved around by optimizer, + but only so it'll be called earlier and not later. Most importantly it doesn't mean that pure function has no observable side effect, but those side effects can be ommited (ie caching). From commits-noreply at bitbucket.org Sat Apr 16 12:29:45 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 12:29:45 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix. Actually the other encoding is not really wrong, but just non-standard. Message-ID: <20110416102945.86C80282B90@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43395:923b66e6cd0d Date: 2011-04-16 11:23 +0200 http://bitbucket.org/pypy/pypy/changeset/923b66e6cd0d/ Log: Fix. Actually the other encoding is not really wrong, but just non- standard. 
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -672,8 +672,8 @@ define_modrm_modes(insnname + '_r*', [rex_type, '\x8B', register(1, 8)]) define_modrm_modes(insnname + '_*i', [rex_type, '\xC7', orbyte(0<<3)], [immediate(2)]) -define_modrm_modes('MOV8_*r', [rex_w, '\x88', byte_register(2, 8)], regtype='BYTE') -define_modrm_modes('MOV8_*i', [rex_w, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') +define_modrm_modes('MOV8_*r', [rex_nw, '\x88', byte_register(2, 8)], regtype='BYTE') +define_modrm_modes('MOV8_*i', [rex_nw, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') define_modrm_modes('MOVZX8_r*', [rex_w, '\x0F\xB6', register(1, 8)], regtype='BYTE') define_modrm_modes('MOVSX8_r*', [rex_w, '\x0F\xBE', register(1, 8)], regtype='BYTE') From commits-noreply at bitbucket.org Sat Apr 16 12:29:47 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 12:29:47 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: In-progress: add HIDE_INTO_PTR32 and SHOW_FROM_PTR32 as explicit Message-ID: <20110416102947.47F70282B90@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43396:0b1a8ed1bd80 Date: 2011-04-16 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/0b1a8ed1bd80/ Log: In-progress: add HIDE_INTO_PTR32 and SHOW_FROM_PTR32 as explicit operations, inserted by llsupport.gc. 
diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -1,4 +1,3 @@ - import py, sys from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -10,6 +9,9 @@ from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import longlong +class SkipThisRun(Exception): + pass + def _get_jitcodes(testself, CPUClass, func, values, type_system, supports_longlong=False, **kwds): from pypy.jit.codewriter import support, codewriter @@ -112,6 +114,8 @@ #if conftest.option.view: # metainterp.stats.view() return e.args[0] + except SkipThisRun: + return NotImplemented else: raise Exception("FAILED") @@ -180,10 +184,11 @@ result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented + if result2 != NotImplemented: + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented # if (longlong.supports_longlong and isinstance(result1, longlong.r_float_storage)): diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -445,6 +445,9 @@ #'INSTANCEOF/1db', #'SUBCLASSOF/2b', # + # backend-only operations + 'HIDE_INTO_PTR32/1', + 'SHOW_FROM_PTR32/1', '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- 'GETARRAYITEM_GC/2d', diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,4 +1,4 @@ -import os +import os, py 
from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror @@ -17,6 +17,8 @@ from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.descr import GcPtrHidden32FieldDescr +from pypy.jit.backend.llsupport.descr import GcPtrHidden32ArrayDescr from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ @@ -36,7 +38,13 @@ def do_write_barrier(self, gcref_struct, gcref_newptr): pass def rewrite_assembler(self, cpu, operations): - pass + if not we_are_translated(): + # skip non-translated tests (using Boehm) if compressed ptrs + for op in operations: + if (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or + isinstance(op.getdescr(), GcPtrHidden32ArrayDescr)): + from pypy.jit.metainterp.test.support import SkipThisRun + raise SkipThisRun("non-translated test with compressptr") def can_inline_malloc(self, descr): return False def can_inline_malloc_varsize(self, descr, num_elem): @@ -579,6 +587,7 @@ self.single_gcref_descr = GcPtrFieldDescr('', 0) self.supports_compressed_ptrs = gcdescr.config.translation.compressptr if self.supports_compressed_ptrs: + assert WORD == 8 assert rffi.sizeof(rffi.UINT)==rffi.sizeof(llmemory.HiddenGcRef32) # make a TransformerLayoutBuilder and save it on the translator @@ -773,7 +782,7 @@ llmemory.cast_ptr_to_adr(gcref_newptr)) def rewrite_assembler(self, cpu, operations): - # Perform two kinds of rewrites in parallel: + # Perform three kinds of rewrites in parallel: # # - Add COND_CALLs to the write barrier before SETFIELD_GC and # SETARRAYITEM_GC operations. @@ -786,6 +795,9 @@ # replace direct usage of ConstPtr with a BoxPtr loaded by a # GETFIELD_RAW from the array 'gcrefs.list'. 
# + # - For compressptr, add explicit HIDE_INTO_PTR32 and + # SHOW_FROM_PTR32 operations. + # newops = [] # we can only remember one malloc since the next malloc can possibly # collect @@ -833,6 +845,30 @@ # write_barrier_from_array self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETARRAYITEM_RAW) + # ---------- compressptr support ---------- + if (self.supports_compressed_ptrs and + (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or + isinstance(op.getdescr(), GcPtrHidden32ArrayDescr))): + num = op.getopnum() + if (num == rop.GETFIELD_GC or + num == rop.GETFIELD_GC_PURE or + num == rop.GETARRAYITEM_GC or + num == rop.GETARRAYITEM_GC_PURE): + v1 = BoxInt() + v2 = op.result + newops.append(op.copy_and_change(num, result=v1)) + op = ResOperation(rop.SHOW_FROM_PTR32, [v1], v2) + elif num == rop.SETFIELD_GC or num == rop.SETFIELD_RAW: + v1 = op.getarg(1) + v2 = BoxInt() + newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) + op = op.copy_and_change(num, args=[op.getarg(0), v2]) + elif num == rop.SETARRAYITEM_GC or num == rop.SETARRAYITEM_RAW: + v1 = op.getarg(2) + v2 = BoxInt() + newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) + op = op.copy_and_change(num, args=[op.getarg(0), + op.getarg(1), v2]) # ---------- newops.append(op) del operations[:] diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,6 +312,8 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.HIDE_INTO_PTR32, + rop.SHOW_FROM_PTR32, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) From commits-noreply at bitbucket.org Sat Apr 16 12:29:49 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 12:29:49 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: In-progress: calls with HiddenGcRef32s as arguments or result. 
Message-ID: <20110416102949.42863282B90@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43397:75a62468dae4 Date: 2011-04-16 12:28 +0200 http://bitbucket.org/pypy/pypy/changeset/75a62468dae4/ Log: In-progress: calls with HiddenGcRef32s as arguments or result. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -16,7 +16,7 @@ from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr -from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.descr import get_call_descr, BaseCallDescr from pypy.jit.backend.llsupport.descr import GcPtrHidden32FieldDescr from pypy.jit.backend.llsupport.descr import GcPtrHidden32ArrayDescr from pypy.rpython.memory.gctransform import asmgcroot @@ -42,7 +42,9 @@ # skip non-translated tests (using Boehm) if compressed ptrs for op in operations: if (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or - isinstance(op.getdescr(), GcPtrHidden32ArrayDescr)): + isinstance(op.getdescr(), GcPtrHidden32ArrayDescr) or + (isinstance(op.getdescr(), BaseCallDescr) and + 'H' in op.getdescr().get_arg_types())): from pypy.jit.metainterp.test.support import SkipThisRun raise SkipThisRun("non-translated test with compressptr") def can_inline_malloc(self, descr): @@ -846,9 +848,10 @@ self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- compressptr support ---------- + descr = op.getdescr() if (self.supports_compressed_ptrs and - (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or - isinstance(op.getdescr(), GcPtrHidden32ArrayDescr))): + (isinstance(descr, GcPtrHidden32FieldDescr) or + isinstance(descr, GcPtrHidden32ArrayDescr))): num = op.getopnum() if (num == rop.GETFIELD_GC or num == 
rop.GETFIELD_GC_PURE or @@ -869,6 +872,19 @@ newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) op = op.copy_and_change(num, args=[op.getarg(0), op.getarg(1), v2]) + elif (self.supports_compressed_ptrs and + isinstance(descr, BaseCallDescr)): + args = op.getarglist() + arg_classes = descr.get_arg_types() + assert len(args) == len(arg_classes) + for i in range(len(arg_classes)): + if arg_classes[i] == 'H': + v1 = args[i] + v2 = BoxInt() + newops.append(ResOperation(rop.HIDE_INTO_PTR32, + [v1], v2)) + args[i] = v2 + op = op.copy_and_change(op.getopnum(), args=args) # ---------- newops.append(op) del operations[:] diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -44,3 +44,9 @@ return z.n * 1000 + ord(t.c) res = self.interp_operations(f, [42]) assert res == 42063 + + def test_call_argument(self): + ... + + def test_call_result(self): + ... 
diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -1,4 +1,5 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated @@ -34,6 +35,18 @@ raise ValueError(TYPE) return result +def cast_to_gcref(value): + TYPE = lltype.typeOf(value) + if isinstance(TYPE.TO, lltype.GcOpaqueType): + if TYPE == llmemory.GCREF: + return value + elif TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) + else: + raise TypeError(TYPE) + else: + return lltype.cast_opaque_ptr(llmemory.GCREF, value) + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/jit/codewriter/assembler.py b/pypy/jit/codewriter/assembler.py --- a/pypy/jit/codewriter/assembler.py +++ b/pypy/jit/codewriter/assembler.py @@ -84,7 +84,7 @@ return True constants = self.constants_i elif kind == 'ref': - value = lltype.cast_opaque_ptr(llmemory.GCREF, value) + value = heaptracker.cast_to_gcref(value) constants = self.constants_r elif kind == 'float': if const.concretetype == lltype.Float: diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -531,7 +531,7 @@ def bh_call_r(self, func, calldescr, args_i, args_r, args_f): assert isinstance(calldescr, GcPtrCallDescr) if not we_are_translated(): - calldescr.verify_types(args_i, args_r, args_f, history.REF) + calldescr.verify_types(args_i, args_r, args_f, history.REF + 'H') return calldescr.call_stub(func, args_i, args_r, args_f) def bh_call_f(self, func, calldescr, args_i, args_r, args_f): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ 
b/pypy/jit/backend/llsupport/descr.py @@ -1,5 +1,6 @@ import py from pypy.rpython.lltypesystem import lltype, rffi, llmemory, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.jit.backend.llsupport import symbolic, support from pypy.jit.metainterp.history import AbstractDescr, getkind, BoxInt, BoxPtr from pypy.jit.metainterp.history import BasicFailDescr, LoopToken, BoxFloat @@ -270,7 +271,7 @@ arg_classes = '' # <-- annotation hack def __init__(self, arg_classes, extrainfo=None): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + self.arg_classes = arg_classes # string of chars, see get_call_descr self.extrainfo = extrainfo def get_extra_info(self): @@ -295,6 +296,9 @@ c = 'f' elif c == 'f' and longlong.supports_longlong: return 'longlong.getrealfloat(%s)' % (process('L'),) + elif c == 'H': + return 'llop.hide_into_ptr32(llmemory.HiddenGcRef32, %s)' % ( + process('r'),) arg = 'args_%s[%d]' % (c, seen[c]) seen[c] += 1 return arg @@ -310,6 +314,8 @@ return lltype.Void elif arg == 'L': return lltype.SignedLongLong + elif arg == 'H': + return llmemory.HiddenGcRef32 else: raise AssertionError(arg) @@ -344,7 +350,8 @@ def verify_types(self, args_i, args_r, args_f, return_type): assert self._return_type in return_type assert self.arg_classes.count('i') == len(args_i or ()) - assert self.arg_classes.count('r') == len(args_r or ()) + assert (self.arg_classes.count('r') + + self.arg_classes.count('H')) == len(args_r or ()) assert (self.arg_classes.count('f') + self.arg_classes.count('L')) == len(args_f or ()) @@ -435,10 +442,14 @@ def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): arg_classes = [] for ARG in ARGS: - assert ARG != llmemory.HiddenGcRef32 kind = getkind(ARG) - if kind == 'int': arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') + if kind == 'int': + arg_classes.append('i') + elif kind == 'ref': + if ARG == llmemory.HiddenGcRef32: + arg_classes.append('H') + else: + arg_classes.append('r') elif 
kind == 'float': if is_longlong(ARG): arg_classes.append('L') From commits-noreply at bitbucket.org Sat Apr 16 12:29:54 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 12:29:54 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix. Actually the other encoding is not really wrong, but just non-standard. Message-ID: <20110416102954.C7B5A282BD8@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43395:923b66e6cd0d Date: 2011-04-16 11:23 +0200 http://bitbucket.org/pypy/pypy/changeset/923b66e6cd0d/ Log: Fix. Actually the other encoding is not really wrong, but just non- standard. diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -672,8 +672,8 @@ define_modrm_modes(insnname + '_r*', [rex_type, '\x8B', register(1, 8)]) define_modrm_modes(insnname + '_*i', [rex_type, '\xC7', orbyte(0<<3)], [immediate(2)]) -define_modrm_modes('MOV8_*r', [rex_w, '\x88', byte_register(2, 8)], regtype='BYTE') -define_modrm_modes('MOV8_*i', [rex_w, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') +define_modrm_modes('MOV8_*r', [rex_nw, '\x88', byte_register(2, 8)], regtype='BYTE') +define_modrm_modes('MOV8_*i', [rex_nw, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') define_modrm_modes('MOVZX8_r*', [rex_w, '\x0F\xB6', register(1, 8)], regtype='BYTE') define_modrm_modes('MOVSX8_r*', [rex_w, '\x0F\xBE', register(1, 8)], regtype='BYTE') From commits-noreply at bitbucket.org Sat Apr 16 12:29:57 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 12:29:57 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: In-progress: add HIDE_INTO_PTR32 and SHOW_FROM_PTR32 as explicit Message-ID: <20110416102957.78F41282BE9@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43396:0b1a8ed1bd80 Date: 2011-04-16 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/0b1a8ed1bd80/ Log: In-progress: add 
HIDE_INTO_PTR32 and SHOW_FROM_PTR32 as explicit operations, inserted by llsupport.gc. diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py --- a/pypy/jit/metainterp/test/support.py +++ b/pypy/jit/metainterp/test/support.py @@ -1,4 +1,3 @@ - import py, sys from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -10,6 +9,9 @@ from pypy.jit.codewriter.policy import JitPolicy from pypy.jit.codewriter import longlong +class SkipThisRun(Exception): + pass + def _get_jitcodes(testself, CPUClass, func, values, type_system, supports_longlong=False, **kwds): from pypy.jit.codewriter import support, codewriter @@ -112,6 +114,8 @@ #if conftest.option.view: # metainterp.stats.view() return e.args[0] + except SkipThisRun: + return NotImplemented else: raise Exception("FAILED") @@ -180,10 +184,11 @@ result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented + if result2 != NotImplemented: + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented # if (longlong.supports_longlong and isinstance(result1, longlong.r_float_storage)): diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -445,6 +445,9 @@ #'INSTANCEOF/1db', #'SUBCLASSOF/2b', # + # backend-only operations + 'HIDE_INTO_PTR32/1', + 'SHOW_FROM_PTR32/1', '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- 'GETARRAYITEM_GC/2d', diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- 
a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,4 +1,4 @@ -import os +import os, py from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror @@ -17,6 +17,8 @@ from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.descr import GcPtrHidden32FieldDescr +from pypy.jit.backend.llsupport.descr import GcPtrHidden32ArrayDescr from pypy.rpython.memory.gctransform import asmgcroot # ____________________________________________________________ @@ -36,7 +38,13 @@ def do_write_barrier(self, gcref_struct, gcref_newptr): pass def rewrite_assembler(self, cpu, operations): - pass + if not we_are_translated(): + # skip non-translated tests (using Boehm) if compressed ptrs + for op in operations: + if (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or + isinstance(op.getdescr(), GcPtrHidden32ArrayDescr)): + from pypy.jit.metainterp.test.support import SkipThisRun + raise SkipThisRun("non-translated test with compressptr") def can_inline_malloc(self, descr): return False def can_inline_malloc_varsize(self, descr, num_elem): @@ -579,6 +587,7 @@ self.single_gcref_descr = GcPtrFieldDescr('', 0) self.supports_compressed_ptrs = gcdescr.config.translation.compressptr if self.supports_compressed_ptrs: + assert WORD == 8 assert rffi.sizeof(rffi.UINT)==rffi.sizeof(llmemory.HiddenGcRef32) # make a TransformerLayoutBuilder and save it on the translator @@ -773,7 +782,7 @@ llmemory.cast_ptr_to_adr(gcref_newptr)) def rewrite_assembler(self, cpu, operations): - # Perform two kinds of rewrites in parallel: + # Perform three kinds of rewrites in parallel: # # - Add COND_CALLs to the write barrier before SETFIELD_GC and # SETARRAYITEM_GC operations. 
@@ -786,6 +795,9 @@ # replace direct usage of ConstPtr with a BoxPtr loaded by a # GETFIELD_RAW from the array 'gcrefs.list'. # + # - For compressptr, add explicit HIDE_INTO_PTR32 and + # SHOW_FROM_PTR32 operations. + # newops = [] # we can only remember one malloc since the next malloc can possibly # collect @@ -833,6 +845,30 @@ # write_barrier_from_array self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETARRAYITEM_RAW) + # ---------- compressptr support ---------- + if (self.supports_compressed_ptrs and + (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or + isinstance(op.getdescr(), GcPtrHidden32ArrayDescr))): + num = op.getopnum() + if (num == rop.GETFIELD_GC or + num == rop.GETFIELD_GC_PURE or + num == rop.GETARRAYITEM_GC or + num == rop.GETARRAYITEM_GC_PURE): + v1 = BoxInt() + v2 = op.result + newops.append(op.copy_and_change(num, result=v1)) + op = ResOperation(rop.SHOW_FROM_PTR32, [v1], v2) + elif num == rop.SETFIELD_GC or num == rop.SETFIELD_RAW: + v1 = op.getarg(1) + v2 = BoxInt() + newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) + op = op.copy_and_change(num, args=[op.getarg(0), v2]) + elif num == rop.SETARRAYITEM_GC or num == rop.SETARRAYITEM_RAW: + v1 = op.getarg(2) + v2 = BoxInt() + newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) + op = op.copy_and_change(num, args=[op.getarg(0), + op.getarg(1), v2]) # ---------- newops.append(op) del operations[:] diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,6 +312,8 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.HIDE_INTO_PTR32, + rop.SHOW_FROM_PTR32, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) From commits-noreply at bitbucket.org Sat Apr 16 12:30:01 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 12:30:01 +0200 (CEST) Subject: [pypy-svn] pypy 
32ptr-on-64bit: In-progress: calls with HiddenGcRef32s as arguments or result. Message-ID: <20110416103001.45067282BD4@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43397:75a62468dae4 Date: 2011-04-16 12:28 +0200 http://bitbucket.org/pypy/pypy/changeset/75a62468dae4/ Log: In-progress: calls with HiddenGcRef32s as arguments or result. diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -16,7 +16,7 @@ from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr -from pypy.jit.backend.llsupport.descr import get_call_descr +from pypy.jit.backend.llsupport.descr import get_call_descr, BaseCallDescr from pypy.jit.backend.llsupport.descr import GcPtrHidden32FieldDescr from pypy.jit.backend.llsupport.descr import GcPtrHidden32ArrayDescr from pypy.rpython.memory.gctransform import asmgcroot @@ -42,7 +42,9 @@ # skip non-translated tests (using Boehm) if compressed ptrs for op in operations: if (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or - isinstance(op.getdescr(), GcPtrHidden32ArrayDescr)): + isinstance(op.getdescr(), GcPtrHidden32ArrayDescr) or + (isinstance(op.getdescr(), BaseCallDescr) and + 'H' in op.getdescr().get_arg_types())): from pypy.jit.metainterp.test.support import SkipThisRun raise SkipThisRun("non-translated test with compressptr") def can_inline_malloc(self, descr): @@ -846,9 +848,10 @@ self._gen_write_barrier(newops, op.getarg(0), v) op = op.copy_and_change(rop.SETARRAYITEM_RAW) # ---------- compressptr support ---------- + descr = op.getdescr() if (self.supports_compressed_ptrs and - (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or - isinstance(op.getdescr(), GcPtrHidden32ArrayDescr))): + (isinstance(descr, GcPtrHidden32FieldDescr) or + isinstance(descr, 
GcPtrHidden32ArrayDescr))): num = op.getopnum() if (num == rop.GETFIELD_GC or num == rop.GETFIELD_GC_PURE or @@ -869,6 +872,19 @@ newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) op = op.copy_and_change(num, args=[op.getarg(0), op.getarg(1), v2]) + elif (self.supports_compressed_ptrs and + isinstance(descr, BaseCallDescr)): + args = op.getarglist() + arg_classes = descr.get_arg_types() + assert len(args) == len(arg_classes) + for i in range(len(arg_classes)): + if arg_classes[i] == 'H': + v1 = args[i] + v2 = BoxInt() + newops.append(ResOperation(rop.HIDE_INTO_PTR32, + [v1], v2)) + args[i] = v2 + op = op.copy_and_change(op.getopnum(), args=args) # ---------- newops.append(op) del operations[:] diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -44,3 +44,9 @@ return z.n * 1000 + ord(t.c) res = self.interp_operations(f, [42]) assert res == 42063 + + def test_call_argument(self): + ... + + def test_call_result(self): + ... 
diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -1,4 +1,5 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated @@ -34,6 +35,18 @@ raise ValueError(TYPE) return result +def cast_to_gcref(value): + TYPE = lltype.typeOf(value) + if isinstance(TYPE.TO, lltype.GcOpaqueType): + if TYPE == llmemory.GCREF: + return value + elif TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) + else: + raise TypeError(TYPE) + else: + return lltype.cast_opaque_ptr(llmemory.GCREF, value) + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/jit/codewriter/assembler.py b/pypy/jit/codewriter/assembler.py --- a/pypy/jit/codewriter/assembler.py +++ b/pypy/jit/codewriter/assembler.py @@ -84,7 +84,7 @@ return True constants = self.constants_i elif kind == 'ref': - value = lltype.cast_opaque_ptr(llmemory.GCREF, value) + value = heaptracker.cast_to_gcref(value) constants = self.constants_r elif kind == 'float': if const.concretetype == lltype.Float: diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py --- a/pypy/jit/backend/llsupport/llmodel.py +++ b/pypy/jit/backend/llsupport/llmodel.py @@ -531,7 +531,7 @@ def bh_call_r(self, func, calldescr, args_i, args_r, args_f): assert isinstance(calldescr, GcPtrCallDescr) if not we_are_translated(): - calldescr.verify_types(args_i, args_r, args_f, history.REF) + calldescr.verify_types(args_i, args_r, args_f, history.REF + 'H') return calldescr.call_stub(func, args_i, args_r, args_f) def bh_call_f(self, func, calldescr, args_i, args_r, args_f): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ 
b/pypy/jit/backend/llsupport/descr.py @@ -1,5 +1,6 @@ import py from pypy.rpython.lltypesystem import lltype, rffi, llmemory, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.jit.backend.llsupport import symbolic, support from pypy.jit.metainterp.history import AbstractDescr, getkind, BoxInt, BoxPtr from pypy.jit.metainterp.history import BasicFailDescr, LoopToken, BoxFloat @@ -270,7 +271,7 @@ arg_classes = '' # <-- annotation hack def __init__(self, arg_classes, extrainfo=None): - self.arg_classes = arg_classes # string of "r" and "i" (ref/int) + self.arg_classes = arg_classes # string of chars, see get_call_descr self.extrainfo = extrainfo def get_extra_info(self): @@ -295,6 +296,9 @@ c = 'f' elif c == 'f' and longlong.supports_longlong: return 'longlong.getrealfloat(%s)' % (process('L'),) + elif c == 'H': + return 'llop.hide_into_ptr32(llmemory.HiddenGcRef32, %s)' % ( + process('r'),) arg = 'args_%s[%d]' % (c, seen[c]) seen[c] += 1 return arg @@ -310,6 +314,8 @@ return lltype.Void elif arg == 'L': return lltype.SignedLongLong + elif arg == 'H': + return llmemory.HiddenGcRef32 else: raise AssertionError(arg) @@ -344,7 +350,8 @@ def verify_types(self, args_i, args_r, args_f, return_type): assert self._return_type in return_type assert self.arg_classes.count('i') == len(args_i or ()) - assert self.arg_classes.count('r') == len(args_r or ()) + assert (self.arg_classes.count('r') + + self.arg_classes.count('H')) == len(args_r or ()) assert (self.arg_classes.count('f') + self.arg_classes.count('L')) == len(args_f or ()) @@ -435,10 +442,14 @@ def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): arg_classes = [] for ARG in ARGS: - assert ARG != llmemory.HiddenGcRef32 kind = getkind(ARG) - if kind == 'int': arg_classes.append('i') - elif kind == 'ref': arg_classes.append('r') + if kind == 'int': + arg_classes.append('i') + elif kind == 'ref': + if ARG == llmemory.HiddenGcRef32: + arg_classes.append('H') + else: + arg_classes.append('r') elif 
kind == 'float': if is_longlong(ARG): arg_classes.append('L') From commits-noreply at bitbucket.org Sat Apr 16 13:40:55 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 16 Apr 2011 13:40:55 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: getfield support Message-ID: <20110416114055.90DAB282B90@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43398:93feea3e67e6 Date: 2011-04-16 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/93feea3e67e6/ Log: getfield support diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -256,58 +256,63 @@ # This loop is equivalent to the main optimization loop in # Optimizer.propagate_all_forward + jumpop = None for newop in loop_operations: newop = inliner.inline_op(newop, clone=False) if newop.getopnum() == rop.JUMP: values = [self.getvalue(arg) for arg in newop.getarglist()] newop.initarglist(virtual_state.make_inputargs(values)) + jumpop = newop + break #self.optimizer.first_optimization.propagate_forward(newop) self.optimizer.send_extra_operation(newop) - - # Remove jump to make sure forced code are placed before it - newoperations = self.optimizer.newoperations - jmp = newoperations[-1] - assert jmp.getopnum() == rop.JUMP - self.optimizer.newoperations = newoperations[:-1] + assert jumpop self.boxes_created_this_iteration = {} - jumpargs = jmp.getarglist() + jumpargs = jumpop.getarglist() self.short_inliner = Inliner(inputargs, jumpargs) short = [] - # FIXME: Should also loop over operations added by forcing things in this loop - for op in newoperations: + i = j = 0 + while i < len(self.optimizer.newoperations): + op = self.optimizer.newoperations[i] self.boxes_created_this_iteration[op.result] = True args = op.getarglist() if op.is_guard(): args = args + op.getfailargs() for a in args: - self.import_box(a, inputargs, short, 
short_jumpargs, jumpargs) + self.import_box(a, inputargs, short, short_jumpargs, + jumpargs, True) + i += 1 - jmp.initarglist(jumpargs) - self.optimizer.newoperations.append(jmp) + if i == len(self.optimizer.newoperations): + while j < len(jumpargs): + a = jumpargs[j] + self.import_box(a, inputargs, short, short_jumpargs, + jumpargs, True) + j += 1 + + jumpop.initarglist(jumpargs) + self.optimizer.send_extra_operation(jumpop) short.append(ResOperation(rop.JUMP, short_jumpargs, None)) return inputargs, short - def import_box(self, box, inputargs, short, short_jumpargs, jumpargs): + def import_box(self, box, inputargs, short, short_jumpargs, + jumpargs, extend_inputargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes[box] - import pdb; pdb.set_trace() - for a in short_op.getarglist(): - self.import_box(a, inputargs, short, short_jumpargs, jumpargs) + self.import_box(a, inputargs, short, short_jumpargs, jumpargs, False) short.append(short_op) - short_jumpargs.append(short_op.result) newop = self.short_inliner.inline_op(short_op) self.optimizer.send_extra_operation(newop) - inputargs.append(box) if newop.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) @@ -317,10 +322,13 @@ self.optimizer.send_extra_operation(guard) assert self.optimizer.newoperations[-1] is not guard - box = newop.result - if box in self.optimizer.values: - box = self.optimizer.values[box].force_box() - jumpargs.append(box) + if extend_inputargs: + short_jumpargs.append(short_op.result) + inputargs.append(box) + box = newop.result + if box in self.optimizer.values: + box = self.optimizer.values[box].force_box() + jumpargs.append(box) def sameop(self, op1, op2): if op1.getopnum() != op2.getopnum(): diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- 
a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -1673,6 +1673,24 @@ """ self.optimize_loop(ops, expected) + def test_duplicate_getfield_2(self): + ops = """ + [p1, p2, i0] + i1 = getfield_gc(p1, descr=valuedescr) + i2 = getfield_gc(p2, descr=valuedescr) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = getfield_gc(p2, descr=valuedescr) + i5 = int_add(i3, i4) + i6 = int_add(i0, i5) + jump(p1, p2, i6) + """ + expected = """ + [p1, p2, i0, i5] + i6 = int_add(i0, i5) + jump(p1, p2, i6, i5) + """ + self.optimize_loop(ops, expected) + def test_getfield_after_setfield(self): ops = """ [p1, i1] diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -99,6 +99,14 @@ cf._cached_fields[structvalue2] = fieldvalue2 return cf + def produce_potential_short_preamble_ops(self, potential_ops, descr): + for structvalue, fieldvalue in self._cached_fields.iteritems(): + result = fieldvalue.get_key_box() + potential_ops[result] = ResOperation(rop.GETFIELD_GC, + [structvalue.get_key_box()], + result, + descr) + class CachedArrayItems(object): def __init__(self): @@ -129,7 +137,7 @@ self.force_all_lazy_setfields() else: assert 0 # was: new.lazy_setfields = self.lazy_setfields - + for descr, d in self.cached_fields.items(): new.cached_fields[descr] = d.get_cloned(optimizer, valuemap) @@ -153,6 +161,10 @@ return new + def produce_potential_short_preamble_ops(self, potential_ops): + for descr, d in self.cached_fields.items(): + d.produce_potential_short_preamble_ops(potential_ops, descr) + def clean_caches(self): del self._lazy_setfields[:] self.cached_fields.clear() From commits-noreply at bitbucket.org Sat Apr 16 13:40:56 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 16 Apr 2011 13:40:56 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: hg merge Message-ID: 
<20110416114056.3D543282B90@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43399:9c09f1f0dc5f Date: 2011-04-16 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/9c09f1f0dc5f/ Log: hg merge diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -13,6 +13,8 @@ the same (same numbers or same pointers) (2) it's fine to remove the call completely if we can guess the result according to rule 1 + (3) the function call can be moved around by optimizer, + but only so it'll be called earlier and not later. Most importantly it doesn't mean that pure function has no observable side effect, but those side effects can be ommited (ie caching). From commits-noreply at bitbucket.org Sat Apr 16 13:41:01 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 16 Apr 2011 13:41:01 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: getfield support Message-ID: <20110416114101.C55A7282BA1@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43398:93feea3e67e6 Date: 2011-04-16 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/93feea3e67e6/ Log: getfield support diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -256,58 +256,63 @@ # This loop is equivalent to the main optimization loop in # Optimizer.propagate_all_forward + jumpop = None for newop in loop_operations: newop = inliner.inline_op(newop, clone=False) if newop.getopnum() == rop.JUMP: values = [self.getvalue(arg) for arg in newop.getarglist()] newop.initarglist(virtual_state.make_inputargs(values)) + jumpop = newop + break #self.optimizer.first_optimization.propagate_forward(newop) self.optimizer.send_extra_operation(newop) - - # Remove jump to make sure forced code are placed before it - newoperations = self.optimizer.newoperations - jmp = newoperations[-1] - assert 
jmp.getopnum() == rop.JUMP - self.optimizer.newoperations = newoperations[:-1] + assert jumpop self.boxes_created_this_iteration = {} - jumpargs = jmp.getarglist() + jumpargs = jumpop.getarglist() self.short_inliner = Inliner(inputargs, jumpargs) short = [] - # FIXME: Should also loop over operations added by forcing things in this loop - for op in newoperations: + i = j = 0 + while i < len(self.optimizer.newoperations): + op = self.optimizer.newoperations[i] self.boxes_created_this_iteration[op.result] = True args = op.getarglist() if op.is_guard(): args = args + op.getfailargs() for a in args: - self.import_box(a, inputargs, short, short_jumpargs, jumpargs) + self.import_box(a, inputargs, short, short_jumpargs, + jumpargs, True) + i += 1 - jmp.initarglist(jumpargs) - self.optimizer.newoperations.append(jmp) + if i == len(self.optimizer.newoperations): + while j < len(jumpargs): + a = jumpargs[j] + self.import_box(a, inputargs, short, short_jumpargs, + jumpargs, True) + j += 1 + + jumpop.initarglist(jumpargs) + self.optimizer.send_extra_operation(jumpop) short.append(ResOperation(rop.JUMP, short_jumpargs, None)) return inputargs, short - def import_box(self, box, inputargs, short, short_jumpargs, jumpargs): + def import_box(self, box, inputargs, short, short_jumpargs, + jumpargs, extend_inputargs): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes[box] - import pdb; pdb.set_trace() - for a in short_op.getarglist(): - self.import_box(a, inputargs, short, short_jumpargs, jumpargs) + self.import_box(a, inputargs, short, short_jumpargs, jumpargs, False) short.append(short_op) - short_jumpargs.append(short_op.result) newop = self.short_inliner.inline_op(short_op) self.optimizer.send_extra_operation(newop) - inputargs.append(box) if newop.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) @@ -317,10 +322,13 @@ 
self.optimizer.send_extra_operation(guard) assert self.optimizer.newoperations[-1] is not guard - box = newop.result - if box in self.optimizer.values: - box = self.optimizer.values[box].force_box() - jumpargs.append(box) + if extend_inputargs: + short_jumpargs.append(short_op.result) + inputargs.append(box) + box = newop.result + if box in self.optimizer.values: + box = self.optimizer.values[box].force_box() + jumpargs.append(box) def sameop(self, op1, op2): if op1.getopnum() != op2.getopnum(): diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -1673,6 +1673,24 @@ """ self.optimize_loop(ops, expected) + def test_duplicate_getfield_2(self): + ops = """ + [p1, p2, i0] + i1 = getfield_gc(p1, descr=valuedescr) + i2 = getfield_gc(p2, descr=valuedescr) + i3 = getfield_gc(p1, descr=valuedescr) + i4 = getfield_gc(p2, descr=valuedescr) + i5 = int_add(i3, i4) + i6 = int_add(i0, i5) + jump(p1, p2, i6) + """ + expected = """ + [p1, p2, i0, i5] + i6 = int_add(i0, i5) + jump(p1, p2, i6, i5) + """ + self.optimize_loop(ops, expected) + def test_getfield_after_setfield(self): ops = """ [p1, i1] diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -99,6 +99,14 @@ cf._cached_fields[structvalue2] = fieldvalue2 return cf + def produce_potential_short_preamble_ops(self, potential_ops, descr): + for structvalue, fieldvalue in self._cached_fields.iteritems(): + result = fieldvalue.get_key_box() + potential_ops[result] = ResOperation(rop.GETFIELD_GC, + [structvalue.get_key_box()], + result, + descr) + class CachedArrayItems(object): def __init__(self): @@ -129,7 +137,7 @@ self.force_all_lazy_setfields() else: assert 0 # was: new.lazy_setfields = self.lazy_setfields - + for descr, d in 
self.cached_fields.items(): new.cached_fields[descr] = d.get_cloned(optimizer, valuemap) @@ -153,6 +161,10 @@ return new + def produce_potential_short_preamble_ops(self, potential_ops): + for descr, d in self.cached_fields.items(): + d.produce_potential_short_preamble_ops(potential_ops, descr) + def clean_caches(self): del self._lazy_setfields[:] self.cached_fields.clear() From commits-noreply at bitbucket.org Sat Apr 16 13:41:02 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 16 Apr 2011 13:41:02 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: hg merge Message-ID: <20110416114102.928C5282BA1@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43399:9c09f1f0dc5f Date: 2011-04-16 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/9c09f1f0dc5f/ Log: hg merge diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -13,6 +13,8 @@ the same (same numbers or same pointers) (2) it's fine to remove the call completely if we can guess the result according to rule 1 + (3) the function call can be moved around by optimizer, + but only so it'll be called earlier and not later. Most importantly it doesn't mean that pure function has no observable side effect, but those side effects can be ommited (ie caching). 
From commits-noreply at bitbucket.org Sat Apr 16 17:24:10 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 16 Apr 2011 17:24:10 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: getarrayitem support Message-ID: <20110416152410.5187A282B90@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43400:cffe68e8c562 Date: 2011-04-16 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/cffe68e8c562/ Log: getarrayitem support diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -2174,6 +2174,39 @@ jump(p1) """ self.optimize_loop(ops, expected) + + def test_duplicate_getarrayitem_2(self): + ops = """ + [p1, i0] + i2 = getarrayitem_gc(p1, 0, descr=arraydescr2) + i3 = getarrayitem_gc(p1, 1, descr=arraydescr2) + i4 = getarrayitem_gc(p1, 0, descr=arraydescr2) + i5 = getarrayitem_gc(p1, 1, descr=arraydescr2) + i6 = int_add(i3, i4) + i7 = int_add(i0, i6) + jump(p1, i7) + """ + expected = """ + [p1, i0, i6] + i7 = int_add(i0, i6) + jump(p1, i7, i6) + """ + self.optimize_loop(ops, expected) + + def test_duplicate_getarrayitem_3(self): + ops = """ + [p1, i0, i10] + i2 = getarrayitem_gc(p1, i10, descr=arraydescr2) + i4 = getarrayitem_gc(p1, i10, descr=arraydescr2) + i7 = int_add(i0, i4) + jump(p1, i7, i10) + """ + expected = """ + [p1, i0, i10, i6] + i7 = int_add(i0, i6) + jump(p1, i7, i10, i6) + """ + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_after_setarrayitem_1(self): ops = """ diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -4,7 +4,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimizeopt.optimizer import Optimization - +from 
pypy.jit.metainterp.history import ConstInt class CachedField(object): def __init__(self): @@ -104,9 +104,8 @@ result = fieldvalue.get_key_box() potential_ops[result] = ResOperation(rop.GETFIELD_GC, [structvalue.get_key_box()], - result, - descr) - + result, descr) + class CachedArrayItems(object): def __init__(self): @@ -164,6 +163,22 @@ def produce_potential_short_preamble_ops(self, potential_ops): for descr, d in self.cached_fields.items(): d.produce_potential_short_preamble_ops(potential_ops, descr) + for descr, d in self.cached_arrayitems.items(): + for value, cache in d.items(): + for index, fieldvalue in cache.fixed_index_items.items(): + result = fieldvalue.get_key_box() + op = ResOperation(rop.GETARRAYITEM_GC, + [value.get_key_box(), ConstInt(index)], + result, descr) + potential_ops[result] = op + if cache.var_index_item and cache.var_index_indexvalue: + result = cache.var_index_item.get_key_box() + op = ResOperation(rop.GETARRAYITEM_GC, + [value.get_key_box(), + cache.var_index_indexvalue.get_key_box()], + result, descr) + potential_ops[result] = op + def clean_caches(self): del self._lazy_setfields[:] From commits-noreply at bitbucket.org Sat Apr 16 17:24:15 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 16 Apr 2011 17:24:15 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: getarrayitem support Message-ID: <20110416152415.A2BB5282BAD@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43400:cffe68e8c562 Date: 2011-04-16 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/cffe68e8c562/ Log: getarrayitem support diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -2174,6 +2174,39 @@ jump(p1) """ self.optimize_loop(ops, expected) + + def test_duplicate_getarrayitem_2(self): + ops = """ + [p1, i0] + i2 = getarrayitem_gc(p1, 0, descr=arraydescr2) + 
i3 = getarrayitem_gc(p1, 1, descr=arraydescr2) + i4 = getarrayitem_gc(p1, 0, descr=arraydescr2) + i5 = getarrayitem_gc(p1, 1, descr=arraydescr2) + i6 = int_add(i3, i4) + i7 = int_add(i0, i6) + jump(p1, i7) + """ + expected = """ + [p1, i0, i6] + i7 = int_add(i0, i6) + jump(p1, i7, i6) + """ + self.optimize_loop(ops, expected) + + def test_duplicate_getarrayitem_3(self): + ops = """ + [p1, i0, i10] + i2 = getarrayitem_gc(p1, i10, descr=arraydescr2) + i4 = getarrayitem_gc(p1, i10, descr=arraydescr2) + i7 = int_add(i0, i4) + jump(p1, i7, i10) + """ + expected = """ + [p1, i0, i10, i6] + i7 = int_add(i0, i6) + jump(p1, i7, i10, i6) + """ + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_after_setarrayitem_1(self): ops = """ diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -4,7 +4,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.jitexc import JitException from pypy.jit.metainterp.optimizeopt.optimizer import Optimization - +from pypy.jit.metainterp.history import ConstInt class CachedField(object): def __init__(self): @@ -104,9 +104,8 @@ result = fieldvalue.get_key_box() potential_ops[result] = ResOperation(rop.GETFIELD_GC, [structvalue.get_key_box()], - result, - descr) - + result, descr) + class CachedArrayItems(object): def __init__(self): @@ -164,6 +163,22 @@ def produce_potential_short_preamble_ops(self, potential_ops): for descr, d in self.cached_fields.items(): d.produce_potential_short_preamble_ops(potential_ops, descr) + for descr, d in self.cached_arrayitems.items(): + for value, cache in d.items(): + for index, fieldvalue in cache.fixed_index_items.items(): + result = fieldvalue.get_key_box() + op = ResOperation(rop.GETARRAYITEM_GC, + [value.get_key_box(), ConstInt(index)], + result, descr) + potential_ops[result] = op + if cache.var_index_item and 
cache.var_index_indexvalue: + result = cache.var_index_item.get_key_box() + op = ResOperation(rop.GETARRAYITEM_GC, + [value.get_key_box(), + cache.var_index_indexvalue.get_key_box()], + result, descr) + potential_ops[result] = op + def clean_caches(self): del self._lazy_setfields[:] From commits-noreply at bitbucket.org Sat Apr 16 21:11:35 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 21:11:35 +0200 (CEST) Subject: [pypy-svn] pypy default: a patch by ezio melotti. This fixes some unicode-related bugs. Message-ID: <20110416191135.D76C7282B90@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43401:3bc52d356c52 Date: 2011-04-16 21:08 +0200 http://bitbucket.org/pypy/pypy/changeset/3bc52d356c52/ Log: a patch by ezio melotti. This fixes some unicode-related bugs. Details here: http://bugs.python.org/issue8271 diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -66,9 +66,10 @@ assert called[0] assert "42424242" in result - def checkdecodeerror(self, s, encoding, start, stop, addstuff=True): + def checkdecodeerror(self, s, encoding, start, stop, + addstuff=True, msg=None): called = [0] - def errorhandler(errors, enc, msg, t, startingpos, + def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: @@ -77,6 +78,8 @@ assert t is s assert start == startingpos assert stop == endingpos + if msg is not None: + assert errmsg == msg return u"42424242", stop return u"", endingpos decoder = self.getdecoder(encoding) @@ -90,7 +93,7 @@ class TestDecoding(UnicodeTests): - + # XXX test bom recognition in utf-16 # XXX test proper error handling @@ -131,6 +134,96 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + def test_ascii_error(self): + self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) + + def test_utf16_errors(self): + # trunkated BOM + for s in ["\xff", "\xfe"]: 
+ self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) + + for s in [ + # unexpected end of data ascii + "\xff\xfeF", + # unexpected end of data + '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', + ]: + self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) + for s in [ + # illegal surrogate + "\xff\xfe\xff\xdb\xff\xff", + ]: + self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + + def test_utf16_bugs(self): + s = '\x80-\xe9\xdeL\xa3\x9b' + py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, + s, len(s), True) + + def test_utf7_bugs(self): + u = u'A\u2262\u0391.' + assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' + + def test_utf7_tofrom_utf8_bug(self): + def _assert_decu7(input, expected): + assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) + + _assert_decu7('+-', u'+') + _assert_decu7('+-+-', u'++') + _assert_decu7('+-+AOQ-', u'+\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ- ', u'\xe4 ') + _assert_decu7(' +AOQ-', u' \xe4') + _assert_decu7(' +AOQ- ', u' \xe4 ') + _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') + + s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' + s_utf8 = u'Die Männer ärgen sich!' + s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' 
+ + _assert_decu7(s_utf7, s_utf8_esc) + _assert_decu7(s_utf7, s_utf8) + + assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 + assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 + + def test_utf7_partial(self): + s = u"a+-b".encode('utf-7') + assert s == "a+--b" + decode = self.getdecoder('utf-7') + assert decode(s, 1, None) == (u'a', 1) + assert decode(s, 2, None) == (u'a', 1) + assert decode(s, 3, None) == (u'a+', 3) + assert decode(s, 4, None) == (u'a+-', 4) + assert decode(s, 5, None) == (u'a+-b', 5) + + def test_utf7_surrogates(self): + encode = self.getencoder('utf-7') + u = u'\U000abcde' + assert encode(u, len(u), None) == '+2m/c3g-' + decode = self.getdecoder('utf-7') + s = '+3ADYAA-' + raises(UnicodeError, decode, s, len(s), None) + def replace_handler(errors, codec, message, input, start, end): + return u'?', end + assert decode(s, len(s), None, final=True, + errorhandler = replace_handler) == (u'??', len(s)) + + +class TestUTF8Decoding(UnicodeTests): + def __init__(self): + self.decoder = self.getdecoder('utf-8') + + def replace_handler(self, errors, codec, message, input, start, end): + return u'\ufffd', end + + def ignore_handler(self, errors, codec, message, input, start, end): + return u'', end + + def to_bytestring(self, bytes): + return ''.join(chr(int(c, 16)) for c in bytes.split()) + def test_single_chars_utf8(self): for s in ["\xd7\x90", "\xd6\x96", "\xeb\x96\x95", "\xf0\x90\x91\x93"]: self.checkdecode(s, "utf-8") @@ -140,30 +233,297 @@ # This test will raise an error with python 3.x self.checkdecode(u"\ud800", "utf-8") + def test_invalid_start_byte(self): + """ + Test that an 'invalid start byte' error is raised when the first byte + is not in the ASCII range or is not a valid start byte of a 2-, 3-, or + 4-bytes sequence. The invalid start byte is replaced with a single + U+FFFD when errors='replace'. + E.g. <80> is a continuation byte and can appear only after a start byte. 
+ """ + FFFD = u'\ufffd' + for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': + raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) + self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, + msg='invalid start byte') + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.replace_handler) == (FFFD, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', 9)) + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.ignore_handler) == (u'', 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', 9)) + + def test_unexpected_end_of_data(self): + """ + Test that an 'unexpected end of data' error is raised when the string + ends after a start byte of a 2-, 3-, or 4-bytes sequence without having + enough continuation bytes. The incomplete sequence is replaced with a + single U+FFFD when errors='replace'. + E.g. in the sequence , F3 is the start byte of a 4-bytes + sequence, but it's followed by only 2 valid continuation bytes and the + last continuation bytes is missing. + Note: the continuation bytes must be all valid, if one of them is + invalid another error will be raised. 
+ """ + sequences = [ + 'C2', 'DF', + 'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF', + 'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF', + 'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF', + 'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF', + 'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF', + 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' + ] + FFFD = u'\ufffd' + for seq in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq), addstuff=False, + msg='unexpected end of data') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (FFFD, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', len(seq) + 8)) + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (u'', len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', len(seq) + 8)) + + def test_invalid_cb_for_2bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte of a 2-bytes sequence is invalid. The start byte + is replaced by a single U+FFFD and the second byte is handled + separately when errors='replace'. + E.g. in the sequence , C2 is the start byte of a 2-bytes + sequence, but 41 is not a valid continuation byte because it's the + ASCII letter 'A'. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('C2 00', FFFD+u'\x00'), ('C2 7F', FFFD+u'\x7f'), + ('C2 C0', FFFDx2), ('C2 FF', FFFDx2), + ('DF 00', FFFD+u'\x00'), ('DF 7F', FFFD+u'\x7f'), + ('DF C0', FFFDx2), ('DF FF', FFFDx2), + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, 1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_3bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 3-bytes sequence are invalid. When + errors='replace', if the first continuation byte is valid, the first + two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the + third byte is handled separately, otherwise only the start byte is + replaced with a U+FFFD and the other continuation bytes are handled + separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('E0 00', FFFD+u'\x00'), ('E0 7F', FFFD+u'\x7f'), ('E0 80', FFFDx2), + ('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2), + ('E0 A0 00', FFFD+u'\x00'), ('E0 A0 7F', FFFD+u'\x7f'), + ('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2), + ('E0 BF 00', FFFD+u'\x00'), ('E0 BF 7F', FFFD+u'\x7f'), + ('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+u'\x00'), + ('E1 7F', FFFD+u'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2), + ('E1 80 00', FFFD+u'\x00'), ('E1 80 7F', FFFD+u'\x7f'), + ('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2), + ('E1 BF 00', FFFD+u'\x00'), ('E1 BF 7F', FFFD+u'\x7f'), + ('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+u'\x00'), + ('EC 7F', FFFD+u'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2), + ('EC 80 00', FFFD+u'\x00'), ('EC 80 7F', FFFD+u'\x7f'), + ('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2), + ('EC BF 00', FFFD+u'\x00'), ('EC BF 7F', FFFD+u'\x7f'), + ('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+u'\x00'), + ('ED 7F', FFFD+u'\x7f'), + # ('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^ + ('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+u'\x00'), + ('ED 80 7F', FFFD+u'\x7f'), ('ED 80 C0', FFFDx2), + ('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+u'\x00'), + ('ED 9F 7F', FFFD+u'\x7f'), ('ED 9F C0', FFFDx2), + ('ED 9F FF', FFFDx2), ('EE 00', FFFD+u'\x00'), + ('EE 7F', FFFD+u'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2), + ('EE 80 00', FFFD+u'\x00'), ('EE 80 7F', FFFD+u'\x7f'), + ('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2), + ('EE BF 00', FFFD+u'\x00'), ('EE BF 7F', FFFD+u'\x7f'), + ('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+u'\x00'), + ('EF 7F', FFFD+u'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2), + ('EF 80 00', FFFD+u'\x00'), ('EF 80 7F', FFFD+u'\x7f'), + ('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2), + ('EF BF 00', FFFD+u'\x00'), ('EF BF 7F', FFFD+u'\x7f'), + ('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2), + ] + for seq, res in sequences: + seq = 
self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_4bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 4-bytes sequence are invalid. When + errors='replace',the start byte and all the following valid + continuation bytes are replaced with a single U+FFFD, and all the bytes + starting from the first invalid continuation bytes (included) are + handled separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('F0 00', FFFD+u'\x00'), ('F0 7F', FFFD+u'\x7f'), ('F0 80', FFFDx2), + ('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2), + ('F0 90 00', FFFD+u'\x00'), ('F0 90 7F', FFFD+u'\x7f'), + ('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2), + ('F0 BF 00', FFFD+u'\x00'), ('F0 BF 7F', FFFD+u'\x7f'), + ('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2), + ('F0 90 80 00', FFFD+u'\x00'), ('F0 90 80 7F', FFFD+u'\x7f'), + ('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2), + ('F0 90 BF 00', FFFD+u'\x00'), ('F0 90 BF 7F', FFFD+u'\x7f'), + ('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2), + ('F0 BF 80 00', FFFD+u'\x00'), ('F0 BF 80 7F', FFFD+u'\x7f'), + ('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2), + ('F0 BF BF 00', FFFD+u'\x00'), ('F0 BF BF 7F', FFFD+u'\x7f'), + ('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2), + ('F1 00', FFFD+u'\x00'), ('F1 7F', FFFD+u'\x7f'), ('F1 C0', FFFDx2), + ('F1 FF', FFFDx2), ('F1 80 00', FFFD+u'\x00'), + ('F1 80 7F', FFFD+u'\x7f'), ('F1 80 C0', FFFDx2), + ('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+u'\x00'), + ('F1 BF 7F', FFFD+u'\x7f'), ('F1 BF C0', FFFDx2), + ('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+u'\x00'), + ('F1 80 80 7F', FFFD+u'\x7f'), ('F1 80 80 C0', FFFDx2), + ('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+u'\x00'), + ('F1 80 BF 7F', FFFD+u'\x7f'), ('F1 80 BF C0', FFFDx2), + ('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+u'\x00'), + ('F1 BF 80 7F', FFFD+u'\x7f'), ('F1 BF 80 C0', FFFDx2), + ('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+u'\x00'), + ('F1 BF BF 7F', FFFD+u'\x7f'), ('F1 BF BF C0', FFFDx2), + ('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+u'\x00'), + ('F3 7F', FFFD+u'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2), + ('F3 80 00', FFFD+u'\x00'), ('F3 80 7F', FFFD+u'\x7f'), + ('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2), + ('F3 BF 00', FFFD+u'\x00'), ('F3 BF 7F', FFFD+u'\x7f'), + ('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2), + ('F3 80 80 00', FFFD+u'\x00'), ('F3 80 80 7F', FFFD+u'\x7f'), + ('F3 80 
80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2), + ('F3 80 BF 00', FFFD+u'\x00'), ('F3 80 BF 7F', FFFD+u'\x7f'), + ('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2), + ('F3 BF 80 00', FFFD+u'\x00'), ('F3 BF 80 7F', FFFD+u'\x7f'), + ('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2), + ('F3 BF BF 00', FFFD+u'\x00'), ('F3 BF BF 7F', FFFD+u'\x7f'), + ('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2), + ('F4 00', FFFD+u'\x00'), ('F4 7F', FFFD+u'\x7f'), ('F4 90', FFFDx2), + ('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2), + ('F4 80 00', FFFD+u'\x00'), ('F4 80 7F', FFFD+u'\x7f'), + ('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2), + ('F4 8F 00', FFFD+u'\x00'), ('F4 8F 7F', FFFD+u'\x7f'), + ('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2), + ('F4 80 80 00', FFFD+u'\x00'), ('F4 80 80 7F', FFFD+u'\x7f'), + ('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2), + ('F4 80 BF 00', FFFD+u'\x00'), ('F4 80 BF 7F', FFFD+u'\x7f'), + ('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2), + ('F4 8F 80 00', FFFD+u'\x00'), ('F4 8F 80 7F', FFFD+u'\x7f'), + ('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2), + ('F4 8F BF 00', FFFD+u'\x00'), ('F4 8F BF 7F', FFFD+u'\x7f'), + ('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2) + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', 
len(seq) + 8)) + def test_utf8_errors(self): - for s in [# unexpected end of data - "\xd7", "\xd6", "\xeb\x96", "\xf0\x90\x91"]: - self.checkdecodeerror(s, "utf-8", 0, len(s), addstuff=False) - - # unexpected code byte - for s in ["\x81", "\xbf"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + # unexpected end of data + for s in ['\xd7', '\xd6', '\xeb\x96', '\xf0\x90\x91', '\xc2', '\xdf']: + self.checkdecodeerror(s, 'utf-8', 0, len(s), addstuff=False, + msg='unexpected end of data') # invalid data 2 byte for s in ["\xd7\x50", "\xd6\x06", "\xd6\xD6"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') # invalid data 3 byte for s in ["\xeb\x56\x95", "\xeb\x06\x95", "\xeb\xD6\x95"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xeb\x96\x55", "\xeb\x96\x05", "\xeb\x96\xD5"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') # invalid data 4 byte for s in ["\xf0\x50\x91\x93", "\xf0\x00\x91\x93", "\xf0\xd0\x91\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x51\x93", "\xf0\x90\x01\x93", "\xf0\x90\xd1\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x91\x53", "\xf0\x90\x91\x03", "\xf0\x90\x91\xd3"]: - self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True, + msg='invalid continuation byte') def test_issue8271(self): @@ -249,97 +609,18 @@ ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64', 
u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'), ] - def replace_handler(errors, codec, message, input, start, end): - return FFFD, end - def ignore_handler(errors, codec, message, input, start, end): - return u'', end + for n, (seq, res) in enumerate(sequences): decoder = self.getdecoder('utf-8') raises(UnicodeDecodeError, decoder, seq, len(seq), None, final=True) assert decoder(seq, len(seq), None, final=True, - errorhandler=replace_handler) == (res, len(seq)) + errorhandler=self.replace_handler) == (res, len(seq)) assert decoder(seq + 'b', len(seq) + 1, None, final=True, - errorhandler=replace_handler) == (res + u'b', - len(seq) + 1) + errorhandler=self.replace_handler) == (res + u'b', + len(seq) + 1) res = res.replace(FFFD, u'') assert decoder(seq, len(seq), None, final=True, - errorhandler=ignore_handler) == (res, len(seq)) - - def test_ascii_error(self): - self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) - - def test_utf16_errors(self): - # trunkated BOM - for s in ["\xff", "\xfe"]: - self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) - - for s in [ - # unexpected end of data ascii - "\xff\xfeF", - # unexpected end of data - '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', - ]: - self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) - for s in [ - # illegal surrogate - "\xff\xfe\xff\xdb\xff\xff", - ]: - self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) - - def test_utf16_bugs(self): - s = '\x80-\xe9\xdeL\xa3\x9b' - py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, - s, len(s), True) - - def test_utf7_bugs(self): - u = u'A\u2262\u0391.' - assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' 
- - def test_utf7_tofrom_utf8_bug(self): - def _assert_decu7(input, expected): - assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) - - _assert_decu7('+-', u'+') - _assert_decu7('+-+-', u'++') - _assert_decu7('+-+AOQ-', u'+\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ- ', u'\xe4 ') - _assert_decu7(' +AOQ-', u' \xe4') - _assert_decu7(' +AOQ- ', u' \xe4 ') - _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') - - s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' - s_utf8 = u'Die Männer ärgen sich!' - s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' - - _assert_decu7(s_utf7, s_utf8_esc) - _assert_decu7(s_utf7, s_utf8) - - assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 - assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 - - def test_utf7_partial(self): - s = u"a+-b".encode('utf-7') - assert s == "a+--b" - decode = self.getdecoder('utf-7') - assert decode(s, 1, None) == (u'a', 1) - assert decode(s, 2, None) == (u'a', 1) - assert decode(s, 3, None) == (u'a+', 3) - assert decode(s, 4, None) == (u'a+-', 4) - assert decode(s, 5, None) == (u'a+-b', 5) - - def test_utf7_surrogates(self): - encode = self.getencoder('utf-7') - u = u'\U000abcde' - assert encode(u, len(u), None) == '+2m/c3g-' - decode = self.getdecoder('utf-7') - s = '+3ADYAA-' - raises(UnicodeError, decode, s, len(s), None) - def replace_handler(errors, codec, message, input, start, end): - return u'?', end - assert decode(s, len(s), None, final=True, - errorhandler = replace_handler) == (u'??', len(s)) + errorhandler=self.ignore_handler) == (res, len(seq)) class TestEncoding(UnicodeTests): @@ -376,7 +657,7 @@ self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) @@ -384,7 +665,7 @@ 
self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_single_chars_utf8(self): # check every number of bytes per char @@ -394,7 +675,7 @@ def test_utf8_surrogates(self): # check replacing of two surrogates by single char while encoding # make sure that the string itself is not marshalled - u = u"\ud800" + u = u"\ud800" for i in range(4): u += u"\udc00" self.checkencode(u, "utf-8") @@ -422,7 +703,7 @@ def test_utf8(self): from pypy.rpython.test.test_llinterp import interpret def f(x): - + s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) s2 = runicode.unicode_encode_utf_8(u, len(u), True) @@ -438,6 +719,6 @@ u = runicode.UNICHR(x) t = runicode.ORD(u) return t - + res = interpret(f, [0x10140]) assert res == 0x10140 diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -87,8 +87,9 @@ result = UnicodeBuilder(size) pos = 0 while pos < size: - ch = s[pos] - ordch1 = ord(ch) + ordch1 = ord(s[pos]) + # fast path for ASCII + # XXX maybe use a while loop here if ordch1 < 0x80: result.append(unichr(ordch1)) pos += 1 @@ -98,110 +99,149 @@ if pos + n > size: if not final: break - else: - endpos = pos + 1 - while endpos < size and ord(s[endpos]) & 0xC0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "unexpected end of data", - s, pos, endpos) + charsleft = size - pos - 1 # either 0, 1, 2 + # note: when we get the 'unexpected end of data' we don't care + # about the pos anymore and we just ignore the value + if not charsleft: + # there's only the start byte and nothing else + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+1) + result.append(r) + break + ordch2 = ord(s[pos+1]) + if n == 3: + # 3-bytes seq with only a continuation byte + if 
(ordch2>>6 != 0b10 or + (ordch1 == 0xe0 and ordch2 < 0xa0)): + # or (ordch1 == 0xed and ordch2 > 0x9f) + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + else: + # second byte valid, but third byte missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+2) + result.append(r) + break + elif n == 4: + # 4-bytes seq with 1 or 2 continuation bytes + if (ordch2>>6 != 0b10 or + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)): + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + # third byte invalid, take the first two and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + else: + # there's only 1 or 2 valid cb, but the others are missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+charsleft+1) + result.append(r) + break + + if n == 0: + r, pos = errorhandler(errors, 'utf-8', + 'invalid start byte', + s, pos, pos+1) + result.append(r) + + elif n == 1: + assert 0, "ascii should have gone through the fast path" + + elif n == 2: + ordch2 = ord(s[pos+1]) + if ordch2>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) continue + # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00011111) << 6) + + (ordch2 & 0b00111111))) + pos += 2 - if n == 0: - r, pos = errorhandler(errors, "utf-8", - "invalid start byte", - s, pos, pos + 1) - result.append(r) - elif n == 1: - assert 0, "you can never get here" - elif n == 2: - # 110yyyyy 10zzzzzz ====> 00000000 00000yyy yyzzzzzz - - ordch2 = ord(s[pos+1]) - z, two = 
splitter[6, 2](ordch2) - y, six = splitter[5, 3](ordch1) - assert six == 6 - if two != 2: - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, pos + 1) - result.append(r) - else: - c = (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 3: - # 1110xxxx 10yyyyyy 10zzzzzz ====> 00000000 xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - z, two1 = splitter[6, 2](ordch3) - y, two2 = splitter[6, 2](ordch2) - x, fourteen = splitter[4, 4](ordch1) - assert fourteen == 14 - if (two1 != 2 or two2 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. # or (ordch1 == 0xed and ordch2 > 0x9f) ): + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00001111) << 12) + + ((ordch2 & 0b00111111) << 6) + + (ordch3 & 0b00111111))) + pos += 3 - # if ordch2 first two bits are 1 and 0, then the invalid - # continuation byte is ordch3; else ordch2 is invalid. 
- if two2 == 2: - endpos = pos + 2 - else: - endpos = pos + 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) - result.append(r) - else: - c = (x << 12) + (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 4: - # 11110www 10xxxxxx 10yyyyyy 10zzzzzz ====> - # 000wwwxx xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - z, two1 = splitter[6, 2](ordch4) - y, two2 = splitter[6, 2](ordch3) - x, two3 = splitter[6, 2](ordch2) - w, thirty = splitter[3, 5](ordch1) - assert thirty == 30 - if (two1 != 2 or two2 != 2 or two3 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): - endpos = pos + 1 - if ordch2 & 0xc0 == 0x80: - endpos += 1 - if ordch3 & 0xc0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + elif ordch4>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+3) + result.append(r) + continue + # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz + c = (((ordch1 & 0b00000111) << 18) + + ((ordch2 & 0b00111111) << 12) + + ((ordch3 & 0b00111111) << 6) + + (ordch4 & 0b00111111)) + if c <= MAXUNICODE: + result.append(UNICHR(c)) else: - c = (w << 18) + (x << 12) + (y << 6) + z - # convert to UTF-16 if necessary - if c <= MAXUNICODE: - result.append(UNICHR(c)) - else: - # compute and append the two surrogates: - # translate from 10000..10FFFF to 0..FFFF - c -= 0x10000 - # high surrogate = top 10 bits added to D800 - result.append(unichr(0xD800 + (c >> 10))) - # low surrogate = bottom 10 bits added to DC00 - result.append(unichr(0xDC00 + (c & 0x03FF))) - 
pos += n - else: - r, pos = errorhandler(errors, "utf-8", - "unsupported Unicode code range", - s, pos, pos + n) - result.append(r) + # compute and append the two surrogates: + # translate from 10000..10FFFF to 0..FFFF + c -= 0x10000 + # high surrogate = top 10 bits added to D800 + result.append(unichr(0xD800 + (c >> 10))) + # low surrogate = bottom 10 bits added to DC00 + result.append(unichr(0xDC00 + (c & 0x03FF))) + pos += 4 return result.build(), pos @@ -629,7 +669,7 @@ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, # sp ! " # $ % & ' ( ) * + , - . / 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 3, 0, 0, 0, 0, -# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? +# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, # @ A B C D E F G H I J K L M N O 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -905,20 +945,20 @@ pos = 0 while pos < size: ch = p[pos] - + if ord(ch) < limit: result.append(chr(ord(ch))) pos += 1 else: # startpos for collecting unencodable chars - collstart = pos - collend = pos+1 + collstart = pos + collend = pos+1 while collend < len(p) and ord(p[collend]) >= limit: collend += 1 r, pos = errorhandler(errors, encoding, reason, p, collstart, collend) result.append(r) - + return result.build() def unicode_encode_latin_1(p, size, errors, errorhandler=None): From commits-noreply at bitbucket.org Sat Apr 16 21:12:02 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 21:12:02 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110416191202.5A0D1282BA1@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43402:3e887780e9e3 Date: 2011-04-16 21:09 +0200 http://bitbucket.org/pypy/pypy/changeset/3e887780e9e3/ Log: merge diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1498,7 +1498,7 @@ try: from _ffi import CDLL, types 
except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') + sys.stderr.write('SKIP: cannot import _ffi\n') return 0 libm = CDLL(libm_name) diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -271,12 +271,12 @@ assert log.result == 42 def test_skip(self): - import _pytest + import pytest def f(): import sys print >> sys.stderr, 'SKIP: foobar' # - raises(_pytest.runner.Skipped, "self.run(f, [])") + raises(pytest.skip.Exception, "self.run(f, [])") def test_parse_jitlog(self): def f(): From commits-noreply at bitbucket.org Sat Apr 16 21:13:37 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 21:13:37 +0200 (CEST) Subject: [pypy-svn] pypy default: a patch by ezio melotti. This fixes some unicode-related bugs. Message-ID: <20110416191337.1C962282B90@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43401:3bc52d356c52 Date: 2011-04-16 21:08 +0200 http://bitbucket.org/pypy/pypy/changeset/3bc52d356c52/ Log: a patch by ezio melotti. This fixes some unicode-related bugs. 
Details here: http://bugs.python.org/issue8271 diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -66,9 +66,10 @@ assert called[0] assert "42424242" in result - def checkdecodeerror(self, s, encoding, start, stop, addstuff=True): + def checkdecodeerror(self, s, encoding, start, stop, + addstuff=True, msg=None): called = [0] - def errorhandler(errors, enc, msg, t, startingpos, + def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: @@ -77,6 +78,8 @@ assert t is s assert start == startingpos assert stop == endingpos + if msg is not None: + assert errmsg == msg return u"42424242", stop return u"", endingpos decoder = self.getdecoder(encoding) @@ -90,7 +93,7 @@ class TestDecoding(UnicodeTests): - + # XXX test bom recognition in utf-16 # XXX test proper error handling @@ -131,6 +134,96 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + def test_ascii_error(self): + self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) + + def test_utf16_errors(self): + # trunkated BOM + for s in ["\xff", "\xfe"]: + self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) + + for s in [ + # unexpected end of data ascii + "\xff\xfeF", + # unexpected end of data + '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', + ]: + self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) + for s in [ + # illegal surrogate + "\xff\xfe\xff\xdb\xff\xff", + ]: + self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + + def test_utf16_bugs(self): + s = '\x80-\xe9\xdeL\xa3\x9b' + py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, + s, len(s), True) + + def test_utf7_bugs(self): + u = u'A\u2262\u0391.' + assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' 
+ + def test_utf7_tofrom_utf8_bug(self): + def _assert_decu7(input, expected): + assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) + + _assert_decu7('+-', u'+') + _assert_decu7('+-+-', u'++') + _assert_decu7('+-+AOQ-', u'+\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ- ', u'\xe4 ') + _assert_decu7(' +AOQ-', u' \xe4') + _assert_decu7(' +AOQ- ', u' \xe4 ') + _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') + + s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' + s_utf8 = u'Die Männer ärgen sich!' + s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' + + _assert_decu7(s_utf7, s_utf8_esc) + _assert_decu7(s_utf7, s_utf8) + + assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 + assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 + + def test_utf7_partial(self): + s = u"a+-b".encode('utf-7') + assert s == "a+--b" + decode = self.getdecoder('utf-7') + assert decode(s, 1, None) == (u'a', 1) + assert decode(s, 2, None) == (u'a', 1) + assert decode(s, 3, None) == (u'a+', 3) + assert decode(s, 4, None) == (u'a+-', 4) + assert decode(s, 5, None) == (u'a+-b', 5) + + def test_utf7_surrogates(self): + encode = self.getencoder('utf-7') + u = u'\U000abcde' + assert encode(u, len(u), None) == '+2m/c3g-' + decode = self.getdecoder('utf-7') + s = '+3ADYAA-' + raises(UnicodeError, decode, s, len(s), None) + def replace_handler(errors, codec, message, input, start, end): + return u'?', end + assert decode(s, len(s), None, final=True, + errorhandler = replace_handler) == (u'??', len(s)) + + +class TestUTF8Decoding(UnicodeTests): + def __init__(self): + self.decoder = self.getdecoder('utf-8') + + def replace_handler(self, errors, codec, message, input, start, end): + return u'\ufffd', end + + def ignore_handler(self, errors, codec, message, input, start, end): + return u'', end + + def to_bytestring(self, bytes): + return ''.join(chr(int(c, 16)) for c in bytes.split()) + def 
test_single_chars_utf8(self): for s in ["\xd7\x90", "\xd6\x96", "\xeb\x96\x95", "\xf0\x90\x91\x93"]: self.checkdecode(s, "utf-8") @@ -140,30 +233,297 @@ # This test will raise an error with python 3.x self.checkdecode(u"\ud800", "utf-8") + def test_invalid_start_byte(self): + """ + Test that an 'invalid start byte' error is raised when the first byte + is not in the ASCII range or is not a valid start byte of a 2-, 3-, or + 4-bytes sequence. The invalid start byte is replaced with a single + U+FFFD when errors='replace'. + E.g. <80> is a continuation byte and can appear only after a start byte. + """ + FFFD = u'\ufffd' + for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': + raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) + self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, + msg='invalid start byte') + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.replace_handler) == (FFFD, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', 9)) + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.ignore_handler) == (u'', 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', 9)) + + def test_unexpected_end_of_data(self): + """ + Test that an 'unexpected end of data' error is raised when the string + ends after a start byte of a 2-, 3-, or 4-bytes sequence without having + enough continuation bytes. The incomplete sequence is replaced with a + single U+FFFD when errors='replace'. + E.g. in the sequence , F3 is the start byte of a 4-bytes + sequence, but it's followed by only 2 valid continuation bytes and the + last continuation bytes is missing. + Note: the continuation bytes must be all valid, if one of them is + invalid another error will be raised. 
+ """ + sequences = [ + 'C2', 'DF', + 'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF', + 'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF', + 'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF', + 'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF', + 'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF', + 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' + ] + FFFD = u'\ufffd' + for seq in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq), addstuff=False, + msg='unexpected end of data') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (FFFD, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', len(seq) + 8)) + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (u'', len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', len(seq) + 8)) + + def test_invalid_cb_for_2bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte of a 2-bytes sequence is invalid. The start byte + is replaced by a single U+FFFD and the second byte is handled + separately when errors='replace'. + E.g. in the sequence , C2 is the start byte of a 2-bytes + sequence, but 41 is not a valid continuation byte because it's the + ASCII letter 'A'. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('C2 00', FFFD+u'\x00'), ('C2 7F', FFFD+u'\x7f'), + ('C2 C0', FFFDx2), ('C2 FF', FFFDx2), + ('DF 00', FFFD+u'\x00'), ('DF 7F', FFFD+u'\x7f'), + ('DF C0', FFFDx2), ('DF FF', FFFDx2), + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, 1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_3bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 3-bytes sequence are invalid. When + errors='replace', if the first continuation byte is valid, the first + two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the + third byte is handled separately, otherwise only the start byte is + replaced with a U+FFFD and the other continuation bytes are handled + separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('E0 00', FFFD+u'\x00'), ('E0 7F', FFFD+u'\x7f'), ('E0 80', FFFDx2), + ('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2), + ('E0 A0 00', FFFD+u'\x00'), ('E0 A0 7F', FFFD+u'\x7f'), + ('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2), + ('E0 BF 00', FFFD+u'\x00'), ('E0 BF 7F', FFFD+u'\x7f'), + ('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+u'\x00'), + ('E1 7F', FFFD+u'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2), + ('E1 80 00', FFFD+u'\x00'), ('E1 80 7F', FFFD+u'\x7f'), + ('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2), + ('E1 BF 00', FFFD+u'\x00'), ('E1 BF 7F', FFFD+u'\x7f'), + ('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+u'\x00'), + ('EC 7F', FFFD+u'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2), + ('EC 80 00', FFFD+u'\x00'), ('EC 80 7F', FFFD+u'\x7f'), + ('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2), + ('EC BF 00', FFFD+u'\x00'), ('EC BF 7F', FFFD+u'\x7f'), + ('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+u'\x00'), + ('ED 7F', FFFD+u'\x7f'), + # ('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^ + ('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+u'\x00'), + ('ED 80 7F', FFFD+u'\x7f'), ('ED 80 C0', FFFDx2), + ('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+u'\x00'), + ('ED 9F 7F', FFFD+u'\x7f'), ('ED 9F C0', FFFDx2), + ('ED 9F FF', FFFDx2), ('EE 00', FFFD+u'\x00'), + ('EE 7F', FFFD+u'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2), + ('EE 80 00', FFFD+u'\x00'), ('EE 80 7F', FFFD+u'\x7f'), + ('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2), + ('EE BF 00', FFFD+u'\x00'), ('EE BF 7F', FFFD+u'\x7f'), + ('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+u'\x00'), + ('EF 7F', FFFD+u'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2), + ('EF 80 00', FFFD+u'\x00'), ('EF 80 7F', FFFD+u'\x7f'), + ('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2), + ('EF BF 00', FFFD+u'\x00'), ('EF BF 7F', FFFD+u'\x7f'), + ('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2), + ] + for seq, res in sequences: + seq = 
self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_4bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 4-bytes sequence are invalid. When + errors='replace',the start byte and all the following valid + continuation bytes are replaced with a single U+FFFD, and all the bytes + starting from the first invalid continuation bytes (included) are + handled separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('F0 00', FFFD+u'\x00'), ('F0 7F', FFFD+u'\x7f'), ('F0 80', FFFDx2), + ('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2), + ('F0 90 00', FFFD+u'\x00'), ('F0 90 7F', FFFD+u'\x7f'), + ('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2), + ('F0 BF 00', FFFD+u'\x00'), ('F0 BF 7F', FFFD+u'\x7f'), + ('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2), + ('F0 90 80 00', FFFD+u'\x00'), ('F0 90 80 7F', FFFD+u'\x7f'), + ('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2), + ('F0 90 BF 00', FFFD+u'\x00'), ('F0 90 BF 7F', FFFD+u'\x7f'), + ('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2), + ('F0 BF 80 00', FFFD+u'\x00'), ('F0 BF 80 7F', FFFD+u'\x7f'), + ('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2), + ('F0 BF BF 00', FFFD+u'\x00'), ('F0 BF BF 7F', FFFD+u'\x7f'), + ('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2), + ('F1 00', FFFD+u'\x00'), ('F1 7F', FFFD+u'\x7f'), ('F1 C0', FFFDx2), + ('F1 FF', FFFDx2), ('F1 80 00', FFFD+u'\x00'), + ('F1 80 7F', FFFD+u'\x7f'), ('F1 80 C0', FFFDx2), + ('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+u'\x00'), + ('F1 BF 7F', FFFD+u'\x7f'), ('F1 BF C0', FFFDx2), + ('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+u'\x00'), + ('F1 80 80 7F', FFFD+u'\x7f'), ('F1 80 80 C0', FFFDx2), + ('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+u'\x00'), + ('F1 80 BF 7F', FFFD+u'\x7f'), ('F1 80 BF C0', FFFDx2), + ('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+u'\x00'), + ('F1 BF 80 7F', FFFD+u'\x7f'), ('F1 BF 80 C0', FFFDx2), + ('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+u'\x00'), + ('F1 BF BF 7F', FFFD+u'\x7f'), ('F1 BF BF C0', FFFDx2), + ('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+u'\x00'), + ('F3 7F', FFFD+u'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2), + ('F3 80 00', FFFD+u'\x00'), ('F3 80 7F', FFFD+u'\x7f'), + ('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2), + ('F3 BF 00', FFFD+u'\x00'), ('F3 BF 7F', FFFD+u'\x7f'), + ('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2), + ('F3 80 80 00', FFFD+u'\x00'), ('F3 80 80 7F', FFFD+u'\x7f'), + ('F3 80 
80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2), + ('F3 80 BF 00', FFFD+u'\x00'), ('F3 80 BF 7F', FFFD+u'\x7f'), + ('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2), + ('F3 BF 80 00', FFFD+u'\x00'), ('F3 BF 80 7F', FFFD+u'\x7f'), + ('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2), + ('F3 BF BF 00', FFFD+u'\x00'), ('F3 BF BF 7F', FFFD+u'\x7f'), + ('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2), + ('F4 00', FFFD+u'\x00'), ('F4 7F', FFFD+u'\x7f'), ('F4 90', FFFDx2), + ('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2), + ('F4 80 00', FFFD+u'\x00'), ('F4 80 7F', FFFD+u'\x7f'), + ('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2), + ('F4 8F 00', FFFD+u'\x00'), ('F4 8F 7F', FFFD+u'\x7f'), + ('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2), + ('F4 80 80 00', FFFD+u'\x00'), ('F4 80 80 7F', FFFD+u'\x7f'), + ('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2), + ('F4 80 BF 00', FFFD+u'\x00'), ('F4 80 BF 7F', FFFD+u'\x7f'), + ('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2), + ('F4 8F 80 00', FFFD+u'\x00'), ('F4 8F 80 7F', FFFD+u'\x7f'), + ('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2), + ('F4 8F BF 00', FFFD+u'\x00'), ('F4 8F BF 7F', FFFD+u'\x7f'), + ('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2) + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', 
len(seq) + 8)) + def test_utf8_errors(self): - for s in [# unexpected end of data - "\xd7", "\xd6", "\xeb\x96", "\xf0\x90\x91"]: - self.checkdecodeerror(s, "utf-8", 0, len(s), addstuff=False) - - # unexpected code byte - for s in ["\x81", "\xbf"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + # unexpected end of data + for s in ['\xd7', '\xd6', '\xeb\x96', '\xf0\x90\x91', '\xc2', '\xdf']: + self.checkdecodeerror(s, 'utf-8', 0, len(s), addstuff=False, + msg='unexpected end of data') # invalid data 2 byte for s in ["\xd7\x50", "\xd6\x06", "\xd6\xD6"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') # invalid data 3 byte for s in ["\xeb\x56\x95", "\xeb\x06\x95", "\xeb\xD6\x95"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xeb\x96\x55", "\xeb\x96\x05", "\xeb\x96\xD5"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') # invalid data 4 byte for s in ["\xf0\x50\x91\x93", "\xf0\x00\x91\x93", "\xf0\xd0\x91\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x51\x93", "\xf0\x90\x01\x93", "\xf0\x90\xd1\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x91\x53", "\xf0\x90\x91\x03", "\xf0\x90\x91\xd3"]: - self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True, + msg='invalid continuation byte') def test_issue8271(self): @@ -249,97 +609,18 @@ ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64', 
u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'), ] - def replace_handler(errors, codec, message, input, start, end): - return FFFD, end - def ignore_handler(errors, codec, message, input, start, end): - return u'', end + for n, (seq, res) in enumerate(sequences): decoder = self.getdecoder('utf-8') raises(UnicodeDecodeError, decoder, seq, len(seq), None, final=True) assert decoder(seq, len(seq), None, final=True, - errorhandler=replace_handler) == (res, len(seq)) + errorhandler=self.replace_handler) == (res, len(seq)) assert decoder(seq + 'b', len(seq) + 1, None, final=True, - errorhandler=replace_handler) == (res + u'b', - len(seq) + 1) + errorhandler=self.replace_handler) == (res + u'b', + len(seq) + 1) res = res.replace(FFFD, u'') assert decoder(seq, len(seq), None, final=True, - errorhandler=ignore_handler) == (res, len(seq)) - - def test_ascii_error(self): - self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) - - def test_utf16_errors(self): - # trunkated BOM - for s in ["\xff", "\xfe"]: - self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) - - for s in [ - # unexpected end of data ascii - "\xff\xfeF", - # unexpected end of data - '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', - ]: - self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) - for s in [ - # illegal surrogate - "\xff\xfe\xff\xdb\xff\xff", - ]: - self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) - - def test_utf16_bugs(self): - s = '\x80-\xe9\xdeL\xa3\x9b' - py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, - s, len(s), True) - - def test_utf7_bugs(self): - u = u'A\u2262\u0391.' - assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' 
- - def test_utf7_tofrom_utf8_bug(self): - def _assert_decu7(input, expected): - assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) - - _assert_decu7('+-', u'+') - _assert_decu7('+-+-', u'++') - _assert_decu7('+-+AOQ-', u'+\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ- ', u'\xe4 ') - _assert_decu7(' +AOQ-', u' \xe4') - _assert_decu7(' +AOQ- ', u' \xe4 ') - _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') - - s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' - s_utf8 = u'Die Männer ärgen sich!' - s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' - - _assert_decu7(s_utf7, s_utf8_esc) - _assert_decu7(s_utf7, s_utf8) - - assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 - assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 - - def test_utf7_partial(self): - s = u"a+-b".encode('utf-7') - assert s == "a+--b" - decode = self.getdecoder('utf-7') - assert decode(s, 1, None) == (u'a', 1) - assert decode(s, 2, None) == (u'a', 1) - assert decode(s, 3, None) == (u'a+', 3) - assert decode(s, 4, None) == (u'a+-', 4) - assert decode(s, 5, None) == (u'a+-b', 5) - - def test_utf7_surrogates(self): - encode = self.getencoder('utf-7') - u = u'\U000abcde' - assert encode(u, len(u), None) == '+2m/c3g-' - decode = self.getdecoder('utf-7') - s = '+3ADYAA-' - raises(UnicodeError, decode, s, len(s), None) - def replace_handler(errors, codec, message, input, start, end): - return u'?', end - assert decode(s, len(s), None, final=True, - errorhandler = replace_handler) == (u'??', len(s)) + errorhandler=self.ignore_handler) == (res, len(seq)) class TestEncoding(UnicodeTests): @@ -376,7 +657,7 @@ self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) @@ -384,7 +665,7 @@ 
self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_single_chars_utf8(self): # check every number of bytes per char @@ -394,7 +675,7 @@ def test_utf8_surrogates(self): # check replacing of two surrogates by single char while encoding # make sure that the string itself is not marshalled - u = u"\ud800" + u = u"\ud800" for i in range(4): u += u"\udc00" self.checkencode(u, "utf-8") @@ -422,7 +703,7 @@ def test_utf8(self): from pypy.rpython.test.test_llinterp import interpret def f(x): - + s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) s2 = runicode.unicode_encode_utf_8(u, len(u), True) @@ -438,6 +719,6 @@ u = runicode.UNICHR(x) t = runicode.ORD(u) return t - + res = interpret(f, [0x10140]) assert res == 0x10140 diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -87,8 +87,9 @@ result = UnicodeBuilder(size) pos = 0 while pos < size: - ch = s[pos] - ordch1 = ord(ch) + ordch1 = ord(s[pos]) + # fast path for ASCII + # XXX maybe use a while loop here if ordch1 < 0x80: result.append(unichr(ordch1)) pos += 1 @@ -98,110 +99,149 @@ if pos + n > size: if not final: break - else: - endpos = pos + 1 - while endpos < size and ord(s[endpos]) & 0xC0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "unexpected end of data", - s, pos, endpos) + charsleft = size - pos - 1 # either 0, 1, 2 + # note: when we get the 'unexpected end of data' we don't care + # about the pos anymore and we just ignore the value + if not charsleft: + # there's only the start byte and nothing else + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+1) + result.append(r) + break + ordch2 = ord(s[pos+1]) + if n == 3: + # 3-bytes seq with only a continuation byte + if 
(ordch2>>6 != 0b10 or + (ordch1 == 0xe0 and ordch2 < 0xa0)): + # or (ordch1 == 0xed and ordch2 > 0x9f) + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + else: + # second byte valid, but third byte missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+2) + result.append(r) + break + elif n == 4: + # 4-bytes seq with 1 or 2 continuation bytes + if (ordch2>>6 != 0b10 or + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)): + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + # third byte invalid, take the first two and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + else: + # there's only 1 or 2 valid cb, but the others are missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+charsleft+1) + result.append(r) + break + + if n == 0: + r, pos = errorhandler(errors, 'utf-8', + 'invalid start byte', + s, pos, pos+1) + result.append(r) + + elif n == 1: + assert 0, "ascii should have gone through the fast path" + + elif n == 2: + ordch2 = ord(s[pos+1]) + if ordch2>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) continue + # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00011111) << 6) + + (ordch2 & 0b00111111))) + pos += 2 - if n == 0: - r, pos = errorhandler(errors, "utf-8", - "invalid start byte", - s, pos, pos + 1) - result.append(r) - elif n == 1: - assert 0, "you can never get here" - elif n == 2: - # 110yyyyy 10zzzzzz ====> 00000000 00000yyy yyzzzzzz - - ordch2 = ord(s[pos+1]) - z, two = 
splitter[6, 2](ordch2) - y, six = splitter[5, 3](ordch1) - assert six == 6 - if two != 2: - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, pos + 1) - result.append(r) - else: - c = (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 3: - # 1110xxxx 10yyyyyy 10zzzzzz ====> 00000000 xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - z, two1 = splitter[6, 2](ordch3) - y, two2 = splitter[6, 2](ordch2) - x, fourteen = splitter[4, 4](ordch1) - assert fourteen == 14 - if (two1 != 2 or two2 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. # or (ordch1 == 0xed and ordch2 > 0x9f) ): + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00001111) << 12) + + ((ordch2 & 0b00111111) << 6) + + (ordch3 & 0b00111111))) + pos += 3 - # if ordch2 first two bits are 1 and 0, then the invalid - # continuation byte is ordch3; else ordch2 is invalid. 
- if two2 == 2: - endpos = pos + 2 - else: - endpos = pos + 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) - result.append(r) - else: - c = (x << 12) + (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 4: - # 11110www 10xxxxxx 10yyyyyy 10zzzzzz ====> - # 000wwwxx xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - z, two1 = splitter[6, 2](ordch4) - y, two2 = splitter[6, 2](ordch3) - x, two3 = splitter[6, 2](ordch2) - w, thirty = splitter[3, 5](ordch1) - assert thirty == 30 - if (two1 != 2 or two2 != 2 or two3 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): - endpos = pos + 1 - if ordch2 & 0xc0 == 0x80: - endpos += 1 - if ordch3 & 0xc0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + elif ordch4>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+3) + result.append(r) + continue + # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz + c = (((ordch1 & 0b00000111) << 18) + + ((ordch2 & 0b00111111) << 12) + + ((ordch3 & 0b00111111) << 6) + + (ordch4 & 0b00111111)) + if c <= MAXUNICODE: + result.append(UNICHR(c)) else: - c = (w << 18) + (x << 12) + (y << 6) + z - # convert to UTF-16 if necessary - if c <= MAXUNICODE: - result.append(UNICHR(c)) - else: - # compute and append the two surrogates: - # translate from 10000..10FFFF to 0..FFFF - c -= 0x10000 - # high surrogate = top 10 bits added to D800 - result.append(unichr(0xD800 + (c >> 10))) - # low surrogate = bottom 10 bits added to DC00 - result.append(unichr(0xDC00 + (c & 0x03FF))) - 
pos += n - else: - r, pos = errorhandler(errors, "utf-8", - "unsupported Unicode code range", - s, pos, pos + n) - result.append(r) + # compute and append the two surrogates: + # translate from 10000..10FFFF to 0..FFFF + c -= 0x10000 + # high surrogate = top 10 bits added to D800 + result.append(unichr(0xD800 + (c >> 10))) + # low surrogate = bottom 10 bits added to DC00 + result.append(unichr(0xDC00 + (c & 0x03FF))) + pos += 4 return result.build(), pos @@ -629,7 +669,7 @@ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, # sp ! " # $ % & ' ( ) * + , - . / 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 3, 0, 0, 0, 0, -# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? +# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, # @ A B C D E F G H I J K L M N O 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -905,20 +945,20 @@ pos = 0 while pos < size: ch = p[pos] - + if ord(ch) < limit: result.append(chr(ord(ch))) pos += 1 else: # startpos for collecting unencodable chars - collstart = pos - collend = pos+1 + collstart = pos + collend = pos+1 while collend < len(p) and ord(p[collend]) >= limit: collend += 1 r, pos = errorhandler(errors, encoding, reason, p, collstart, collend) result.append(r) - + return result.build() def unicode_encode_latin_1(p, size, errors, errorhandler=None): From commits-noreply at bitbucket.org Sat Apr 16 21:13:41 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 21:13:41 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110416191341.3871B282BD4@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43402:3e887780e9e3 Date: 2011-04-16 21:09 +0200 http://bitbucket.org/pypy/pypy/changeset/3e887780e9e3/ Log: merge diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1498,7 +1498,7 @@ try: from _ffi import CDLL, types 
except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') + sys.stderr.write('SKIP: cannot import _ffi\n') return 0 libm = CDLL(libm_name) diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -271,12 +271,12 @@ assert log.result == 42 def test_skip(self): - import _pytest + import pytest def f(): import sys print >> sys.stderr, 'SKIP: foobar' # - raises(_pytest.runner.Skipped, "self.run(f, [])") + raises(pytest.skip.Exception, "self.run(f, [])") def test_parse_jitlog(self): def f(): From commits-noreply at bitbucket.org Sat Apr 16 21:48:46 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 21:48:46 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: I'm a moron. A test and a fix Message-ID: <20110416194846.546C136C20F@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43403:076d2b524c24 Date: 2011-04-16 21:48 +0200 http://bitbucket.org/pypy/pypy/changeset/076d2b524c24/ Log: I'm a moron. A test and a fix diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -134,6 +134,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" 
self.current_clt = looptoken.compiled_loop_token + self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -142,6 +143,7 @@ allblocks) def teardown(self): + self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -450,9 +452,10 @@ mc.copy_to_raw_memory(addr) else: # guard not invalidate, patch where it jumps - pos, _ = clt.invalidate_positions[inv_counter] - clt.invalidate_positions[inv_counter] = (pos + rawstart, - relative_target) + pos, _ = self.invalidate_positions[inv_counter] + clt.invalidate_positions.append((pos + rawstart, + relative_target)) + inv_counter += 1 def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1458,9 +1461,9 @@ def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, locs, ign_2): - pos = self.mc.get_relative_pos() + 1 # after jmp + pos = self.mc.get_relative_pos() + 1 # after potential jmp guard_token.pos_jump_offset = pos - self.current_clt.invalidate_positions.append((pos, 0)) + self.invalidate_positions.append((pos, 0)) self.pending_guard_tokens.append(guard_token) def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -233,5 +233,34 @@ assert self.meta_interp(g, []) == g() + def test_invalidate_bridge(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, total=total, foo=foo) + if i > 5: + total += foo.a + else: + total += 2*foo.a + i += 1 + return total + + def main(): + foo = Foo() + foo.a = 1 + total = f(foo) + foo.a = 2 + total += f(foo) + return total + + res = self.meta_interp(main, 
[]) + assert res == main() + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass From commits-noreply at bitbucket.org Sat Apr 16 21:48:50 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sat, 16 Apr 2011 21:48:50 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: I'm a moron. A test and a fix Message-ID: <20110416194850.4A38E282BAA@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43403:076d2b524c24 Date: 2011-04-16 21:48 +0200 http://bitbucket.org/pypy/pypy/changeset/076d2b524c24/ Log: I'm a moron. A test and a fix diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -134,6 +134,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" self.current_clt = looptoken.compiled_loop_token + self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -142,6 +143,7 @@ allblocks) def teardown(self): + self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -450,9 +452,10 @@ mc.copy_to_raw_memory(addr) else: # guard not invalidate, patch where it jumps - pos, _ = clt.invalidate_positions[inv_counter] - clt.invalidate_positions[inv_counter] = (pos + rawstart, - relative_target) + pos, _ = self.invalidate_positions[inv_counter] + clt.invalidate_positions.append((pos + rawstart, + relative_target)) + inv_counter += 1 def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1458,9 +1461,9 @@ def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, locs, ign_2): - pos = self.mc.get_relative_pos() + 1 # after jmp + pos = self.mc.get_relative_pos() + 1 # after potential jmp guard_token.pos_jump_offset = pos - self.current_clt.invalidate_positions.append((pos, 0)) + 
self.invalidate_positions.append((pos, 0)) self.pending_guard_tokens.append(guard_token) def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -233,5 +233,34 @@ assert self.meta_interp(g, []) == g() + def test_invalidate_bridge(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, total=total, foo=foo) + if i > 5: + total += foo.a + else: + total += 2*foo.a + i += 1 + return total + + def main(): + foo = Foo() + foo.a = 1 + total = f(foo) + foo.a = 2 + total += f(foo) + return total + + res = self.meta_interp(main, []) + assert res == main() + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass From commits-noreply at bitbucket.org Sat Apr 16 22:11:55 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:11:55 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Support for ll_str(). Message-ID: <20110416201155.97C06282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43404:55e7d6c9094e Date: 2011-04-16 09:59 -0700 http://bitbucket.org/pypy/pypy/changeset/55e7d6c9094e/ Log: Support for ll_str(). 
diff --git a/pypy/rpython/lltypesystem/rcompressed.py b/pypy/rpython/lltypesystem/rcompressed.py --- a/pypy/rpython/lltypesystem/rcompressed.py +++ b/pypy/rpython/lltypesystem/rcompressed.py @@ -4,6 +4,7 @@ from pypy.rpython.rmodel import Repr, inputconst from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rpython.lltypesystem.rstr import string_repr from pypy.rpython.error import TyperError @@ -99,6 +100,11 @@ def get_ll_dummyval_obj(self, rtyper, s_value): return DummyValueBuilder(rtyper) + def ll_str(self, i): + return hiddengcref32_str + +hiddengcref32_str = string_repr.convert_const("") + class DummyValueBuilder(object): TYPE = llmemory.HiddenGcRef32 diff --git a/pypy/rpython/test/test_rcompressed.py b/pypy/rpython/test/test_rcompressed.py --- a/pypy/rpython/test/test_rcompressed.py +++ b/pypy/rpython/test/test_rcompressed.py @@ -223,3 +223,11 @@ assert res == True res = self.interpret(fn, [5, 6, 8]) assert res == False + + def test_str(self): + class A: + pass + def fn(): + return str([A()]) + res = self.interpret(fn, []) + assert 'HiddenGcRef32' in self.ll_to_string(res) From commits-noreply at bitbucket.org Sat Apr 16 22:11:56 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:11:56 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Finish call descrs. Message-ID: <20110416201156.BC9DA282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43405:ac4ae9746e4e Date: 2011-04-16 10:13 -0700 http://bitbucket.org/pypy/pypy/changeset/ac4ae9746e4e/ Log: Finish call descrs. 
diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -346,6 +346,23 @@ descr1 = get_call_descr(c2, [], RESTYPE) assert descr1.is_result_signed() == signed +def test_call_descr_hiddengcref32(): + if sys.maxint == 2147483647: + py.test.skip("HiddenGcRef32: for 64-bit only") + for tsc in [False, True]: + c0 = GcCache(tsc) + descr = get_call_descr(c0, [llmemory.HiddenGcRef32], lltype.Void) + assert descr.get_arg_types() == 'H' + # + descr = get_call_descr(c0, [], llmemory.HiddenGcRef32) + assert descr.get_return_type() == 'H' + sz = descr.get_result_size(tsc) + if not tsc: + assert sz == 4 + else: + assert isinstance(sz, Symbolic) + assert sz.TYPE == llmemory.HiddenGcRef32 + def test_repr_of_descr(): c0 = GcCache(False) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -332,6 +332,8 @@ result = 'rffi.cast(lltype.SignedLongLong, res)' elif self.get_return_type() == history.VOID: result = 'None' + elif self.get_return_type() == 'H': + result = 'llop.show_from_ptr32(llmemory.GCREF, res)' else: assert 0 source = py.code.Source(""" @@ -409,6 +411,12 @@ def get_result_size(self, translate_support_code): return symbolic.get_size_of_ptr(translate_support_code) +class GcPtrHidden32CallDescr(BaseCallDescr): + _clsname = 'GcPtrHidden32CallDescr' + _return_type = 'H' + def get_result_size(self, translate_support_code): + return symbolic.get_size(llmemory.HiddenGcRef32,translate_support_code) + class FloatCallDescr(BaseCallDescr): _clsname = 'FloatCallDescr' _return_type = history.FLOAT @@ -437,7 +445,8 @@ return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, NonGcPtrCallDescr, 'Call', 'get_result_size', Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') + 
'_is_result_signed', + GcPtrHidden32CallDescr) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): arg_classes = [] diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -44,7 +44,8 @@ if (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or isinstance(op.getdescr(), GcPtrHidden32ArrayDescr) or (isinstance(op.getdescr(), BaseCallDescr) and - 'H' in op.getdescr().get_arg_types())): + ('H' in op.getdescr().get_arg_types() or + op.getdescr().get_return_type() == 'H'))): from pypy.jit.metainterp.test.support import SkipThisRun raise SkipThisRun("non-translated test with compressptr") def can_inline_malloc(self, descr): @@ -885,6 +886,11 @@ [v1], v2)) args[i] = v2 op = op.copy_and_change(op.getopnum(), args=args) + if descr.get_return_type() == 'H': + v1 = BoxInt() + v2 = op.result + newops.append(op.copy_and_change(op.getopnum(), result=v1)) + op = ResOperation(rop.SHOW_FROM_PTR32, [v1], v2) # ---------- newops.append(op) del operations[:] From commits-noreply at bitbucket.org Sat Apr 16 22:11:57 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:11:57 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test and fix: if g is a low-level function pointer, then g(*args) Message-ID: <20110416201157.8C5C4282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43406:2de9ed30dbdb Date: 2011-04-16 13:07 -0700 http://bitbucket.org/pypy/pypy/changeset/2de9ed30dbdb/ Log: Test and fix: if g is a low-level function pointer, then g(*args) might mess up the exact type of the args if the tuple stores them differently. This is a bit non-obvious to test, and is only apparent in HiddenGcRef32. 
diff --git a/pypy/rpython/rbuiltin.py b/pypy/rpython/rbuiltin.py --- a/pypy/rpython/rbuiltin.py +++ b/pypy/rpython/rbuiltin.py @@ -71,11 +71,11 @@ if not isinstance(r_tuple, AbstractTupleRepr): raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): - v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) + v_item = r_tuple.getitem(hop.llops, v_tuple, i) hop.nb_args += 1 hop.args_v.append(v_item) hop.args_s.append(s_tuple.items[i]) - hop.args_r.append(r_tuple.items_r[i]) + hop.args_r.append(r_tuple.external_items_r[i]) keywords = arguments.keywords if not takes_kwds and keywords: diff --git a/pypy/rpython/test/test_rcompressed.py b/pypy/rpython/test/test_rcompressed.py --- a/pypy/rpython/test/test_rcompressed.py +++ b/pypy/rpython/test/test_rcompressed.py @@ -2,7 +2,8 @@ from pypy.config.translationoption import IS_64_BITS from pypy.rpython.test import test_rclass from pypy.rpython import rmodel, rint, rclass -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem.lloperation import llop def setup_module(mod): @@ -231,3 +232,27 @@ return str([A()]) res = self.interpret(fn, []) assert 'HiddenGcRef32' in self.ll_to_string(res) + + def test_nonzero(self): + S = lltype.GcStruct('S') + def fn(): + p = lltype.malloc(S) + r = llop.hide_into_ptr32(llmemory.HiddenGcRef32, p) + return bool(r) + res = self.interpret(fn, []) + assert res == True + + def test_funccall_starargs(self): + def g(a, b): + return a.n - b.n + S = lltype.GcStruct('S', ('n', lltype.Signed)) + SPTR = lltype.Ptr(S) + GFUNC = lltype.FuncType([SPTR, SPTR], lltype.Signed) + gptr = lltype.functionptr(GFUNC, 'g', _callable=g) + def fn(x, y): + a = lltype.malloc(S); a.n = x + b = lltype.malloc(S); b.n = y + args = (a, b) + return gptr(*args) + res = self.interpret(fn, [45, 3]) + assert res == 42 From commits-noreply at bitbucket.org Sat Apr 16 22:11:59 2011 From: commits-noreply at bitbucket.org (arigo) 
Date: Sat, 16 Apr 2011 22:11:59 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix: cannot use "x == NULL" for all pointers x, because of HiddenGcRef32. Message-ID: <20110416201159.781E5282BD9@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43407:5cbbbb07b056 Date: 2011-04-16 22:09 +0200 http://bitbucket.org/pypy/pypy/changeset/5cbbbb07b056/ Log: Fix: cannot use "x == NULL" for all pointers x, because of HiddenGcRef32. Use instead "!x". diff --git a/pypy/translator/c/funcgen.py b/pypy/translator/c/funcgen.py --- a/pypy/translator/c/funcgen.py +++ b/pypy/translator/c/funcgen.py @@ -594,11 +594,11 @@ def OP_PTR_NONZERO(self, op): - return '%s = (%s != NULL);' % (self.expr(op.result), - self.expr(op.args[0])) + return '%s = !!%s;' % (self.expr(op.result), + self.expr(op.args[0])) def OP_PTR_ISZERO(self, op): - return '%s = (%s == NULL);' % (self.expr(op.result), - self.expr(op.args[0])) + return '%s = !%s;' % (self.expr(op.result), + self.expr(op.args[0])) def OP_PTR_EQ(self, op): return '%s = (%s == %s);' % (self.expr(op.result), From commits-noreply at bitbucket.org Sat Apr 16 22:12:00 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:00 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix, mostly for tests: let c_mmap_safe() be a no-gc operation. Message-ID: <20110416201200.883E9282BD9@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43408:dd7b2ce8b037 Date: 2011-04-16 22:10 +0200 http://bitbucket.org/pypy/pypy/changeset/dd7b2ce8b037/ Log: Fix, mostly for tests: let c_mmap_safe() be a no-gc operation. 
diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -101,7 +101,8 @@ compilation_info=CConfig._compilation_info_) safe = rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, - sandboxsafe=True, threadsafe=False) + sandboxsafe=True, threadsafe=False, + _nowrapper=True) return unsafe, safe def winexternal(name, args, result, **kwargs): @@ -118,18 +119,31 @@ has_mremap = cConfig['has_mremap'] c_mmap, _c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT, rffi.INT, off_t], PTR) - c_munmap, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT) + c_munmap, _c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT) c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT) if has_mremap: c_mremap, _ = external('mremap', [PTR, size_t, size_t, rffi.ULONG], PTR) def c_mmap_safe(addr, length, prot, flags, fd, offset): + length = rffi.cast(rffi.SIZE_T, length) + prot = rffi.cast(rffi.INT, prot) + flags = rffi.cast(rffi.INT, flags) + fd = rffi.cast(rffi.INT, fd) + offset = rffi.cast(off_t, offset) return _c_mmap_safe(addr, length, prot, flags, fd, offset) c_mmap_safe._annenforceargs_ = (PTR, int, int, int, int, int) + def c_munmap_safe(addr, length): + length = rffi.cast(rffi.SIZE_T, length) + res = _c_munmap_safe(addr, length) + return rffi.cast(lltype.Signed, res) + c_munmap_safe._annenforceargs_ = (PTR, int) + # this one is always safe - _, _get_page_size = external('getpagesize', [], rffi.INT) + _, _c_get_page_size = external('getpagesize', [], rffi.INT) + def _get_page_size(): + return rffi.cast(lltype.Signed, _c_get_page_size()) _get_allocation_granularity = _get_page_size elif _MS_WINDOWS: From commits-noreply at bitbucket.org Sat Apr 16 22:12:12 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:12 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: hg merge default Message-ID: <20110416201212.8BFF8282BD7@codespeak.net> Author: 
Armin Rigo Branch: 32ptr-on-64bit Changeset: r43409:78429cb225cf Date: 2011-04-16 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/78429cb225cf/ Log: hg merge default diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ -45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -66,9 +66,10 @@ assert called[0] assert "42424242" in result - def checkdecodeerror(self, s, encoding, start, stop, addstuff=True): + def checkdecodeerror(self, s, encoding, start, stop, + addstuff=True, msg=None): called = [0] - def errorhandler(errors, enc, msg, t, startingpos, + def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: @@ -77,6 +78,8 @@ assert t is s assert start == startingpos assert stop == endingpos + if msg is not None: + assert errmsg == msg return u"42424242", stop return u"", endingpos decoder = self.getdecoder(encoding) @@ -90,7 +93,7 @@ class TestDecoding(UnicodeTests): - + # XXX test bom recognition in utf-16 # XXX test proper error handling @@ -131,6 +134,96 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + def test_ascii_error(self): + self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) + + def test_utf16_errors(self): + # trunkated BOM + for s in ["\xff", "\xfe"]: + self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) + + 
for s in [ + # unexpected end of data ascii + "\xff\xfeF", + # unexpected end of data + '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', + ]: + self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) + for s in [ + # illegal surrogate + "\xff\xfe\xff\xdb\xff\xff", + ]: + self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + + def test_utf16_bugs(self): + s = '\x80-\xe9\xdeL\xa3\x9b' + py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, + s, len(s), True) + + def test_utf7_bugs(self): + u = u'A\u2262\u0391.' + assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' + + def test_utf7_tofrom_utf8_bug(self): + def _assert_decu7(input, expected): + assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) + + _assert_decu7('+-', u'+') + _assert_decu7('+-+-', u'++') + _assert_decu7('+-+AOQ-', u'+\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ- ', u'\xe4 ') + _assert_decu7(' +AOQ-', u' \xe4') + _assert_decu7(' +AOQ- ', u' \xe4 ') + _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') + + s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' + s_utf8 = u'Die Männer ärgen sich!' + s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' 
+ + _assert_decu7(s_utf7, s_utf8_esc) + _assert_decu7(s_utf7, s_utf8) + + assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 + assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 + + def test_utf7_partial(self): + s = u"a+-b".encode('utf-7') + assert s == "a+--b" + decode = self.getdecoder('utf-7') + assert decode(s, 1, None) == (u'a', 1) + assert decode(s, 2, None) == (u'a', 1) + assert decode(s, 3, None) == (u'a+', 3) + assert decode(s, 4, None) == (u'a+-', 4) + assert decode(s, 5, None) == (u'a+-b', 5) + + def test_utf7_surrogates(self): + encode = self.getencoder('utf-7') + u = u'\U000abcde' + assert encode(u, len(u), None) == '+2m/c3g-' + decode = self.getdecoder('utf-7') + s = '+3ADYAA-' + raises(UnicodeError, decode, s, len(s), None) + def replace_handler(errors, codec, message, input, start, end): + return u'?', end + assert decode(s, len(s), None, final=True, + errorhandler = replace_handler) == (u'??', len(s)) + + +class TestUTF8Decoding(UnicodeTests): + def __init__(self): + self.decoder = self.getdecoder('utf-8') + + def replace_handler(self, errors, codec, message, input, start, end): + return u'\ufffd', end + + def ignore_handler(self, errors, codec, message, input, start, end): + return u'', end + + def to_bytestring(self, bytes): + return ''.join(chr(int(c, 16)) for c in bytes.split()) + def test_single_chars_utf8(self): for s in ["\xd7\x90", "\xd6\x96", "\xeb\x96\x95", "\xf0\x90\x91\x93"]: self.checkdecode(s, "utf-8") @@ -140,30 +233,297 @@ # This test will raise an error with python 3.x self.checkdecode(u"\ud800", "utf-8") + def test_invalid_start_byte(self): + """ + Test that an 'invalid start byte' error is raised when the first byte + is not in the ASCII range or is not a valid start byte of a 2-, 3-, or + 4-bytes sequence. The invalid start byte is replaced with a single + U+FFFD when errors='replace'. + E.g. <80> is a continuation byte and can appear only after a start byte. 
+ """ + FFFD = u'\ufffd' + for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': + raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) + self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, + msg='invalid start byte') + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.replace_handler) == (FFFD, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', 9)) + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.ignore_handler) == (u'', 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', 9)) + + def test_unexpected_end_of_data(self): + """ + Test that an 'unexpected end of data' error is raised when the string + ends after a start byte of a 2-, 3-, or 4-bytes sequence without having + enough continuation bytes. The incomplete sequence is replaced with a + single U+FFFD when errors='replace'. + E.g. in the sequence , F3 is the start byte of a 4-bytes + sequence, but it's followed by only 2 valid continuation bytes and the + last continuation bytes is missing. + Note: the continuation bytes must be all valid, if one of them is + invalid another error will be raised. 
+ """ + sequences = [ + 'C2', 'DF', + 'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF', + 'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF', + 'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF', + 'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF', + 'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF', + 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' + ] + FFFD = u'\ufffd' + for seq in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq), addstuff=False, + msg='unexpected end of data') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (FFFD, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', len(seq) + 8)) + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (u'', len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', len(seq) + 8)) + + def test_invalid_cb_for_2bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte of a 2-bytes sequence is invalid. The start byte + is replaced by a single U+FFFD and the second byte is handled + separately when errors='replace'. + E.g. in the sequence , C2 is the start byte of a 2-bytes + sequence, but 41 is not a valid continuation byte because it's the + ASCII letter 'A'. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('C2 00', FFFD+u'\x00'), ('C2 7F', FFFD+u'\x7f'), + ('C2 C0', FFFDx2), ('C2 FF', FFFDx2), + ('DF 00', FFFD+u'\x00'), ('DF 7F', FFFD+u'\x7f'), + ('DF C0', FFFDx2), ('DF FF', FFFDx2), + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, 1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_3bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 3-bytes sequence are invalid. When + errors='replace', if the first continuation byte is valid, the first + two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the + third byte is handled separately, otherwise only the start byte is + replaced with a U+FFFD and the other continuation bytes are handled + separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('E0 00', FFFD+u'\x00'), ('E0 7F', FFFD+u'\x7f'), ('E0 80', FFFDx2), + ('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2), + ('E0 A0 00', FFFD+u'\x00'), ('E0 A0 7F', FFFD+u'\x7f'), + ('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2), + ('E0 BF 00', FFFD+u'\x00'), ('E0 BF 7F', FFFD+u'\x7f'), + ('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+u'\x00'), + ('E1 7F', FFFD+u'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2), + ('E1 80 00', FFFD+u'\x00'), ('E1 80 7F', FFFD+u'\x7f'), + ('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2), + ('E1 BF 00', FFFD+u'\x00'), ('E1 BF 7F', FFFD+u'\x7f'), + ('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+u'\x00'), + ('EC 7F', FFFD+u'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2), + ('EC 80 00', FFFD+u'\x00'), ('EC 80 7F', FFFD+u'\x7f'), + ('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2), + ('EC BF 00', FFFD+u'\x00'), ('EC BF 7F', FFFD+u'\x7f'), + ('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+u'\x00'), + ('ED 7F', FFFD+u'\x7f'), + # ('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^ + ('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+u'\x00'), + ('ED 80 7F', FFFD+u'\x7f'), ('ED 80 C0', FFFDx2), + ('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+u'\x00'), + ('ED 9F 7F', FFFD+u'\x7f'), ('ED 9F C0', FFFDx2), + ('ED 9F FF', FFFDx2), ('EE 00', FFFD+u'\x00'), + ('EE 7F', FFFD+u'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2), + ('EE 80 00', FFFD+u'\x00'), ('EE 80 7F', FFFD+u'\x7f'), + ('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2), + ('EE BF 00', FFFD+u'\x00'), ('EE BF 7F', FFFD+u'\x7f'), + ('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+u'\x00'), + ('EF 7F', FFFD+u'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2), + ('EF 80 00', FFFD+u'\x00'), ('EF 80 7F', FFFD+u'\x7f'), + ('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2), + ('EF BF 00', FFFD+u'\x00'), ('EF BF 7F', FFFD+u'\x7f'), + ('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2), + ] + for seq, res in sequences: + seq = 
self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_4bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 4-bytes sequence are invalid. When + errors='replace',the start byte and all the following valid + continuation bytes are replaced with a single U+FFFD, and all the bytes + starting from the first invalid continuation bytes (included) are + handled separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('F0 00', FFFD+u'\x00'), ('F0 7F', FFFD+u'\x7f'), ('F0 80', FFFDx2), + ('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2), + ('F0 90 00', FFFD+u'\x00'), ('F0 90 7F', FFFD+u'\x7f'), + ('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2), + ('F0 BF 00', FFFD+u'\x00'), ('F0 BF 7F', FFFD+u'\x7f'), + ('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2), + ('F0 90 80 00', FFFD+u'\x00'), ('F0 90 80 7F', FFFD+u'\x7f'), + ('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2), + ('F0 90 BF 00', FFFD+u'\x00'), ('F0 90 BF 7F', FFFD+u'\x7f'), + ('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2), + ('F0 BF 80 00', FFFD+u'\x00'), ('F0 BF 80 7F', FFFD+u'\x7f'), + ('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2), + ('F0 BF BF 00', FFFD+u'\x00'), ('F0 BF BF 7F', FFFD+u'\x7f'), + ('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2), + ('F1 00', FFFD+u'\x00'), ('F1 7F', FFFD+u'\x7f'), ('F1 C0', FFFDx2), + ('F1 FF', FFFDx2), ('F1 80 00', FFFD+u'\x00'), + ('F1 80 7F', FFFD+u'\x7f'), ('F1 80 C0', FFFDx2), + ('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+u'\x00'), + ('F1 BF 7F', FFFD+u'\x7f'), ('F1 BF C0', FFFDx2), + ('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+u'\x00'), + ('F1 80 80 7F', FFFD+u'\x7f'), ('F1 80 80 C0', FFFDx2), + ('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+u'\x00'), + ('F1 80 BF 7F', FFFD+u'\x7f'), ('F1 80 BF C0', FFFDx2), + ('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+u'\x00'), + ('F1 BF 80 7F', FFFD+u'\x7f'), ('F1 BF 80 C0', FFFDx2), + ('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+u'\x00'), + ('F1 BF BF 7F', FFFD+u'\x7f'), ('F1 BF BF C0', FFFDx2), + ('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+u'\x00'), + ('F3 7F', FFFD+u'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2), + ('F3 80 00', FFFD+u'\x00'), ('F3 80 7F', FFFD+u'\x7f'), + ('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2), + ('F3 BF 00', FFFD+u'\x00'), ('F3 BF 7F', FFFD+u'\x7f'), + ('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2), + ('F3 80 80 00', FFFD+u'\x00'), ('F3 80 80 7F', FFFD+u'\x7f'), + ('F3 80 
80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2), + ('F3 80 BF 00', FFFD+u'\x00'), ('F3 80 BF 7F', FFFD+u'\x7f'), + ('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2), + ('F3 BF 80 00', FFFD+u'\x00'), ('F3 BF 80 7F', FFFD+u'\x7f'), + ('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2), + ('F3 BF BF 00', FFFD+u'\x00'), ('F3 BF BF 7F', FFFD+u'\x7f'), + ('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2), + ('F4 00', FFFD+u'\x00'), ('F4 7F', FFFD+u'\x7f'), ('F4 90', FFFDx2), + ('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2), + ('F4 80 00', FFFD+u'\x00'), ('F4 80 7F', FFFD+u'\x7f'), + ('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2), + ('F4 8F 00', FFFD+u'\x00'), ('F4 8F 7F', FFFD+u'\x7f'), + ('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2), + ('F4 80 80 00', FFFD+u'\x00'), ('F4 80 80 7F', FFFD+u'\x7f'), + ('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2), + ('F4 80 BF 00', FFFD+u'\x00'), ('F4 80 BF 7F', FFFD+u'\x7f'), + ('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2), + ('F4 8F 80 00', FFFD+u'\x00'), ('F4 8F 80 7F', FFFD+u'\x7f'), + ('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2), + ('F4 8F BF 00', FFFD+u'\x00'), ('F4 8F BF 7F', FFFD+u'\x7f'), + ('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2) + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', 
len(seq) + 8)) + def test_utf8_errors(self): - for s in [# unexpected end of data - "\xd7", "\xd6", "\xeb\x96", "\xf0\x90\x91"]: - self.checkdecodeerror(s, "utf-8", 0, len(s), addstuff=False) - - # unexpected code byte - for s in ["\x81", "\xbf"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + # unexpected end of data + for s in ['\xd7', '\xd6', '\xeb\x96', '\xf0\x90\x91', '\xc2', '\xdf']: + self.checkdecodeerror(s, 'utf-8', 0, len(s), addstuff=False, + msg='unexpected end of data') # invalid data 2 byte for s in ["\xd7\x50", "\xd6\x06", "\xd6\xD6"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') # invalid data 3 byte for s in ["\xeb\x56\x95", "\xeb\x06\x95", "\xeb\xD6\x95"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xeb\x96\x55", "\xeb\x96\x05", "\xeb\x96\xD5"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') # invalid data 4 byte for s in ["\xf0\x50\x91\x93", "\xf0\x00\x91\x93", "\xf0\xd0\x91\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x51\x93", "\xf0\x90\x01\x93", "\xf0\x90\xd1\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x91\x53", "\xf0\x90\x91\x03", "\xf0\x90\x91\xd3"]: - self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True, + msg='invalid continuation byte') def test_issue8271(self): @@ -249,97 +609,18 @@ ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64', 
u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'), ] - def replace_handler(errors, codec, message, input, start, end): - return FFFD, end - def ignore_handler(errors, codec, message, input, start, end): - return u'', end + for n, (seq, res) in enumerate(sequences): decoder = self.getdecoder('utf-8') raises(UnicodeDecodeError, decoder, seq, len(seq), None, final=True) assert decoder(seq, len(seq), None, final=True, - errorhandler=replace_handler) == (res, len(seq)) + errorhandler=self.replace_handler) == (res, len(seq)) assert decoder(seq + 'b', len(seq) + 1, None, final=True, - errorhandler=replace_handler) == (res + u'b', - len(seq) + 1) + errorhandler=self.replace_handler) == (res + u'b', + len(seq) + 1) res = res.replace(FFFD, u'') assert decoder(seq, len(seq), None, final=True, - errorhandler=ignore_handler) == (res, len(seq)) - - def test_ascii_error(self): - self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) - - def test_utf16_errors(self): - # trunkated BOM - for s in ["\xff", "\xfe"]: - self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) - - for s in [ - # unexpected end of data ascii - "\xff\xfeF", - # unexpected end of data - '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', - ]: - self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) - for s in [ - # illegal surrogate - "\xff\xfe\xff\xdb\xff\xff", - ]: - self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) - - def test_utf16_bugs(self): - s = '\x80-\xe9\xdeL\xa3\x9b' - py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, - s, len(s), True) - - def test_utf7_bugs(self): - u = u'A\u2262\u0391.' - assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' 
- - def test_utf7_tofrom_utf8_bug(self): - def _assert_decu7(input, expected): - assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) - - _assert_decu7('+-', u'+') - _assert_decu7('+-+-', u'++') - _assert_decu7('+-+AOQ-', u'+\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ- ', u'\xe4 ') - _assert_decu7(' +AOQ-', u' \xe4') - _assert_decu7(' +AOQ- ', u' \xe4 ') - _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') - - s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' - s_utf8 = u'Die Männer ärgen sich!' - s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' - - _assert_decu7(s_utf7, s_utf8_esc) - _assert_decu7(s_utf7, s_utf8) - - assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 - assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 - - def test_utf7_partial(self): - s = u"a+-b".encode('utf-7') - assert s == "a+--b" - decode = self.getdecoder('utf-7') - assert decode(s, 1, None) == (u'a', 1) - assert decode(s, 2, None) == (u'a', 1) - assert decode(s, 3, None) == (u'a+', 3) - assert decode(s, 4, None) == (u'a+-', 4) - assert decode(s, 5, None) == (u'a+-b', 5) - - def test_utf7_surrogates(self): - encode = self.getencoder('utf-7') - u = u'\U000abcde' - assert encode(u, len(u), None) == '+2m/c3g-' - decode = self.getdecoder('utf-7') - s = '+3ADYAA-' - raises(UnicodeError, decode, s, len(s), None) - def replace_handler(errors, codec, message, input, start, end): - return u'?', end - assert decode(s, len(s), None, final=True, - errorhandler = replace_handler) == (u'??', len(s)) + errorhandler=self.ignore_handler) == (res, len(seq)) class TestEncoding(UnicodeTests): @@ -376,7 +657,7 @@ self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) @@ -384,7 +665,7 @@ 
self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_single_chars_utf8(self): # check every number of bytes per char @@ -394,7 +675,7 @@ def test_utf8_surrogates(self): # check replacing of two surrogates by single char while encoding # make sure that the string itself is not marshalled - u = u"\ud800" + u = u"\ud800" for i in range(4): u += u"\udc00" self.checkencode(u, "utf-8") @@ -422,7 +703,7 @@ def test_utf8(self): from pypy.rpython.test.test_llinterp import interpret def f(x): - + s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) s2 = runicode.unicode_encode_utf_8(u, len(u), True) @@ -438,6 +719,6 @@ u = runicode.UNICHR(x) t = runicode.ORD(u) return t - + res = interpret(f, [0x10140]) assert res == 0x10140 diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + 
def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. """ + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -779,6 +782,8 @@ """ if T is lltype.Void: return None + if isinstance(T, lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or ( not 
isinstance(cobj, ctypes.c_uint32) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -87,8 +87,9 @@ result = UnicodeBuilder(size) pos = 0 while pos < size: - ch = s[pos] - ordch1 = ord(ch) + ordch1 = ord(s[pos]) + # fast path for ASCII + # XXX maybe use a while loop here if ordch1 < 0x80: result.append(unichr(ordch1)) pos += 1 @@ -98,110 +99,149 @@ if pos + n > size: if not final: break - else: - endpos = pos + 1 - while endpos < size and ord(s[endpos]) & 0xC0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "unexpected end of data", - s, pos, endpos) + charsleft = size - pos - 1 # either 0, 1, 2 + # note: when we get the 'unexpected end of data' we don't care + # about the pos anymore and we just ignore the value + if not charsleft: + # there's only the start byte and nothing else + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+1) + result.append(r) + break + ordch2 = ord(s[pos+1]) + if n == 3: + # 3-bytes seq with only a continuation byte + if (ordch2>>6 != 0b10 or + (ordch1 == 0xe0 and ordch2 < 0xa0)): + # or (ordch1 == 0xed and ordch2 > 0x9f) + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + else: + # second byte valid, but third byte missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+2) + result.append(r) + break + elif n == 4: + # 4-bytes seq with 1 or 2 continuation bytes + if (ordch2>>6 != 0b10 or + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)): + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + # third byte invalid, take the first two and continue + r, pos = 
errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + else: + # there's only 1 or 2 valid cb, but the others are missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+charsleft+1) + result.append(r) + break + + if n == 0: + r, pos = errorhandler(errors, 'utf-8', + 'invalid start byte', + s, pos, pos+1) + result.append(r) + + elif n == 1: + assert 0, "ascii should have gone through the fast path" + + elif n == 2: + ordch2 = ord(s[pos+1]) + if ordch2>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) continue + # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00011111) << 6) + + (ordch2 & 0b00111111))) + pos += 2 - if n == 0: - r, pos = errorhandler(errors, "utf-8", - "invalid start byte", - s, pos, pos + 1) - result.append(r) - elif n == 1: - assert 0, "you can never get here" - elif n == 2: - # 110yyyyy 10zzzzzz ====> 00000000 00000yyy yyzzzzzz - - ordch2 = ord(s[pos+1]) - z, two = splitter[6, 2](ordch2) - y, six = splitter[5, 3](ordch1) - assert six == 6 - if two != 2: - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, pos + 1) - result.append(r) - else: - c = (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 3: - # 1110xxxx 10yyyyyy 10zzzzzz ====> 00000000 xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - z, two1 = splitter[6, 2](ordch3) - y, two2 = splitter[6, 2](ordch2) - x, fourteen = splitter[4, 4](ordch1) - assert fourteen == 14 - if (two1 != 2 or two2 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. 
# or (ordch1 == 0xed and ordch2 > 0x9f) ): + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00001111) << 12) + + ((ordch2 & 0b00111111) << 6) + + (ordch3 & 0b00111111))) + pos += 3 - # if ordch2 first two bits are 1 and 0, then the invalid - # continuation byte is ordch3; else ordch2 is invalid. - if two2 == 2: - endpos = pos + 2 - else: - endpos = pos + 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) - result.append(r) - else: - c = (x << 12) + (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 4: - # 11110www 10xxxxxx 10yyyyyy 10zzzzzz ====> - # 000wwwxx xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - z, two1 = splitter[6, 2](ordch4) - y, two2 = splitter[6, 2](ordch3) - x, two3 = splitter[6, 2](ordch2) - w, thirty = splitter[3, 5](ordch1) - assert thirty == 30 - if (two1 != 2 or two2 != 2 or two3 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): - endpos = pos + 1 - if ordch2 & 0xc0 == 0x80: - endpos += 1 - if ordch3 & 0xc0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + elif ordch4>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+3) + result.append(r) + continue + # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz + 
c = (((ordch1 & 0b00000111) << 18) + + ((ordch2 & 0b00111111) << 12) + + ((ordch3 & 0b00111111) << 6) + + (ordch4 & 0b00111111)) + if c <= MAXUNICODE: + result.append(UNICHR(c)) else: - c = (w << 18) + (x << 12) + (y << 6) + z - # convert to UTF-16 if necessary - if c <= MAXUNICODE: - result.append(UNICHR(c)) - else: - # compute and append the two surrogates: - # translate from 10000..10FFFF to 0..FFFF - c -= 0x10000 - # high surrogate = top 10 bits added to D800 - result.append(unichr(0xD800 + (c >> 10))) - # low surrogate = bottom 10 bits added to DC00 - result.append(unichr(0xDC00 + (c & 0x03FF))) - pos += n - else: - r, pos = errorhandler(errors, "utf-8", - "unsupported Unicode code range", - s, pos, pos + n) - result.append(r) + # compute and append the two surrogates: + # translate from 10000..10FFFF to 0..FFFF + c -= 0x10000 + # high surrogate = top 10 bits added to D800 + result.append(unichr(0xD800 + (c >> 10))) + # low surrogate = bottom 10 bits added to DC00 + result.append(unichr(0xDC00 + (c & 0x03FF))) + pos += 4 return result.build(), pos @@ -629,7 +669,7 @@ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, # sp ! " # $ % & ' ( ) * + , - . / 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 3, 0, 0, 0, 0, -# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? +# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, # @ A B C D E F G H I J K L M N O 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -905,20 +945,20 @@ pos = 0 while pos < size: ch = p[pos] - + if ord(ch) < limit: result.append(chr(ord(ch))) pos += 1 else: # startpos for collecting unencodable chars - collstart = pos - collend = pos+1 + collstart = pos + collend = pos+1 while collend < len(p) and ord(p[collend]) >= limit: collend += 1 r, pos = errorhandler(errors, encoding, reason, p, collstart, collend) result.append(r) - + return result.build() def unicode_encode_latin_1(p, size, errors, errorhandler=None): diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -487,7 +488,6 @@ """) def test_range_iter(self): - py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -838,7 +838,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -849,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -867,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -888,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -899,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -917,11 +917,13 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, 
float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -933,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -946,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1010,7 +1012,7 @@ """) def test_func_defaults(self): - py.test.skip("skipped until we fix defaults") + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1063,7 +1065,7 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, i25) @@ -1071,3 +1073,460 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=200) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) + i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) 
+ i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= len(self): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) + f21 = getarrayitem_raw(i13, i18, descr=...) + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + f26 = getarrayitem_raw(i13, i6, descr=...) + f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... 
+ """) + + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + assert len(self) == 256 + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... + """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) 
+ i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) + + def test_iter_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
+ """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stderr.write('SKIP: cannot import _ffi\n') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -100,11 +102,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ 
-234,6 +236,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) + """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): @@ -253,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(pytest.skip.Exception, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.llmemory import HiddenGcRef32 @@ -102,6 +102,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF or T == HiddenGcRef32: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -56,6 +56,7 @@ math_fmod = 
llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -94,7 +95,8 @@ return y != y def ll_math_isinf(y): - return y != 0 and y * .5 == y + # Use a bitwise OR so the JIT doesn't produce 2 different guards. + return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): @@ -1014,7 +1021,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. 
if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def 
match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) @@ -314,7 +314,7 @@ # it matched! The '...' operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. it matches all the operations until it finds one that matches @@ -333,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -348,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -357,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = 
CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -198,44 +198,6 @@ print print '@' * 79 - def test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." - i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert 
len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) - while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -784,514 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - 
else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, ([], res)) - - def 
test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = t - i += 1 - return 
3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 - - - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - - def test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 
- i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) - - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 
3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
+ at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. 
/ x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): From commits-noreply at bitbucket.org Sat Apr 16 22:12:21 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:21 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Support for ll_str(). Message-ID: <20110416201221.DB7B82A204A@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43404:55e7d6c9094e Date: 2011-04-16 09:59 -0700 http://bitbucket.org/pypy/pypy/changeset/55e7d6c9094e/ Log: Support for ll_str(). diff --git a/pypy/rpython/lltypesystem/rcompressed.py b/pypy/rpython/lltypesystem/rcompressed.py --- a/pypy/rpython/lltypesystem/rcompressed.py +++ b/pypy/rpython/lltypesystem/rcompressed.py @@ -4,6 +4,7 @@ from pypy.rpython.rmodel import Repr, inputconst from pypy.rpython.lltypesystem import lltype, llmemory, rffi from pypy.rpython.lltypesystem.lloperation import llop +from pypy.rpython.lltypesystem.rstr import string_repr from pypy.rpython.error import TyperError @@ -99,6 +100,11 @@ def get_ll_dummyval_obj(self, rtyper, s_value): return DummyValueBuilder(rtyper) + def ll_str(self, i): + return hiddengcref32_str + +hiddengcref32_str = string_repr.convert_const("") + class DummyValueBuilder(object): TYPE = llmemory.HiddenGcRef32 diff --git a/pypy/rpython/test/test_rcompressed.py b/pypy/rpython/test/test_rcompressed.py --- a/pypy/rpython/test/test_rcompressed.py +++ b/pypy/rpython/test/test_rcompressed.py @@ -223,3 +223,11 @@ assert res == True res = self.interpret(fn, [5, 6, 8]) assert res == False + + def test_str(self): + class A: + pass + def fn(): + return str([A()]) + res = self.interpret(fn, []) + assert 'HiddenGcRef32' in self.ll_to_string(res) From commits-noreply at bitbucket.org Sat Apr 16 22:12:24 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:24 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Finish call descrs. 
Message-ID: <20110416201224.6121F2A204D@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43405:ac4ae9746e4e Date: 2011-04-16 10:13 -0700 http://bitbucket.org/pypy/pypy/changeset/ac4ae9746e4e/ Log: Finish call descrs. diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -346,6 +346,23 @@ descr1 = get_call_descr(c2, [], RESTYPE) assert descr1.is_result_signed() == signed +def test_call_descr_hiddengcref32(): + if sys.maxint == 2147483647: + py.test.skip("HiddenGcRef32: for 64-bit only") + for tsc in [False, True]: + c0 = GcCache(tsc) + descr = get_call_descr(c0, [llmemory.HiddenGcRef32], lltype.Void) + assert descr.get_arg_types() == 'H' + # + descr = get_call_descr(c0, [], llmemory.HiddenGcRef32) + assert descr.get_return_type() == 'H' + sz = descr.get_result_size(tsc) + if not tsc: + assert sz == 4 + else: + assert isinstance(sz, Symbolic) + assert sz.TYPE == llmemory.HiddenGcRef32 + def test_repr_of_descr(): c0 = GcCache(False) diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -332,6 +332,8 @@ result = 'rffi.cast(lltype.SignedLongLong, res)' elif self.get_return_type() == history.VOID: result = 'None' + elif self.get_return_type() == 'H': + result = 'llop.show_from_ptr32(llmemory.GCREF, res)' else: assert 0 source = py.code.Source(""" @@ -409,6 +411,12 @@ def get_result_size(self, translate_support_code): return symbolic.get_size_of_ptr(translate_support_code) +class GcPtrHidden32CallDescr(BaseCallDescr): + _clsname = 'GcPtrHidden32CallDescr' + _return_type = 'H' + def get_result_size(self, translate_support_code): + return symbolic.get_size(llmemory.HiddenGcRef32,translate_support_code) + class FloatCallDescr(BaseCallDescr): _clsname = 'FloatCallDescr' 
_return_type = history.FLOAT @@ -437,7 +445,8 @@ return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr, NonGcPtrCallDescr, 'Call', 'get_result_size', Ellipsis, # <= floatattrname should not be used here - '_is_result_signed') + '_is_result_signed', + GcPtrHidden32CallDescr) def get_call_descr(gccache, ARGS, RESULT, extrainfo=None): arg_classes = [] diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -44,7 +44,8 @@ if (isinstance(op.getdescr(), GcPtrHidden32FieldDescr) or isinstance(op.getdescr(), GcPtrHidden32ArrayDescr) or (isinstance(op.getdescr(), BaseCallDescr) and - 'H' in op.getdescr().get_arg_types())): + ('H' in op.getdescr().get_arg_types() or + op.getdescr().get_return_type() == 'H'))): from pypy.jit.metainterp.test.support import SkipThisRun raise SkipThisRun("non-translated test with compressptr") def can_inline_malloc(self, descr): @@ -885,6 +886,11 @@ [v1], v2)) args[i] = v2 op = op.copy_and_change(op.getopnum(), args=args) + if descr.get_return_type() == 'H': + v1 = BoxInt() + v2 = op.result + newops.append(op.copy_and_change(op.getopnum(), result=v1)) + op = ResOperation(rop.SHOW_FROM_PTR32, [v1], v2) # ---------- newops.append(op) del operations[:] From commits-noreply at bitbucket.org Sat Apr 16 22:12:25 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:25 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test and fix: if g is a low-level function pointer, then g(*args) Message-ID: <20110416201225.D49F92A2047@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43406:2de9ed30dbdb Date: 2011-04-16 13:07 -0700 http://bitbucket.org/pypy/pypy/changeset/2de9ed30dbdb/ Log: Test and fix: if g is a low-level function pointer, then g(*args) might mess up the exact type of the args if the tuple stores them differently. This is a bit non-obvious to test, and is only apparent in HiddenGcRef32. 
diff --git a/pypy/rpython/rbuiltin.py b/pypy/rpython/rbuiltin.py --- a/pypy/rpython/rbuiltin.py +++ b/pypy/rpython/rbuiltin.py @@ -71,11 +71,11 @@ if not isinstance(r_tuple, AbstractTupleRepr): raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): - v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) + v_item = r_tuple.getitem(hop.llops, v_tuple, i) hop.nb_args += 1 hop.args_v.append(v_item) hop.args_s.append(s_tuple.items[i]) - hop.args_r.append(r_tuple.items_r[i]) + hop.args_r.append(r_tuple.external_items_r[i]) keywords = arguments.keywords if not takes_kwds and keywords: diff --git a/pypy/rpython/test/test_rcompressed.py b/pypy/rpython/test/test_rcompressed.py --- a/pypy/rpython/test/test_rcompressed.py +++ b/pypy/rpython/test/test_rcompressed.py @@ -2,7 +2,8 @@ from pypy.config.translationoption import IS_64_BITS from pypy.rpython.test import test_rclass from pypy.rpython import rmodel, rint, rclass -from pypy.rpython.lltypesystem import llmemory +from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem.lloperation import llop def setup_module(mod): @@ -231,3 +232,27 @@ return str([A()]) res = self.interpret(fn, []) assert 'HiddenGcRef32' in self.ll_to_string(res) + + def test_nonzero(self): + S = lltype.GcStruct('S') + def fn(): + p = lltype.malloc(S) + r = llop.hide_into_ptr32(llmemory.HiddenGcRef32, p) + return bool(r) + res = self.interpret(fn, []) + assert res == True + + def test_funccall_starargs(self): + def g(a, b): + return a.n - b.n + S = lltype.GcStruct('S', ('n', lltype.Signed)) + SPTR = lltype.Ptr(S) + GFUNC = lltype.FuncType([SPTR, SPTR], lltype.Signed) + gptr = lltype.functionptr(GFUNC, 'g', _callable=g) + def fn(x, y): + a = lltype.malloc(S); a.n = x + b = lltype.malloc(S); b.n = y + args = (a, b) + return gptr(*args) + res = self.interpret(fn, [45, 3]) + assert res == 42 From commits-noreply at bitbucket.org Sat Apr 16 22:12:26 2011 From: commits-noreply at bitbucket.org (arigo) 
Date: Sat, 16 Apr 2011 22:12:26 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix: cannot use "x == NULL" for all pointers x, because of HiddenGcRef32. Message-ID: <20110416201226.9C94E2A2047@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43407:5cbbbb07b056 Date: 2011-04-16 22:09 +0200 http://bitbucket.org/pypy/pypy/changeset/5cbbbb07b056/ Log: Fix: cannot use "x == NULL" for all pointers x, because of HiddenGcRef32. Use instead "!x". diff --git a/pypy/translator/c/funcgen.py b/pypy/translator/c/funcgen.py --- a/pypy/translator/c/funcgen.py +++ b/pypy/translator/c/funcgen.py @@ -594,11 +594,11 @@ def OP_PTR_NONZERO(self, op): - return '%s = (%s != NULL);' % (self.expr(op.result), - self.expr(op.args[0])) + return '%s = !!%s;' % (self.expr(op.result), + self.expr(op.args[0])) def OP_PTR_ISZERO(self, op): - return '%s = (%s == NULL);' % (self.expr(op.result), - self.expr(op.args[0])) + return '%s = !%s;' % (self.expr(op.result), + self.expr(op.args[0])) def OP_PTR_EQ(self, op): return '%s = (%s == %s);' % (self.expr(op.result), From commits-noreply at bitbucket.org Sat Apr 16 22:12:27 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:27 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix, mostly for tests: let c_mmap_safe() be a no-gc operation. Message-ID: <20110416201227.317082A204D@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43408:dd7b2ce8b037 Date: 2011-04-16 22:10 +0200 http://bitbucket.org/pypy/pypy/changeset/dd7b2ce8b037/ Log: Fix, mostly for tests: let c_mmap_safe() be a no-gc operation. 
diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -101,7 +101,8 @@ compilation_info=CConfig._compilation_info_) safe = rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, - sandboxsafe=True, threadsafe=False) + sandboxsafe=True, threadsafe=False, + _nowrapper=True) return unsafe, safe def winexternal(name, args, result, **kwargs): @@ -118,18 +119,31 @@ has_mremap = cConfig['has_mremap'] c_mmap, _c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT, rffi.INT, off_t], PTR) - c_munmap, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT) + c_munmap, _c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT) c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT) if has_mremap: c_mremap, _ = external('mremap', [PTR, size_t, size_t, rffi.ULONG], PTR) def c_mmap_safe(addr, length, prot, flags, fd, offset): + length = rffi.cast(rffi.SIZE_T, length) + prot = rffi.cast(rffi.INT, prot) + flags = rffi.cast(rffi.INT, flags) + fd = rffi.cast(rffi.INT, fd) + offset = rffi.cast(off_t, offset) return _c_mmap_safe(addr, length, prot, flags, fd, offset) c_mmap_safe._annenforceargs_ = (PTR, int, int, int, int, int) + def c_munmap_safe(addr, length): + length = rffi.cast(rffi.SIZE_T, length) + res = _c_munmap_safe(addr, length) + return rffi.cast(lltype.Signed, res) + c_munmap_safe._annenforceargs_ = (PTR, int) + # this one is always safe - _, _get_page_size = external('getpagesize', [], rffi.INT) + _, _c_get_page_size = external('getpagesize', [], rffi.INT) + def _get_page_size(): + return rffi.cast(lltype.Signed, _c_get_page_size()) _get_allocation_granularity = _get_page_size elif _MS_WINDOWS: From commits-noreply at bitbucket.org Sat Apr 16 22:12:38 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 22:12:38 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: hg merge default Message-ID: <20110416201238.214D72A2047@codespeak.net> Author: 
Armin Rigo Branch: 32ptr-on-64bit Changeset: r43409:78429cb225cf Date: 2011-04-16 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/78429cb225cf/ Log: hg merge default diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ -45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -66,9 +66,10 @@ assert called[0] assert "42424242" in result - def checkdecodeerror(self, s, encoding, start, stop, addstuff=True): + def checkdecodeerror(self, s, encoding, start, stop, + addstuff=True, msg=None): called = [0] - def errorhandler(errors, enc, msg, t, startingpos, + def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: @@ -77,6 +78,8 @@ assert t is s assert start == startingpos assert stop == endingpos + if msg is not None: + assert errmsg == msg return u"42424242", stop return u"", endingpos decoder = self.getdecoder(encoding) @@ -90,7 +93,7 @@ class TestDecoding(UnicodeTests): - + # XXX test bom recognition in utf-16 # XXX test proper error handling @@ -131,6 +134,96 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + def test_ascii_error(self): + self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) + + def test_utf16_errors(self): + # trunkated BOM + for s in ["\xff", "\xfe"]: + self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) + + 
for s in [ + # unexpected end of data ascii + "\xff\xfeF", + # unexpected end of data + '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', + ]: + self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) + for s in [ + # illegal surrogate + "\xff\xfe\xff\xdb\xff\xff", + ]: + self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + + def test_utf16_bugs(self): + s = '\x80-\xe9\xdeL\xa3\x9b' + py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, + s, len(s), True) + + def test_utf7_bugs(self): + u = u'A\u2262\u0391.' + assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' + + def test_utf7_tofrom_utf8_bug(self): + def _assert_decu7(input, expected): + assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) + + _assert_decu7('+-', u'+') + _assert_decu7('+-+-', u'++') + _assert_decu7('+-+AOQ-', u'+\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ- ', u'\xe4 ') + _assert_decu7(' +AOQ-', u' \xe4') + _assert_decu7(' +AOQ- ', u' \xe4 ') + _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') + + s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' + s_utf8 = u'Die Männer ärgen sich!' + s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' 
+ + _assert_decu7(s_utf7, s_utf8_esc) + _assert_decu7(s_utf7, s_utf8) + + assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 + assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 + + def test_utf7_partial(self): + s = u"a+-b".encode('utf-7') + assert s == "a+--b" + decode = self.getdecoder('utf-7') + assert decode(s, 1, None) == (u'a', 1) + assert decode(s, 2, None) == (u'a', 1) + assert decode(s, 3, None) == (u'a+', 3) + assert decode(s, 4, None) == (u'a+-', 4) + assert decode(s, 5, None) == (u'a+-b', 5) + + def test_utf7_surrogates(self): + encode = self.getencoder('utf-7') + u = u'\U000abcde' + assert encode(u, len(u), None) == '+2m/c3g-' + decode = self.getdecoder('utf-7') + s = '+3ADYAA-' + raises(UnicodeError, decode, s, len(s), None) + def replace_handler(errors, codec, message, input, start, end): + return u'?', end + assert decode(s, len(s), None, final=True, + errorhandler = replace_handler) == (u'??', len(s)) + + +class TestUTF8Decoding(UnicodeTests): + def __init__(self): + self.decoder = self.getdecoder('utf-8') + + def replace_handler(self, errors, codec, message, input, start, end): + return u'\ufffd', end + + def ignore_handler(self, errors, codec, message, input, start, end): + return u'', end + + def to_bytestring(self, bytes): + return ''.join(chr(int(c, 16)) for c in bytes.split()) + def test_single_chars_utf8(self): for s in ["\xd7\x90", "\xd6\x96", "\xeb\x96\x95", "\xf0\x90\x91\x93"]: self.checkdecode(s, "utf-8") @@ -140,30 +233,297 @@ # This test will raise an error with python 3.x self.checkdecode(u"\ud800", "utf-8") + def test_invalid_start_byte(self): + """ + Test that an 'invalid start byte' error is raised when the first byte + is not in the ASCII range or is not a valid start byte of a 2-, 3-, or + 4-bytes sequence. The invalid start byte is replaced with a single + U+FFFD when errors='replace'. + E.g. <80> is a continuation byte and can appear only after a start byte. 
+ """ + FFFD = u'\ufffd' + for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': + raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) + self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, + msg='invalid start byte') + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.replace_handler) == (FFFD, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', 9)) + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.ignore_handler) == (u'', 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', 9)) + + def test_unexpected_end_of_data(self): + """ + Test that an 'unexpected end of data' error is raised when the string + ends after a start byte of a 2-, 3-, or 4-bytes sequence without having + enough continuation bytes. The incomplete sequence is replaced with a + single U+FFFD when errors='replace'. + E.g. in the sequence , F3 is the start byte of a 4-bytes + sequence, but it's followed by only 2 valid continuation bytes and the + last continuation bytes is missing. + Note: the continuation bytes must be all valid, if one of them is + invalid another error will be raised. 
+ """ + sequences = [ + 'C2', 'DF', + 'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF', + 'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF', + 'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF', + 'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF', + 'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF', + 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' + ] + FFFD = u'\ufffd' + for seq in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq), addstuff=False, + msg='unexpected end of data') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (FFFD, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', len(seq) + 8)) + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (u'', len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', len(seq) + 8)) + + def test_invalid_cb_for_2bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte of a 2-bytes sequence is invalid. The start byte + is replaced by a single U+FFFD and the second byte is handled + separately when errors='replace'. + E.g. in the sequence , C2 is the start byte of a 2-bytes + sequence, but 41 is not a valid continuation byte because it's the + ASCII letter 'A'. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('C2 00', FFFD+u'\x00'), ('C2 7F', FFFD+u'\x7f'), + ('C2 C0', FFFDx2), ('C2 FF', FFFDx2), + ('DF 00', FFFD+u'\x00'), ('DF 7F', FFFD+u'\x7f'), + ('DF C0', FFFDx2), ('DF FF', FFFDx2), + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, 1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_3bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 3-bytes sequence are invalid. When + errors='replace', if the first continuation byte is valid, the first + two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the + third byte is handled separately, otherwise only the start byte is + replaced with a U+FFFD and the other continuation bytes are handled + separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('E0 00', FFFD+u'\x00'), ('E0 7F', FFFD+u'\x7f'), ('E0 80', FFFDx2), + ('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2), + ('E0 A0 00', FFFD+u'\x00'), ('E0 A0 7F', FFFD+u'\x7f'), + ('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2), + ('E0 BF 00', FFFD+u'\x00'), ('E0 BF 7F', FFFD+u'\x7f'), + ('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+u'\x00'), + ('E1 7F', FFFD+u'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2), + ('E1 80 00', FFFD+u'\x00'), ('E1 80 7F', FFFD+u'\x7f'), + ('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2), + ('E1 BF 00', FFFD+u'\x00'), ('E1 BF 7F', FFFD+u'\x7f'), + ('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+u'\x00'), + ('EC 7F', FFFD+u'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2), + ('EC 80 00', FFFD+u'\x00'), ('EC 80 7F', FFFD+u'\x7f'), + ('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2), + ('EC BF 00', FFFD+u'\x00'), ('EC BF 7F', FFFD+u'\x7f'), + ('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+u'\x00'), + ('ED 7F', FFFD+u'\x7f'), + # ('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^ + ('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+u'\x00'), + ('ED 80 7F', FFFD+u'\x7f'), ('ED 80 C0', FFFDx2), + ('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+u'\x00'), + ('ED 9F 7F', FFFD+u'\x7f'), ('ED 9F C0', FFFDx2), + ('ED 9F FF', FFFDx2), ('EE 00', FFFD+u'\x00'), + ('EE 7F', FFFD+u'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2), + ('EE 80 00', FFFD+u'\x00'), ('EE 80 7F', FFFD+u'\x7f'), + ('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2), + ('EE BF 00', FFFD+u'\x00'), ('EE BF 7F', FFFD+u'\x7f'), + ('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+u'\x00'), + ('EF 7F', FFFD+u'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2), + ('EF 80 00', FFFD+u'\x00'), ('EF 80 7F', FFFD+u'\x7f'), + ('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2), + ('EF BF 00', FFFD+u'\x00'), ('EF BF 7F', FFFD+u'\x7f'), + ('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2), + ] + for seq, res in sequences: + seq = 
self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_4bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 4-bytes sequence are invalid. When + errors='replace',the start byte and all the following valid + continuation bytes are replaced with a single U+FFFD, and all the bytes + starting from the first invalid continuation bytes (included) are + handled separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('F0 00', FFFD+u'\x00'), ('F0 7F', FFFD+u'\x7f'), ('F0 80', FFFDx2), + ('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2), + ('F0 90 00', FFFD+u'\x00'), ('F0 90 7F', FFFD+u'\x7f'), + ('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2), + ('F0 BF 00', FFFD+u'\x00'), ('F0 BF 7F', FFFD+u'\x7f'), + ('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2), + ('F0 90 80 00', FFFD+u'\x00'), ('F0 90 80 7F', FFFD+u'\x7f'), + ('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2), + ('F0 90 BF 00', FFFD+u'\x00'), ('F0 90 BF 7F', FFFD+u'\x7f'), + ('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2), + ('F0 BF 80 00', FFFD+u'\x00'), ('F0 BF 80 7F', FFFD+u'\x7f'), + ('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2), + ('F0 BF BF 00', FFFD+u'\x00'), ('F0 BF BF 7F', FFFD+u'\x7f'), + ('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2), + ('F1 00', FFFD+u'\x00'), ('F1 7F', FFFD+u'\x7f'), ('F1 C0', FFFDx2), + ('F1 FF', FFFDx2), ('F1 80 00', FFFD+u'\x00'), + ('F1 80 7F', FFFD+u'\x7f'), ('F1 80 C0', FFFDx2), + ('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+u'\x00'), + ('F1 BF 7F', FFFD+u'\x7f'), ('F1 BF C0', FFFDx2), + ('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+u'\x00'), + ('F1 80 80 7F', FFFD+u'\x7f'), ('F1 80 80 C0', FFFDx2), + ('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+u'\x00'), + ('F1 80 BF 7F', FFFD+u'\x7f'), ('F1 80 BF C0', FFFDx2), + ('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+u'\x00'), + ('F1 BF 80 7F', FFFD+u'\x7f'), ('F1 BF 80 C0', FFFDx2), + ('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+u'\x00'), + ('F1 BF BF 7F', FFFD+u'\x7f'), ('F1 BF BF C0', FFFDx2), + ('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+u'\x00'), + ('F3 7F', FFFD+u'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2), + ('F3 80 00', FFFD+u'\x00'), ('F3 80 7F', FFFD+u'\x7f'), + ('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2), + ('F3 BF 00', FFFD+u'\x00'), ('F3 BF 7F', FFFD+u'\x7f'), + ('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2), + ('F3 80 80 00', FFFD+u'\x00'), ('F3 80 80 7F', FFFD+u'\x7f'), + ('F3 80 
80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2), + ('F3 80 BF 00', FFFD+u'\x00'), ('F3 80 BF 7F', FFFD+u'\x7f'), + ('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2), + ('F3 BF 80 00', FFFD+u'\x00'), ('F3 BF 80 7F', FFFD+u'\x7f'), + ('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2), + ('F3 BF BF 00', FFFD+u'\x00'), ('F3 BF BF 7F', FFFD+u'\x7f'), + ('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2), + ('F4 00', FFFD+u'\x00'), ('F4 7F', FFFD+u'\x7f'), ('F4 90', FFFDx2), + ('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2), + ('F4 80 00', FFFD+u'\x00'), ('F4 80 7F', FFFD+u'\x7f'), + ('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2), + ('F4 8F 00', FFFD+u'\x00'), ('F4 8F 7F', FFFD+u'\x7f'), + ('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2), + ('F4 80 80 00', FFFD+u'\x00'), ('F4 80 80 7F', FFFD+u'\x7f'), + ('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2), + ('F4 80 BF 00', FFFD+u'\x00'), ('F4 80 BF 7F', FFFD+u'\x7f'), + ('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2), + ('F4 8F 80 00', FFFD+u'\x00'), ('F4 8F 80 7F', FFFD+u'\x7f'), + ('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2), + ('F4 8F BF 00', FFFD+u'\x00'), ('F4 8F BF 7F', FFFD+u'\x7f'), + ('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2) + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', 
len(seq) + 8)) + def test_utf8_errors(self): - for s in [# unexpected end of data - "\xd7", "\xd6", "\xeb\x96", "\xf0\x90\x91"]: - self.checkdecodeerror(s, "utf-8", 0, len(s), addstuff=False) - - # unexpected code byte - for s in ["\x81", "\xbf"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + # unexpected end of data + for s in ['\xd7', '\xd6', '\xeb\x96', '\xf0\x90\x91', '\xc2', '\xdf']: + self.checkdecodeerror(s, 'utf-8', 0, len(s), addstuff=False, + msg='unexpected end of data') # invalid data 2 byte for s in ["\xd7\x50", "\xd6\x06", "\xd6\xD6"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') # invalid data 3 byte for s in ["\xeb\x56\x95", "\xeb\x06\x95", "\xeb\xD6\x95"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xeb\x96\x55", "\xeb\x96\x05", "\xeb\x96\xD5"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') # invalid data 4 byte for s in ["\xf0\x50\x91\x93", "\xf0\x00\x91\x93", "\xf0\xd0\x91\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x51\x93", "\xf0\x90\x01\x93", "\xf0\x90\xd1\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x91\x53", "\xf0\x90\x91\x03", "\xf0\x90\x91\xd3"]: - self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True, + msg='invalid continuation byte') def test_issue8271(self): @@ -249,97 +609,18 @@ ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64', 
u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'), ] - def replace_handler(errors, codec, message, input, start, end): - return FFFD, end - def ignore_handler(errors, codec, message, input, start, end): - return u'', end + for n, (seq, res) in enumerate(sequences): decoder = self.getdecoder('utf-8') raises(UnicodeDecodeError, decoder, seq, len(seq), None, final=True) assert decoder(seq, len(seq), None, final=True, - errorhandler=replace_handler) == (res, len(seq)) + errorhandler=self.replace_handler) == (res, len(seq)) assert decoder(seq + 'b', len(seq) + 1, None, final=True, - errorhandler=replace_handler) == (res + u'b', - len(seq) + 1) + errorhandler=self.replace_handler) == (res + u'b', + len(seq) + 1) res = res.replace(FFFD, u'') assert decoder(seq, len(seq), None, final=True, - errorhandler=ignore_handler) == (res, len(seq)) - - def test_ascii_error(self): - self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) - - def test_utf16_errors(self): - # trunkated BOM - for s in ["\xff", "\xfe"]: - self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) - - for s in [ - # unexpected end of data ascii - "\xff\xfeF", - # unexpected end of data - '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', - ]: - self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) - for s in [ - # illegal surrogate - "\xff\xfe\xff\xdb\xff\xff", - ]: - self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) - - def test_utf16_bugs(self): - s = '\x80-\xe9\xdeL\xa3\x9b' - py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, - s, len(s), True) - - def test_utf7_bugs(self): - u = u'A\u2262\u0391.' - assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' 
- - def test_utf7_tofrom_utf8_bug(self): - def _assert_decu7(input, expected): - assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) - - _assert_decu7('+-', u'+') - _assert_decu7('+-+-', u'++') - _assert_decu7('+-+AOQ-', u'+\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ- ', u'\xe4 ') - _assert_decu7(' +AOQ-', u' \xe4') - _assert_decu7(' +AOQ- ', u' \xe4 ') - _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') - - s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' - s_utf8 = u'Die Männer ärgen sich!' - s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' - - _assert_decu7(s_utf7, s_utf8_esc) - _assert_decu7(s_utf7, s_utf8) - - assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 - assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 - - def test_utf7_partial(self): - s = u"a+-b".encode('utf-7') - assert s == "a+--b" - decode = self.getdecoder('utf-7') - assert decode(s, 1, None) == (u'a', 1) - assert decode(s, 2, None) == (u'a', 1) - assert decode(s, 3, None) == (u'a+', 3) - assert decode(s, 4, None) == (u'a+-', 4) - assert decode(s, 5, None) == (u'a+-b', 5) - - def test_utf7_surrogates(self): - encode = self.getencoder('utf-7') - u = u'\U000abcde' - assert encode(u, len(u), None) == '+2m/c3g-' - decode = self.getdecoder('utf-7') - s = '+3ADYAA-' - raises(UnicodeError, decode, s, len(s), None) - def replace_handler(errors, codec, message, input, start, end): - return u'?', end - assert decode(s, len(s), None, final=True, - errorhandler = replace_handler) == (u'??', len(s)) + errorhandler=self.ignore_handler) == (res, len(seq)) class TestEncoding(UnicodeTests): @@ -376,7 +657,7 @@ self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) @@ -384,7 +665,7 @@ 
self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_single_chars_utf8(self): # check every number of bytes per char @@ -394,7 +675,7 @@ def test_utf8_surrogates(self): # check replacing of two surrogates by single char while encoding # make sure that the string itself is not marshalled - u = u"\ud800" + u = u"\ud800" for i in range(4): u += u"\udc00" self.checkencode(u, "utf-8") @@ -422,7 +703,7 @@ def test_utf8(self): from pypy.rpython.test.test_llinterp import interpret def f(x): - + s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) s2 = runicode.unicode_encode_utf_8(u, len(u), True) @@ -438,6 +719,6 @@ u = runicode.UNICHR(x) t = runicode.ORD(u) return t - + res = interpret(f, [0x10140]) assert res == 0x10140 diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + 
def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. """ + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -779,6 +782,8 @@ """ if T is lltype.Void: return None + if isinstance(T, lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or ( not 
isinstance(cobj, ctypes.c_uint32) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -87,8 +87,9 @@ result = UnicodeBuilder(size) pos = 0 while pos < size: - ch = s[pos] - ordch1 = ord(ch) + ordch1 = ord(s[pos]) + # fast path for ASCII + # XXX maybe use a while loop here if ordch1 < 0x80: result.append(unichr(ordch1)) pos += 1 @@ -98,110 +99,149 @@ if pos + n > size: if not final: break - else: - endpos = pos + 1 - while endpos < size and ord(s[endpos]) & 0xC0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "unexpected end of data", - s, pos, endpos) + charsleft = size - pos - 1 # either 0, 1, 2 + # note: when we get the 'unexpected end of data' we don't care + # about the pos anymore and we just ignore the value + if not charsleft: + # there's only the start byte and nothing else + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+1) + result.append(r) + break + ordch2 = ord(s[pos+1]) + if n == 3: + # 3-bytes seq with only a continuation byte + if (ordch2>>6 != 0b10 or + (ordch1 == 0xe0 and ordch2 < 0xa0)): + # or (ordch1 == 0xed and ordch2 > 0x9f) + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + else: + # second byte valid, but third byte missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+2) + result.append(r) + break + elif n == 4: + # 4-bytes seq with 1 or 2 continuation bytes + if (ordch2>>6 != 0b10 or + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)): + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + # third byte invalid, take the first two and continue + r, pos = 
errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + else: + # there's only 1 or 2 valid cb, but the others are missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+charsleft+1) + result.append(r) + break + + if n == 0: + r, pos = errorhandler(errors, 'utf-8', + 'invalid start byte', + s, pos, pos+1) + result.append(r) + + elif n == 1: + assert 0, "ascii should have gone through the fast path" + + elif n == 2: + ordch2 = ord(s[pos+1]) + if ordch2>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) continue + # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00011111) << 6) + + (ordch2 & 0b00111111))) + pos += 2 - if n == 0: - r, pos = errorhandler(errors, "utf-8", - "invalid start byte", - s, pos, pos + 1) - result.append(r) - elif n == 1: - assert 0, "you can never get here" - elif n == 2: - # 110yyyyy 10zzzzzz ====> 00000000 00000yyy yyzzzzzz - - ordch2 = ord(s[pos+1]) - z, two = splitter[6, 2](ordch2) - y, six = splitter[5, 3](ordch1) - assert six == 6 - if two != 2: - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, pos + 1) - result.append(r) - else: - c = (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 3: - # 1110xxxx 10yyyyyy 10zzzzzz ====> 00000000 xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - z, two1 = splitter[6, 2](ordch3) - y, two2 = splitter[6, 2](ordch2) - x, fourteen = splitter[4, 4](ordch1) - assert fourteen == 14 - if (two1 != 2 or two2 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. 
# or (ordch1 == 0xed and ordch2 > 0x9f) ): + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz + result.append(unichr(((ordch1 & 0b00001111) << 12) + + ((ordch2 & 0b00111111) << 6) + + (ordch3 & 0b00111111))) + pos += 3 - # if ordch2 first two bits are 1 and 0, then the invalid - # continuation byte is ordch3; else ordch2 is invalid. - if two2 == 2: - endpos = pos + 2 - else: - endpos = pos + 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) - result.append(r) - else: - c = (x << 12) + (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 4: - # 11110www 10xxxxxx 10yyyyyy 10zzzzzz ====> - # 000wwwxx xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - z, two1 = splitter[6, 2](ordch4) - y, two2 = splitter[6, 2](ordch3) - x, two3 = splitter[6, 2](ordch2) - w, thirty = splitter[3, 5](ordch1) - assert thirty == 30 - if (two1 != 2 or two2 != 2 or two3 != 2 or + if (ordch2>>6 != 0b10 or (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): - endpos = pos + 1 - if ordch2 & 0xc0 == 0x80: - endpos += 1 - if ordch3 & 0xc0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) + continue + elif ordch3>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + elif ordch4>>6 != 0b10: + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+3) + result.append(r) + continue + # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz + 
c = (((ordch1 & 0b00000111) << 18) + + ((ordch2 & 0b00111111) << 12) + + ((ordch3 & 0b00111111) << 6) + + (ordch4 & 0b00111111)) + if c <= MAXUNICODE: + result.append(UNICHR(c)) else: - c = (w << 18) + (x << 12) + (y << 6) + z - # convert to UTF-16 if necessary - if c <= MAXUNICODE: - result.append(UNICHR(c)) - else: - # compute and append the two surrogates: - # translate from 10000..10FFFF to 0..FFFF - c -= 0x10000 - # high surrogate = top 10 bits added to D800 - result.append(unichr(0xD800 + (c >> 10))) - # low surrogate = bottom 10 bits added to DC00 - result.append(unichr(0xDC00 + (c & 0x03FF))) - pos += n - else: - r, pos = errorhandler(errors, "utf-8", - "unsupported Unicode code range", - s, pos, pos + n) - result.append(r) + # compute and append the two surrogates: + # translate from 10000..10FFFF to 0..FFFF + c -= 0x10000 + # high surrogate = top 10 bits added to D800 + result.append(unichr(0xD800 + (c >> 10))) + # low surrogate = bottom 10 bits added to DC00 + result.append(unichr(0xDC00 + (c & 0x03FF))) + pos += 4 return result.build(), pos @@ -629,7 +669,7 @@ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, # sp ! " # $ % & ' ( ) * + , - . / 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 3, 0, 0, 0, 0, -# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? +# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, # @ A B C D E F G H I J K L M N O 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -905,20 +945,20 @@ pos = 0 while pos < size: ch = p[pos] - + if ord(ch) < limit: result.append(chr(ord(ch))) pos += 1 else: # startpos for collecting unencodable chars - collstart = pos - collend = pos+1 + collstart = pos + collend = pos+1 while collend < len(p) and ord(p[collend]) >= limit: collend += 1 r, pos = errorhandler(errors, encoding, reason, p, collstart, collend) result.append(r) - + return result.build() def unicode_encode_latin_1(p, size, errors, errorhandler=None): diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -487,7 +488,6 @@ """) def test_range_iter(self): - py.test.skip("until we fix defaults") def main(n): def g(n): return range(n) @@ -838,7 +838,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -849,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -867,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -888,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -899,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -917,11 +917,13 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, 
float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -933,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -946,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1010,7 +1012,7 @@ """) def test_func_defaults(self): - py.test.skip("skipped until we fix defaults") + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1063,7 +1065,7 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, i25) @@ -1071,3 +1073,460 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=200) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) + i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) 
+ i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) 
+ i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= len(self): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) + f21 = getarrayitem_raw(i13, i18, descr=...) + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + f26 = getarrayitem_raw(i13, i6, descr=...) + f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... 
+ """) + + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + assert len(self) == 256 + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... + """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) 
+ i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) + + def test_iter_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
+ """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stderr.write('SKIP: cannot import _ffi\n') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -100,11 +102,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ 
-234,6 +236,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) + """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): @@ -253,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(pytest.skip.Exception, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.llmemory import HiddenGcRef32 @@ -102,6 +102,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF or T == HiddenGcRef32: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -56,6 +56,7 @@ math_fmod = 
llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -94,7 +95,8 @@ return y != y def ll_math_isinf(y): - return y != 0 and y * .5 == y + # Use a bitwise OR so the JIT doesn't produce 2 different guards. + return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): @@ -1014,7 +1021,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. 
if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def 
match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) @@ -314,7 +314,7 @@ # it matched! The '...' operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. it matches all the operations until it finds one that matches @@ -333,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -348,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -357,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = 
CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -198,44 +198,6 @@ print print '@' * 79 - def test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." - i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert 
len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) - while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -784,514 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - 
else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, ([], res)) - - def 
test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = t - i += 1 - return 
3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 - - - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - - def test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 
- i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) - - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 
3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
+ at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. 
/ x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): From commits-noreply at bitbucket.org Sat Apr 16 23:10:02 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:02 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add comments. Message-ID: <20110416211002.9A00D2A204C@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43410:32f1206b8852 Date: 2011-04-16 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/32f1206b8852/ Log: Add comments. diff --git a/pypy/translator/c/funcgen.py b/pypy/translator/c/funcgen.py --- a/pypy/translator/c/funcgen.py +++ b/pypy/translator/c/funcgen.py @@ -594,12 +594,15 @@ def OP_PTR_NONZERO(self, op): + # equivalent to (%s != NULL), but works also for HiddenGcRef32 return '%s = !!%s;' % (self.expr(op.result), self.expr(op.args[0])) + def OP_PTR_ISZERO(self, op): + # equivalent to (%s == NULL), but works also for HiddenGcRef32 return '%s = !%s;' % (self.expr(op.result), self.expr(op.args[0])) - + def OP_PTR_EQ(self, op): return '%s = (%s == %s);' % (self.expr(op.result), self.expr(op.args[0]), From commits-noreply at bitbucket.org Sat Apr 16 23:10:04 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:04 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Implement support for HIDE_INTO_PTR32 and SHOW_FROM_PTR32. Easy. Message-ID: <20110416211004.1EC622A204C@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43411:c773659e239c Date: 2011-04-16 22:50 +0200 http://bitbucket.org/pypy/pypy/changeset/c773659e239c/ Log: Implement support for HIDE_INTO_PTR32 and SHOW_FROM_PTR32. Easy. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1258,6 +1258,12 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) + def genop_hide_into_ptr32(self, op, arglocs, resloc): + self.mc.SHR(resloc, imm(3)) + + def genop_show_from_ptr32(self, op, arglocs, resloc): + self.mc.SHL(resloc, imm(3)) + def genop_new_with_vtable(self, op, arglocs, result_loc): assert result_loc is eax loc_vtable = arglocs[-1] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -556,6 +556,8 @@ self.Perform(op, [res], res) consider_int_invert = consider_int_neg + consider_hide_into_ptr32 = consider_int_neg # SHR reg, 3 + consider_show_from_ptr32 = consider_int_neg # SHL reg, 3 def consider_int_lshift(self, op): if isinstance(op.getarg(1), Const): From commits-noreply at bitbucket.org Sat Apr 16 23:10:06 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:06 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Skip some not-written-yet tests, and a hard-to-test fix... Message-ID: <20110416211006.CFCB52A2051@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43412:6e3d7a746010 Date: 2011-04-16 22:55 +0200 http://bitbucket.org/pypy/pypy/changeset/6e3d7a746010/ Log: Skip some not-written-yet tests, and a hard-to-test fix... diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -46,7 +46,10 @@ assert res == 42063 def test_call_argument(self): - ... 
+ # the issue here is that even if we wrote this test, for now, it's + # not going to really test the interesting parts, which are in + # GcLLDescr_framework.rewrite_assembler + py.test.skip("write me") def test_call_result(self): - ... + py.test.skip("write me") diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -877,14 +877,15 @@ isinstance(descr, BaseCallDescr)): args = op.getarglist() arg_classes = descr.get_arg_types() - assert len(args) == len(arg_classes) + fixed = 1 + assert len(args) == fixed + len(arg_classes) for i in range(len(arg_classes)): if arg_classes[i] == 'H': - v1 = args[i] + v1 = args[fixed + i] v2 = BoxInt() newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) - args[i] = v2 + args[fixed + i] = v2 op = op.copy_and_change(op.getopnum(), args=args) if descr.get_return_type() == 'H': v1 = BoxInt() From commits-noreply at bitbucket.org Sat Apr 16 23:10:08 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:08 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add support for 'H' in pyjitpl too. Message-ID: <20110416211008.37A162A204F@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43413:2e27a9ff5b61 Date: 2011-04-16 22:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2e27a9ff5b61/ Log: Add support for 'H' in pyjitpl too. diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -138,6 +138,7 @@ def get_arg_types(self): """ Implement in call descr. Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). + Can also contain 'L' or 'H'; see get_return_type(). """ raise NotImplementedError @@ -145,6 +146,7 @@ """ Implement in call descr. Must return INT, REF, FLOAT, or 'v' for void. On 32-bit (hack) it can also be 'L' for longlongs. 
+ On 64-bit (hack) it can also be 'H' for HiddenGcRef32. """ raise NotImplementedError diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -56,7 +56,7 @@ metainterp.execute_raised(e) result = 0 return BoxInt(result) - if rettype == REF: + if rettype == REF or rettype == 'H': try: result = cpu.bh_call_r(func, descr, args_i, args_r, args_f) except Exception, e: diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1157,7 +1157,7 @@ src_i += 1 if box.type == history.INT: break - elif kind == history.REF: + elif kind == history.REF or kind == 'H': # HiddenGcRef32 while True: box = argboxes[src_r] src_r += 1 From commits-noreply at bitbucket.org Sat Apr 16 23:10:14 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:14 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add comments. Message-ID: <20110416211014.79C192A2051@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43410:32f1206b8852 Date: 2011-04-16 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/32f1206b8852/ Log: Add comments. 
diff --git a/pypy/translator/c/funcgen.py b/pypy/translator/c/funcgen.py --- a/pypy/translator/c/funcgen.py +++ b/pypy/translator/c/funcgen.py @@ -594,12 +594,15 @@ def OP_PTR_NONZERO(self, op): + # equivalent to (%s != NULL), but works also for HiddenGcRef32 return '%s = !!%s;' % (self.expr(op.result), self.expr(op.args[0])) + def OP_PTR_ISZERO(self, op): + # equivalent to (%s == NULL), but works also for HiddenGcRef32 return '%s = !%s;' % (self.expr(op.result), self.expr(op.args[0])) - + def OP_PTR_EQ(self, op): return '%s = (%s == %s);' % (self.expr(op.result), self.expr(op.args[0]), From commits-noreply at bitbucket.org Sat Apr 16 23:10:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:15 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Implement support for HIDE_INTO_PTR32 and SHOW_FROM_PTR32. Easy. Message-ID: <20110416211015.70B792A2051@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43411:c773659e239c Date: 2011-04-16 22:50 +0200 http://bitbucket.org/pypy/pypy/changeset/c773659e239c/ Log: Implement support for HIDE_INTO_PTR32 and SHOW_FROM_PTR32. Easy. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -1258,6 +1258,12 @@ self.mc.SHR_ri(resloc.value, 7) self.mc.AND_ri(resloc.value, 1) + def genop_hide_into_ptr32(self, op, arglocs, resloc): + self.mc.SHR(resloc, imm(3)) + + def genop_show_from_ptr32(self, op, arglocs, resloc): + self.mc.SHL(resloc, imm(3)) + def genop_new_with_vtable(self, op, arglocs, result_loc): assert result_loc is eax loc_vtable = arglocs[-1] diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -556,6 +556,8 @@ self.Perform(op, [res], res) consider_int_invert = consider_int_neg + consider_hide_into_ptr32 = consider_int_neg # SHR reg, 3 + consider_show_from_ptr32 = consider_int_neg # SHL reg, 3 def consider_int_lshift(self, op): if isinstance(op.getarg(1), Const): From commits-noreply at bitbucket.org Sat Apr 16 23:10:16 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:16 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Skip some not-written-yet tests, and a hard-to-test fix... Message-ID: <20110416211016.EEA222A2059@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43412:6e3d7a746010 Date: 2011-04-16 22:55 +0200 http://bitbucket.org/pypy/pypy/changeset/6e3d7a746010/ Log: Skip some not-written-yet tests, and a hard-to-test fix... diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -46,7 +46,10 @@ assert res == 42063 def test_call_argument(self): - ... 
+ # the issue here is that even if we wrote this test, for now, it's + # not going to really test the interesting parts, which are in + # GcLLDescr_framework.rewrite_assembler + py.test.skip("write me") def test_call_result(self): - ... + py.test.skip("write me") diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -877,14 +877,15 @@ isinstance(descr, BaseCallDescr)): args = op.getarglist() arg_classes = descr.get_arg_types() - assert len(args) == len(arg_classes) + fixed = 1 + assert len(args) == fixed + len(arg_classes) for i in range(len(arg_classes)): if arg_classes[i] == 'H': - v1 = args[i] + v1 = args[fixed + i] v2 = BoxInt() newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) - args[i] = v2 + args[fixed + i] = v2 op = op.copy_and_change(op.getopnum(), args=args) if descr.get_return_type() == 'H': v1 = BoxInt() From commits-noreply at bitbucket.org Sat Apr 16 23:10:18 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 16 Apr 2011 23:10:18 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add support for 'H' in pyjitpl too. Message-ID: <20110416211018.6C66D2A2059@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43413:2e27a9ff5b61 Date: 2011-04-16 22:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2e27a9ff5b61/ Log: Add support for 'H' in pyjitpl too. diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -138,6 +138,7 @@ def get_arg_types(self): """ Implement in call descr. Must return a string of INT, REF and FLOAT ('i', 'r', 'f'). + Can also contain 'L' or 'H'; see get_return_type(). """ raise NotImplementedError @@ -145,6 +146,7 @@ """ Implement in call descr. Must return INT, REF, FLOAT, or 'v' for void. On 32-bit (hack) it can also be 'L' for longlongs. 
+ On 64-bit (hack) it can also be 'H' for HiddenGcRef32. """ raise NotImplementedError diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -56,7 +56,7 @@ metainterp.execute_raised(e) result = 0 return BoxInt(result) - if rettype == REF: + if rettype == REF or rettype == 'H': try: result = cpu.bh_call_r(func, descr, args_i, args_r, args_f) except Exception, e: diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1157,7 +1157,7 @@ src_i += 1 if box.type == history.INT: break - elif kind == history.REF: + elif kind == history.REF or kind == 'H': # HiddenGcRef32 while True: box = argboxes[src_r] src_r += 1 From commits-noreply at bitbucket.org Sun Apr 17 01:44:06 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 17 Apr 2011 01:44:06 +0200 (CEST) Subject: [pypy-svn] pypy default: Merge out-of-line-guards-2. This branch provides a way to specify a field Message-ID: <20110416234406.44BB62A2052@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43414:124ebb7828dd Date: 2011-04-17 01:43 +0200 http://bitbucket.org/pypy/pypy/changeset/124ebb7828dd/ Log: Merge out-of-line-guards-2. This branch provides a way to specify a field that doesn't change very often by saying _immutable_fields_ = ['a?'] which means when it changes, a new assembler will be compiled. 
diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,6 +262,10 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() + for name, attrdef in selfattrs.iteritems(): + if not attrdef.readonly and self.is_quasi_immutable(name): + ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) + classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -476,11 +480,9 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -291,6 +291,7 @@ # that belong to this loop or to a bridge attached to it. # Filled by the frontend calling record_faildescr_index(). 
self.faildescr_indices = [] + self.invalidate_positions = [] debug_start("jit-mem-looptoken-alloc") debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -791,6 +791,7 @@ operations = None token = None call_pure_results = None + quasi_immutable_deps = None def __init__(self, name): self.name = name diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -341,13 +341,14 @@ return _struct(self, n, initialization='example') def _immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class RttiStruct(Struct): _runtime_type_info = None @@ -1029,6 +1030,8 @@ return None # null pointer if type(p._obj0) is int: return p # a pointer obtained by cast_int_to_ptr + if getattr(p._obj0, '_carry_around_for_tests', False): + return p # a pointer obtained by cast_instance_to_base_ptr container = obj._normalizedcontainer() if type(container) is int: # this must be an opaque ptr originating from an integer @@ -1881,8 +1884,8 @@ if self.__class__ is not other.__class__: return NotImplemented if hasattr(self, 'container') and hasattr(other, 'container'): - obj1 = self.container._normalizedcontainer() - obj2 = other.container._normalizedcontainer() + obj1 = self._normalizedcontainer() + obj2 = other._normalizedcontainer() return obj1 == obj2 else: return self is other @@ -1906,6 +1909,8 @@ # an integer, cast to a ptr, cast to an opaque if type(self.container) is int: return self.container + if getattr(self.container, 
'_carry_around_for_tests', False): + return self.container return self.container._normalizedcontainer() else: return _parentable._normalizedcontainer(self) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -578,6 +578,7 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None +_opaque_objs = [None] def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -616,6 +617,10 @@ T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with # the same valu + if getattr(container, 'llopaque', None): + no = len(_opaque_objs) + _opaque_objs.append(container) + return no * 2 + 1 else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -764,10 +769,14 @@ if isinstance(T, lltype.Typedef): T = T.OF if isinstance(T, lltype.Ptr): - if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer + ptrval = ctypes.cast(cobj, ctypes.c_void_p).value + if not cobj or not ptrval: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): + if ptrval & 1: # a tagged pointer + gcref = _opaque_objs[ptrval // 2].hide() + return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) @@ -1228,7 +1237,9 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): - return force_cast(PTRTYPE, self.intval) + if self.intval & 1: + return _opaque_objs[self.intval // 2] + return force_cast(PTRTYPE, self.intval) ## def _cast_to_int(self): ## return self.intval diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -0,0 +1,266 @@ + +import py + +from pypy.rpython.lltypesystem import lltype, llmemory, 
rclass +from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE +from pypy.jit.metainterp import typesystem +from pypy.jit.metainterp.quasiimmut import QuasiImmut +from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance +from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.codewriter.policy import StopAtXPolicy +from pypy.rlib.jit import JitDriver, dont_look_inside + + +def test_get_current_qmut_instance(): + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + foo = lltype.malloc(STRUCT, zero=True) + foo.inst_x = 42 + assert not foo.mutate_x + + class FakeCPU: + ts = typesystem.llhelper + + def bh_getfield_gc_r(self, gcref, fielddescr): + assert fielddescr == mutatefielddescr + foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) + result = foo.mutate_x + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + + def bh_setfield_gc_r(self, gcref, fielddescr, newvalue_gcref): + assert fielddescr == mutatefielddescr + foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) + newvalue = lltype.cast_opaque_ptr(rclass.OBJECTPTR, newvalue_gcref) + foo.mutate_x = newvalue + + cpu = FakeCPU() + mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') + + foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) + qmut1 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) + assert isinstance(qmut1, QuasiImmut) + qmut2 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) + assert qmut1 is qmut2 + + +class QuasiImmutTests(object): + + def test_simple_1(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable 
field out of a Constant + total += foo.a + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7]) + assert res == 700 + self.check_loops(getfield_gc=0, everywhere=True) + # + from pypy.jit.metainterp.warmspot import get_stats + loops = get_stats().loops + for loop in loops: + assert len(loop.quasi_immutable_deps) == 1 + assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) + + def test_nonopt_1(self): + myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def setup(x): + return [Foo(100 + i) for i in range(x)] + def f(a, x): + lst = setup(x) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(lst=lst, x=x, total=total) + # read a quasi-immutable field out of a variable + x -= 1 + total += lst[x].a + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7]) + assert res == 721 + self.check_loops(getfield_gc=1) + # + from pypy.jit.metainterp.warmspot import get_stats + loops = get_stats().loops + for loop in loops: + assert loop.quasi_immutable_deps is None + + def test_change_during_tracing_1(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + @dont_look_inside + def residual_call(foo): + foo.a += 1 + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + residual_call(foo) + x -= 1 + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7]) + assert res == 721 + self.check_loops(getfield_gc=1) + + def test_change_during_tracing_2(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + @dont_look_inside + def residual_call(foo, difference): + foo.a += difference + def f(a, x): + foo = 
Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + residual_call(foo, +1) + residual_call(foo, -1) + x -= 1 + return total + # + assert f(100, 7) == 700 + res = self.meta_interp(f, [100, 7]) + assert res == 700 + self.check_loops(getfield_gc=1) + + def test_change_invalidate_reentering(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(foo, x): + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + x -= 1 + return total + def g(a, x): + foo = Foo(a) + res1 = f(foo, x) + foo.a += 1 # invalidation, while the jit is not running + res2 = f(foo, x) # should still mark the loop as invalid + return res1 * 1000 + res2 + # + assert g(100, 7) == 700707 + res = self.meta_interp(g, [100, 7]) + assert res == 700707 + self.check_loops(getfield_gc=0) + + def test_invalidate_while_running(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + + def external(foo, v): + if v: + foo.a = 2 + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, foo=foo, total=total) + external(foo, i > 7) + i += 1 + total += foo.a + return total + + def g(): + return f(Foo(1)) + + assert self.meta_interp(g, [], policy=StopAtXPolicy(external)) == g() + + def test_invalidate_by_setfield(self): + py.test.skip("Not implemented") + jitdriver = JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + + def f(foo, bc): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(bc=bc, i=i, foo=foo, total=total) + if bc == 0: + f(foo, 1) + if bc == 1: + foo.a = int(i > 5) + 
i += 1 + total += foo.a + return total + + def g(): + return f(Foo(1), 0) + + assert self.meta_interp(g, []) == g() + + def test_invalidate_bridge(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, total=total, foo=foo) + if i > 5: + total += foo.a + else: + total += 2*foo.a + i += 1 + return total + + def main(): + foo = Foo() + foo.a = 1 + total = f(foo) + foo.a = 2 + total += f(foo) + return total + + res = self.meta_interp(main, []) + assert res == main() + +class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/quasiimmut.py @@ -0,0 +1,116 @@ +import weakref +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE +from pypy.rpython.lltypesystem import lltype, rclass +from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.jit.metainterp.history import AbstractDescr + + +def is_quasi_immutable(STRUCT, fieldname): + imm_fields = STRUCT._hints.get('immutable_fields') + return (imm_fields is not None and + imm_fields.fields.get(fieldname) is IR_QUASI_IMMUTABLE) + +def get_mutate_field_name(fieldname): + if fieldname.startswith('inst_'): # lltype + return 'mutate_' + fieldname[5:] + elif fieldname.startswith('o'): # ootype + return 'mutate_' + fieldname[1:] + else: + raise AssertionError(fieldname) + +def get_current_qmut_instance(cpu, gcref, mutatefielddescr): + """Returns the current QuasiImmut instance in the field, + possibly creating one. 
+ """ + # XXX this is broken on x86 + qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) + if qmut_gcref: + qmut = QuasiImmut.show(cpu, qmut_gcref) + else: + qmut = QuasiImmut(cpu) + cpu.bh_setfield_gc_r(gcref, mutatefielddescr, qmut.hide()) + return qmut + +def make_invalidation_function(STRUCT, mutatefieldname): + # + def _invalidate_now(p): + qmut_ptr = getattr(p, mutatefieldname) + setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) + qmut = cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) + qmut.invalidate() + _invalidate_now._dont_inline_ = True + # + def invalidation(p): + if getattr(p, mutatefieldname): + _invalidate_now(p) + # + return invalidation + + +class QuasiImmut(object): + llopaque = True + + def __init__(self, cpu): + self.cpu = cpu + # list of weakrefs to the LoopTokens that must be invalidated if + # this value ever changes + self.looptokens_wrefs = [] + self.compress_limit = 30 + + def hide(self): + qmut_ptr = self.cpu.ts.cast_instance_to_base_ref(self) + return self.cpu.ts.cast_to_ref(qmut_ptr) + + @staticmethod + def show(cpu, qmut_gcref): + qmut_ptr = cpu.ts.cast_to_baseclass(qmut_gcref) + return cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) + + def register_loop_token(self, wref_looptoken): + if len(self.looptokens_wrefs) > self.compress_limit: + self.compress_looptokens_list() + self.looptokens_wrefs.append(wref_looptoken) + + def compress_looptokens_list(self): + self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs + if wref() is not None] + self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 + + def invalidate(self): + # When this is called, all the loops that we record become + # invalid and must not be called again, nor returned to. 
+ wrefs = self.looptokens_wrefs + self.looptokens_wrefs = [] + for wref in wrefs: + looptoken = wref() + if looptoken is not None: + self.cpu.invalidate_loop(looptoken) + + +class QuasiImmutDescr(AbstractDescr): + def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): + self.cpu = cpu + self.structbox = structbox + self.fielddescr = fielddescr + self.mutatefielddescr = mutatefielddescr + gcref = structbox.getref_base() + self.qmut = get_current_qmut_instance(cpu, gcref, mutatefielddescr) + self.constantfieldbox = self.get_current_constant_fieldvalue() + + def get_current_constant_fieldvalue(self): + from pypy.jit.metainterp import executor + from pypy.jit.metainterp.resoperation import rop + fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC, + self.fielddescr, self.structbox) + return fieldbox.constbox() + + def is_still_valid(self): + cpu = self.cpu + gcref = self.structbox.getref_base() + qmut = get_current_qmut_instance(cpu, gcref, self.mutatefielddescr) + if qmut is not self.qmut: + return False + else: + currentbox = self.get_current_constant_fieldvalue() + assert self.constantfieldbox.same_constant(currentbox) + return True diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,6 +2,7 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -45,7 +46,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : "", 
'inst_node' : ""}) + XY, {'inst_x' : IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -210,7 +211,8 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) + XY2, {'inst_x' : IR_IMMUTABLE, + 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -41,7 +41,8 @@ # during preamble but to keep it during the loop optimizations.append(o) - if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: + if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts + or 'heap' not in enable_opts): optimizations.append(OptSimplify()) if inline_short_preamble: diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -7,8 +7,9 @@ from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.codewriter.policy import log +from pypy.jit.codewriter.policy import log, check_skip_operation from pypy.jit.metainterp.typesystem import deref, arrayItem +from pypy.jit.metainterp import quasiimmut from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj @@ -561,7 +562,8 @@ arraydescr) return [] # check for _immutable_fields_ hints 
- if v_inst.concretetype.TO._immutable_field(c_fieldname.value): + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): @@ -574,10 +576,21 @@ descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) kind = getkind(RESULT)[0] - return SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), - [v_inst, descr], op.result) + op1 = SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), + [v_inst, descr], op.result) + # + if immut is quasiimmut.IR_QUASI_IMMUTABLE: + descr1 = self.cpu.fielddescrof( + v_inst.concretetype.TO, + quasiimmut.get_mutate_field_name(c_fieldname.value)) + op1 = [SpaceOperation('-live-', [], None), + SpaceOperation('record_quasiimmut_field', + [v_inst, descr, descr1], None), + op1] + return op1 def rewrite_op_setfield(self, op): + check_skip_operation(op) # just to check it doesn't raise if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -257,6 +257,7 @@ self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False + self.quasi_immutable_deps = None self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results @@ -309,6 +310,7 @@ new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None + new.quasi_immutable_deps = self.quasi_immutable_deps return new @@ -410,6 +412,7 @@ self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations + self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) diff 
--git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5717,8 +5717,35 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() - - + def test_quasi_immut(self): + ops = """ + [p0, p1, i0] + quasiimmut_field(p0, descr=quasiimmutdescr) + guard_not_invalidated() [] + i1 = getfield_gc(p0, descr=quasifielddescr) + jump(p1, p0, i1) + """ + expected = """ + [p0, p1, i0] + i1 = getfield_gc(p0, descr=quasifielddescr) + jump(p1, p0, i1) + """ + self.optimize_loop(ops, expected) + + def test_quasi_immut_2(self): + ops = """ + [] + quasiimmut_field(ConstPtr(myptr), descr=quasiimmutdescr) + guard_not_invalidated() [] + i1 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) + jump() + """ + expected = """ + [] + guard_not_invalidated() [] + jump() + """ + self.optimize_loop(ops, expected, expected) ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/rpython/test/test_annlowlevel.py b/pypy/rpython/test/test_annlowlevel.py --- a/pypy/rpython/test/test_annlowlevel.py +++ b/pypy/rpython/test/test_annlowlevel.py @@ -4,9 +4,12 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode +from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, oostr from pypy.rpython.annlowlevel import hlunicode, llunicode +from pypy.rpython import annlowlevel + class TestLLType(BaseRtypingTest, LLRtypeMixin): def test_hlstr(self): @@ -53,6 +56,15 @@ res = self.interpret(f, [self.unicode_to_ll(u"abc")]) assert res == 3 + def test_cast_instance_to_base_ptr(self): + class X(object): + pass + x = X() + ptr = annlowlevel.cast_instance_to_base_ptr(x) + assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() + y = 
annlowlevel.cast_base_ptr_to_instance(X, ptr) + assert y is x + class TestOOType(BaseRtypingTest, OORtypeMixin): def test_hlstr(self): @@ -71,3 +83,12 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 + + def test_cast_instance_to_base_obj(self): + class X(object): + pass + x = X() + obj = annlowlevel.cast_instance_to_base_obj(x) + assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() + y = annlowlevel.cast_base_ptr_to_instance(X, obj) + assert y is x diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -286,6 +286,10 @@ raise ValueError("CALL_ASSEMBLER not supported") llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) + def invalidate_loop(self, looptoken): + for loop in looptoken.compiled_loop_token.loop_and_bridges: + loop._obj.externalobj.invalid = True + # ---------- def sizeof(self, S): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE +from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, @@ -12,6 +13,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -62,6 +64,18 @@ nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') + accessor = 
FieldListAccessor() + accessor.initialize(None, {'inst_field': IR_QUASI_IMMUTABLE}) + QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed), + ('mutate_field', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + quasi = lltype.malloc(QUASI, immortal=True) + quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field') + quasibox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, quasi)) + quasiimmutdescr = QuasiImmutDescr(cpu, quasibox, + quasifielddescr, + cpu.fielddescrof(QUASI, 'mutate_field')) + NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT), ('ref', lltype.Ptr(OBJECT))) nodeobj = lltype.malloc(NODEOBJ) diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,6 +1,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -10,7 +11,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo: +class VirtualizableInfo(object): TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -33,11 +34,13 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, suffix in all_fields.iteritems(): - if suffix == '[*]': + for name, tp in all_fields.iteritems(): + if tp == IR_ARRAY_IMMUTABLE: array_fields.append(name) + elif tp == IR_IMMUTABLE: + static_fields.append(name) else: - static_fields.append(name) + raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py 
--- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -492,6 +492,8 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) + consider_guard_not_invalidated = consider_guard_no_exception + def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1293,6 +1293,28 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') + def test_opaque_tagged_pointers(self): + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.annlowlevel import cast_instance_to_base_ptr + from pypy.rpython.lltypesystem import rclass + + class Opaque(object): + llopaque = True + + def hide(self): + ptr = cast_instance_to_base_ptr(self) + return lltype.cast_opaque_ptr(llmemory.GCREF, ptr) + + @staticmethod + def show(gcref): + ptr = lltype.cast_opaque_ptr(lltype.Ptr(rclass.OBJECT), gcref) + return cast_base_ptr_to_instance(Opaque, ptr) + + opaque = Opaque() + round = ctypes2lltype(llmemory.GCREF, lltype2ctypes(opaque.hide())) + assert Opaque.show(round) is opaque + + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -119,6 +119,7 @@ self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} + self._remove_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -378,6 +379,43 @@ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) + def optimize_QUASIIMMUT_FIELD(self, op): + # 
Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) + # x = GETFIELD_GC(s, descr='inst_x') + # If 's' is a constant (after optimizations), then we make 's.inst_x' + # a constant too, and we rely on the rest of the optimizations to + # constant-fold the following getfield_gc. + structvalue = self.getvalue(op.getarg(0)) + if not structvalue.is_constant(): + self._remove_guard_not_invalidated = True + return # not a constant at all; ignore QUASIIMMUT_FIELD + # + from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr + qmutdescr = op.getdescr() + assert isinstance(qmutdescr, QuasiImmutDescr) + # check that the value is still correct; it could have changed + # already between the tracing and now. In this case, we are + # simply ignoring the QUASIIMMUT_FIELD hint and compiling it + # as a regular getfield. + if not qmutdescr.is_still_valid(): + self._remove_guard_not_invalidated = True + return + # record as an out-of-line guard + if self.optimizer.quasi_immutable_deps is None: + self.optimizer.quasi_immutable_deps = {} + self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None + # perform the replacement in the list of operations + fieldvalue = self.getvalue(qmutdescr.constantfieldbox) + cf = self.field_cache(qmutdescr.fielddescr) + cf.remember_field_value(structvalue, fieldvalue) + self._remove_guard_not_invalidated = False + + def optimize_GUARD_NOT_INVALIDATED(self, op): + if self._remove_guard_not_invalidated: + return + self._remove_guard_not_invalidated = False + self.emit_operation(op) + def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,6 +312,7 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git 
a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,7 +3,8 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor +from pypy.rpython.rmodel import Repr, getgcflavor, inputconst +from pypy.rpython.lltypesystem.lltype import Void class FieldListAccessor(object): @@ -12,6 +13,8 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields + for x in fields.itervalues(): + assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -19,6 +22,20 @@ def _freeze_(self): return True +class ImmutableRanking(object): + def __init__(self, name, is_immutable): + self.name = name + self.is_immutable = is_immutable + def __nonzero__(self): + return self.is_immutable + def __repr__(self): + return '<%s>' % self.name + +IR_MUTABLE = ImmutableRanking('mutable', False) +IR_IMMUTABLE = ImmutableRanking('immutable', True) +IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) +IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) + class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -155,7 +172,8 @@ self.classdef = classdef def _setup_repr(self): - pass + if self.classdef is None: + self.immutable_field_set = set() def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -167,13 +185,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_list = [] # unless overwritten below + self.immutable_field_set = set() # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - 
self.immutable_field_list = immutable_fields.value + self.immutable_field_set = set(immutable_fields.value) accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -201,33 +219,35 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = [] + immutable_fields = set() rbase = self while rbase.classdef is not None: - immutable_fields += rbase.immutable_field_list + immutable_fields.update(rbase.immutable_field_set) rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - with_suffix = {} + ranking = {} for name in fields: - if name.endswith('[*]'): + if name.endswith('[*]'): # for virtualizables' lists name = name[:-3] - suffix = '[*]' - else: - suffix = '' + rank = IR_ARRAY_IMMUTABLE + elif name.endswith('?'): # a quasi-immutable field + name = name[:-1] + rank = IR_QUASI_IMMUTABLE + else: # a regular immutable/green field + rank = IR_IMMUTABLE try: mangled_name, r = self._get_field(name) except KeyError: continue - with_suffix[mangled_name] = suffix - accessor.initialize(self.object_type, with_suffix) - return with_suffix + ranking[mangled_name] = rank + accessor.initialize(self.object_type, ranking) + return ranking def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable - from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -248,12 +268,30 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if fieldname in self.immutable_field_list: + if (fieldname in self.immutable_field_set or + (fieldname + '?') in self.immutable_field_set): raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + + def hook_setfield(self, vinst, fieldname, llops): + if self.is_quasi_immutable(fieldname): + c_fieldname = inputconst(Void, 'mutate_' + fieldname) + llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) + + def is_quasi_immutable(self, fieldname): + search = fieldname + '?' 
+ rbase = self + while rbase.classdef is not None: + if search in rbase.immutable_field_set: + return True + rbase = rbase.rbase + return False + def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,5 +1,5 @@ from pypy.translator.simplify import get_funcobj -from pypy.jit.metainterp import history +from pypy.jit.metainterp import history, quasiimmut from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir @@ -85,12 +85,20 @@ getkind(v.concretetype, supports_floats, supports_longlong) v = op.result getkind(v.concretetype, supports_floats, supports_longlong) + check_skip_operation(op) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) return True return False +def check_skip_operation(op): + if op.opname == 'setfield': + if quasiimmut.is_quasi_immutable(op.args[0].concretetype.TO, + op.args[1].value): + raise NotImplementedError("write to quasi-immutable field %r" + % (op.args[1].value,)) + # ____________________________________________________________ class StopAtXPolicy(JitPolicy): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -167,6 +167,7 @@ class CompiledLoop(object): has_been_freed = False + invalid = False def __init__(self): self.inputargs = [] @@ -933,6 +934,9 @@ if forced: raise GuardFailed + def op_guard_not_invalidated(self, descr): + if self.loop.invalid: + raise GuardFailed class OOFrame(Frame): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -947,3 +947,43 @@ assert op1.args[1] == 'calldescr-%d' % 
effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) assert op1.args[3] == ListOfKind('ref', [v1, v2]) + +def test_quasi_immutable(): + from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + v2 = varoftype(lltype.Signed) + STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], + v2) + tr = Transformer(FakeCPU()) + [_, op1, op2] = tr.rewrite_operation(op) + assert op1.opname == 'record_quasiimmut_field' + assert len(op1.args) == 3 + assert op1.args[0] == v_x + assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') + assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') + assert op1.result is None + assert op2.opname == 'getfield_gc_i' + assert len(op2.args) == 2 + assert op2.args[0] == v_x + assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') + assert op2.result is op.result + +def test_quasi_immutable_setfield(): + from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + v1 = varoftype(lltype.Signed) + STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('setfield', + [v_x, Constant('inst_x', lltype.Void), v1], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU()) + raises(NotImplementedError, tr.rewrite_operation, op) diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -171,7 +171,8 @@ class 
VirtualizableAnalyzer(BoolGraphAnalyzer): def analyze_simple_operation(self, op, graphinfo): return op.opname in ('jit_force_virtualizable', - 'jit_force_virtual') + 'jit_force_virtual', + 'jit_force_quasi_immutable') # ____________________________________________________________ diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,6 +555,16 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any + @arguments("box", "descr", "descr", "orgpc") + def opimpl_record_quasiimmut_field(self, box, fielddescr, + mutatefielddescr, orgpc): + from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr + cpu = self.metainterp.cpu + descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) + self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], + None, descr=descr) + self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) + def _nonstandard_virtualizable(self, pc, box): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] @@ -1076,6 +1086,8 @@ if opnum == rop.GUARD_NOT_FORCED: resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, metainterp.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() guard_op = metainterp.history.record(opnum, moreargs, None, @@ -1848,6 +1860,9 @@ self.handle_possible_exception() except ChangeFrame: pass + elif opnum == rop.GUARD_NOT_INVALIDATED: + pass # XXX we want to do something special in resume descr, + # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected self.execute_raised(OverflowError(), constant=True) try: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -76,6 +76,11 @@ 
op.setdescr(None) # clear reference, mostly for tests if not we_are_translated(): op._jumptarget_number = descr.number + # record this looptoken on the QuasiImmut used in the code + if loop.quasi_immutable_deps is not None: + for qmut in loop.quasi_immutable_deps: + qmut.register_loop_token(wref) + # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): @@ -396,6 +401,12 @@ self.copy_all_attributes_into(res) return res +class ResumeGuardNotInvalidated(ResumeGuardDescr): + def _clone_if_mutable(self): + res = ResumeGuardNotInvalidated() + self.copy_all_attributes_into(res) + return res + class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,6 +54,7 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant + from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -85,38 +86,50 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) - assert not llop.getfield.is_pure([v_s3, Constant('y')]) + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + 
rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') + # + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + # + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: + assert llop.getfield(lltype.Signed, s3, 'x') == 46 
+ assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + else: + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -145,6 +145,14 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) + def invalidate_loop(self, looptoken): + from pypy.jit.backend.x86 import codebuf + + for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: + mc = codebuf.MachineCodeBlockWrapper() + mc.JMP_l(tgt) + mc.copy_to_raw_memory(addr - 1) + class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,6 +525,9 @@ def op_jit_force_virtual(x): return x +def op_jit_force_quasi_immutable(*args): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,6 +5,7 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -116,8 +117,8 
@@ TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1' : "", - self.prefix + 'v2': "[*]"} + assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, + self.prefix + 'v2': IR_ARRAY_IMMUTABLE} # def fn2(n): Base().base1 = 42 diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1166,6 +1166,11 @@ def bhimpl_setfield_raw_f(cpu, struct, fielddescr, newvalue): cpu.bh_setfield_raw_f(struct, fielddescr, newvalue) + @arguments("cpu", "r", "d", "d") + def bhimpl_record_quasiimmut_field(self, struct, fielddescr, + mutatefielddescr): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) @@ -1287,6 +1292,8 @@ # We get here because it used to overflow, but now it no longer # does. pass + elif opnum == rop.GUARD_NOT_INVALIDATED: + pass else: from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,6 +433,7 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), + 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/jit/backend/x86/test/test_quasiimmut.py b/pypy/jit/backend/x86/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_quasiimmut.py @@ -0,0 +1,9 @@ + +import py +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin +from pypy.jit.metainterp.test import test_quasiimmut + +class TestLoopSpec(Jit386Mixin, test_quasiimmut.QuasiImmutTests): 
+ # for the individual tests see + # ====> ../../../metainterp/test/test_loop.py + pass diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if cname.value in self.my_redirected_fields: + if self.my_redirected_fields.get(cname.value): cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -380,6 +380,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -475,6 +476,7 @@ 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', + 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,15 +794,8 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':''})}) - assert S._immutable_field('x') == True - # - class FieldListAccessor(object): - def __init__(self, fields): - self.fields = fields - S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) - assert S._immutable_field('x') == '[*]' + hints={'immutable_fields': FieldListAccessor({'x': 1234})}) + 
assert S._immutable_field('x') == 1234 def test_typedef(): T = Typedef(Signed, 'T') diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -267,6 +267,8 @@ virtual_state = modifier.get_virtual_state(jump_args) loop.preamble.operations = self.optimizer.newoperations + loop.preamble.quasi_immutable_deps = ( + self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.reconstruct_for_next_iteration() inputargs = self.inline(self.cloned_operations, loop.inputargs, jump_args) @@ -276,6 +278,7 @@ loop.preamble.operations.append(jmp) loop.operations = self.optimizer.newoperations + loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() assert isinstance(start_resumedescr, ResumeGuardDescr) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,6 +85,7 @@ 'nslots', 'instancetypedef', 'terminator', + '_version_tag?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -131,6 +131,16 @@ def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') +def find_force_quasi_immutable(graphs): + results = [] + for graph in graphs: + for block in graph.iterblocks(): + for i in range(len(block.operations)): + op = block.operations[i] + if op.opname == 'jit_force_quasi_immutable': + results.append((graph, block, i)) + return results + def get_stats(): return pyjitpl._warmrunnerdesc.stats @@ -187,6 +197,7 @@ self.rewrite_can_enter_jits() self.rewrite_set_param() self.rewrite_force_virtual(vrefinfo) + self.rewrite_force_quasi_immutable() self.add_finish() 
self.metainterp_sd.finish_setup(self.codewriter) @@ -842,6 +853,28 @@ all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) + def replace_force_quasiimmut_with_direct_call(self, op): + ARG = op.args[0].concretetype + mutatefieldname = op.args[1].value + key = (ARG, mutatefieldname) + if key in self._cache_force_quasiimmed_funcs: + cptr = self._cache_force_quasiimmed_funcs[key] + else: + from pypy.jit.metainterp import quasiimmut + func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) + FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) + llptr = self.helper_func(FUNC, func) + cptr = Constant(llptr, FUNC) + self._cache_force_quasiimmed_funcs[key] = cptr + op.opname = 'direct_call' + op.args = [cptr, op.args[0]] + + def rewrite_force_quasi_immutable(self): + self._cache_force_quasiimmed_funcs = {} + graphs = self.translator.graphs + for graph, block, i in find_force_quasi_immutable(graphs): + self.replace_force_quasiimmut_with_direct_call(block.operations[i]) + # ____________________________________________________________ def execute_token(self, loop_token): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,6 +322,7 @@ # before they are fully built, to avoid strange bugs in case # of recursion where other code would uses these # partially-initialized dicts. 
+ AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -370,6 +371,11 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True + + for name, attrdef in attrs: + if not attrdef.readonly and self.is_quasi_immutable(name): + llfields.append(('mutate_' + name, OBJECTPTR)) + object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -488,6 +494,7 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -495,9 +502,6 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -480,7 +480,26 @@ # ____________________________________________________________ def cast_object_to_ptr(PTR, object): - raise NotImplementedError("cast_object_to_ptr") + """NOT_RPYTHON: hack. The object may be disguised as a PTR now. + Limited to casting a given object to a single type. 
+ """ + if isinstance(PTR, lltype.Ptr): + TO = PTR.TO + else: + TO = PTR + if not hasattr(object, '_carry_around_for_tests'): + assert not hasattr(object, '_TYPE') + object._carry_around_for_tests = True + object._TYPE = TO + else: + assert object._TYPE == TO + # + if isinstance(PTR, lltype.Ptr): + return lltype._ptr(PTR, object, True) + elif isinstance(PTR, ootype.Instance): + return object + else: + raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) def cast_instance_to_base_ptr(instance): return cast_object_to_ptr(base_ptr_lltype(), instance) @@ -535,7 +554,13 @@ # ____________________________________________________________ def cast_base_ptr_to_instance(Class, ptr): - raise NotImplementedError("cast_base_ptr_to_instance") + """NOT_RPYTHON: hack. Reverse the hacking done in cast_object_to_ptr().""" + if isinstance(lltype.typeOf(ptr), lltype.Ptr): + ptr = ptr._as_obj() + if not isinstance(ptr, Class): + raise NotImplementedError("cast_base_ptr_to_instance: casting %r to %r" + % (ptr, Class)) + return ptr class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry): _about_ = cast_base_ptr_to_instance diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -20,6 +20,9 @@ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) self.emit_operation(op) + def optimize_QUASIIMMUT_FIELD(self, op): + pass + def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,13 +268,14 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = 
self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class SpecializableType(OOType): diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,6 +5,8 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -746,8 +748,10 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ - accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_ARRAY_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_ARRAY_IMMUTABLE} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -765,8 +769,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : ""} or \ - accessor.fields == {"ox" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -785,8 +789,10 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ - 
accessor.fields == {"ox" : "", "oy" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -804,8 +810,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y" : ""} or \ - accessor.fields == {"oy" : ""} # for ootype + assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"oy": IR_IMMUTABLE} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -849,8 +855,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v" : ""} or \ - accessor.fields == {"ov" : ""} # for ootype + assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ + accessor.fields == {"ov": IR_IMMUTABLE} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -895,6 +901,37 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] + def test_quasi_immutable(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['x', 'y', 'a?', 'b?'] + class B(A): + pass + def f(): + a = A() + a.x = 42 + a.a = 142 + b = B() + b.x = 43 + b.y = 41 + b.a = 44 + b.b = 45 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_y": IR_IMMUTABLE, + "inst_b": IR_QUASI_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE, + "oa": IR_QUASI_IMMUTABLE, + "ob": IR_QUASI_IMMUTABLE} # for ootype + found = [] + for op in 
graph.startblock.operations: + if op.opname == 'jit_force_quasi_immutable': + found.append(op.args[1].value) + assert found == ['mutate_a', 'mutate_a', 'mutate_b'] + class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -48,11 +48,12 @@ class GuardToken(object): - def __init__(self, faildescr, failargs, fail_locs, exc): + def __init__(self, faildescr, failargs, fail_locs, exc, has_jump): self.faildescr = faildescr self.failargs = failargs self.fail_locs = fail_locs self.exc = exc + self.has_jump = has_jump DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) @@ -133,6 +134,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" self.current_clt = looptoken.compiled_loop_token + self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -141,6 +143,7 @@ allblocks) def teardown(self): + self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -435,15 +438,24 @@ # tok.faildescr._x86_adr_jump_offset to contain the raw address of # the 4-byte target field in the JMP/Jcond instruction, and patch # the field in question to point (initially) to the recovery stub + inv_counter = 0 + clt = self.current_clt for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset tok.faildescr._x86_adr_jump_offset = addr relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(relative_target) - mc.copy_to_raw_memory(addr) + if tok.has_jump: + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(relative_target) + mc.copy_to_raw_memory(addr) + else: + # guard not invalidate, patch where it jumps + pos, _ = 
self.invalidate_positions[inv_counter] + clt.invalidate_positions.append((pos + rawstart, + relative_target)) + inv_counter += 1 def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1447,6 +1459,13 @@ self.mc.CMP(heap(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') + def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, + locs, ign_2): + pos = self.mc.get_relative_pos() + 1 # after potential jmp + guard_token.pos_jump_offset = pos + self.invalidate_positions.append((pos, 0)) + self.pending_guard_tokens.append(guard_token) + def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] @@ -1545,7 +1564,8 @@ exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return GuardToken(faildescr, failargs, fail_locs, exc) + return GuardToken(faildescr, failargs, fail_locs, exc, has_jump= + guard_opnum != rop.GUARD_NOT_INVALIDATED) def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. We try to From commits-noreply at bitbucket.org Sun Apr 17 01:44:22 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 17 Apr 2011 01:44:22 +0200 (CEST) Subject: [pypy-svn] pypy default: Merge out-of-line-guards-2. This branch provides a way to specify a field Message-ID: <20110416234422.DDE0C2A2057@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43414:124ebb7828dd Date: 2011-04-17 01:43 +0200 http://bitbucket.org/pypy/pypy/changeset/124ebb7828dd/ Log: Merge out-of-line-guards-2. This branch provides a way to specify a field that doesn't change very often by saying _immutable_fields_ = ['a?'] which means when it changes, a new assembler will be compiled. 
diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,6 +262,10 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() + for name, attrdef in selfattrs.iteritems(): + if not attrdef.readonly and self.is_quasi_immutable(name): + ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) + classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -476,11 +480,9 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -291,6 +291,7 @@ # that belong to this loop or to a bridge attached to it. # Filled by the frontend calling record_faildescr_index(). 
self.faildescr_indices = [] + self.invalidate_positions = [] debug_start("jit-mem-looptoken-alloc") debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -791,6 +791,7 @@ operations = None token = None call_pure_results = None + quasi_immutable_deps = None def __init__(self, name): self.name = name diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -341,13 +341,14 @@ return _struct(self, n, initialization='example') def _immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class RttiStruct(Struct): _runtime_type_info = None @@ -1029,6 +1030,8 @@ return None # null pointer if type(p._obj0) is int: return p # a pointer obtained by cast_int_to_ptr + if getattr(p._obj0, '_carry_around_for_tests', False): + return p # a pointer obtained by cast_instance_to_base_ptr container = obj._normalizedcontainer() if type(container) is int: # this must be an opaque ptr originating from an integer @@ -1881,8 +1884,8 @@ if self.__class__ is not other.__class__: return NotImplemented if hasattr(self, 'container') and hasattr(other, 'container'): - obj1 = self.container._normalizedcontainer() - obj2 = other.container._normalizedcontainer() + obj1 = self._normalizedcontainer() + obj2 = other._normalizedcontainer() return obj1 == obj2 else: return self is other @@ -1906,6 +1909,8 @@ # an integer, cast to a ptr, cast to an opaque if type(self.container) is int: return self.container + if getattr(self.container, 
'_carry_around_for_tests', False): + return self.container return self.container._normalizedcontainer() else: return _parentable._normalizedcontainer(self) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -578,6 +578,7 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None +_opaque_objs = [None] def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -616,6 +617,10 @@ T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with # the same valu + if getattr(container, 'llopaque', None): + no = len(_opaque_objs) + _opaque_objs.append(container) + return no * 2 + 1 else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -764,10 +769,14 @@ if isinstance(T, lltype.Typedef): T = T.OF if isinstance(T, lltype.Ptr): - if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer + ptrval = ctypes.cast(cobj, ctypes.c_void_p).value + if not cobj or not ptrval: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): + if ptrval & 1: # a tagged pointer + gcref = _opaque_objs[ptrval // 2].hide() + return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) @@ -1228,7 +1237,9 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): - return force_cast(PTRTYPE, self.intval) + if self.intval & 1: + return _opaque_objs[self.intval // 2] + return force_cast(PTRTYPE, self.intval) ## def _cast_to_int(self): ## return self.intval diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -0,0 +1,266 @@ + +import py + +from pypy.rpython.lltypesystem import lltype, llmemory, 
rclass +from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE +from pypy.jit.metainterp import typesystem +from pypy.jit.metainterp.quasiimmut import QuasiImmut +from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance +from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.codewriter.policy import StopAtXPolicy +from pypy.rlib.jit import JitDriver, dont_look_inside + + +def test_get_current_qmut_instance(): + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + foo = lltype.malloc(STRUCT, zero=True) + foo.inst_x = 42 + assert not foo.mutate_x + + class FakeCPU: + ts = typesystem.llhelper + + def bh_getfield_gc_r(self, gcref, fielddescr): + assert fielddescr == mutatefielddescr + foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) + result = foo.mutate_x + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + + def bh_setfield_gc_r(self, gcref, fielddescr, newvalue_gcref): + assert fielddescr == mutatefielddescr + foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) + newvalue = lltype.cast_opaque_ptr(rclass.OBJECTPTR, newvalue_gcref) + foo.mutate_x = newvalue + + cpu = FakeCPU() + mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') + + foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) + qmut1 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) + assert isinstance(qmut1, QuasiImmut) + qmut2 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) + assert qmut1 is qmut2 + + +class QuasiImmutTests(object): + + def test_simple_1(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable 
field out of a Constant + total += foo.a + x -= 1 + return total + # + res = self.meta_interp(f, [100, 7]) + assert res == 700 + self.check_loops(getfield_gc=0, everywhere=True) + # + from pypy.jit.metainterp.warmspot import get_stats + loops = get_stats().loops + for loop in loops: + assert len(loop.quasi_immutable_deps) == 1 + assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) + + def test_nonopt_1(self): + myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def setup(x): + return [Foo(100 + i) for i in range(x)] + def f(a, x): + lst = setup(x) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(lst=lst, x=x, total=total) + # read a quasi-immutable field out of a variable + x -= 1 + total += lst[x].a + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7]) + assert res == 721 + self.check_loops(getfield_gc=1) + # + from pypy.jit.metainterp.warmspot import get_stats + loops = get_stats().loops + for loop in loops: + assert loop.quasi_immutable_deps is None + + def test_change_during_tracing_1(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + @dont_look_inside + def residual_call(foo): + foo.a += 1 + def f(a, x): + foo = Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + residual_call(foo) + x -= 1 + return total + # + assert f(100, 7) == 721 + res = self.meta_interp(f, [100, 7]) + assert res == 721 + self.check_loops(getfield_gc=1) + + def test_change_during_tracing_2(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + @dont_look_inside + def residual_call(foo, difference): + foo.a += difference + def f(a, x): + foo = 
Foo(a) + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + residual_call(foo, +1) + residual_call(foo, -1) + x -= 1 + return total + # + assert f(100, 7) == 700 + res = self.meta_interp(f, [100, 7]) + assert res == 700 + self.check_loops(getfield_gc=1) + + def test_change_invalidate_reentering(self): + myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) + class Foo: + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + def f(foo, x): + total = 0 + while x > 0: + myjitdriver.jit_merge_point(foo=foo, x=x, total=total) + # read a quasi-immutable field out of a Constant + total += foo.a + x -= 1 + return total + def g(a, x): + foo = Foo(a) + res1 = f(foo, x) + foo.a += 1 # invalidation, while the jit is not running + res2 = f(foo, x) # should still mark the loop as invalid + return res1 * 1000 + res2 + # + assert g(100, 7) == 700707 + res = self.meta_interp(g, [100, 7]) + assert res == 700707 + self.check_loops(getfield_gc=0) + + def test_invalidate_while_running(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + + def external(foo, v): + if v: + foo.a = 2 + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, foo=foo, total=total) + external(foo, i > 7) + i += 1 + total += foo.a + return total + + def g(): + return f(Foo(1)) + + assert self.meta_interp(g, [], policy=StopAtXPolicy(external)) == g() + + def test_invalidate_by_setfield(self): + py.test.skip("Not implemented") + jitdriver = JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + def __init__(self, a): + self.a = a + + def f(foo, bc): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(bc=bc, i=i, foo=foo, total=total) + if bc == 0: + f(foo, 1) + if bc == 1: + foo.a = int(i > 5) + 
i += 1 + total += foo.a + return total + + def g(): + return f(Foo(1), 0) + + assert self.meta_interp(g, []) == g() + + def test_invalidate_bridge(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, total=total, foo=foo) + if i > 5: + total += foo.a + else: + total += 2*foo.a + i += 1 + return total + + def main(): + foo = Foo() + foo.a = 1 + total = f(foo) + foo.a = 2 + total += f(foo) + return total + + res = self.meta_interp(main, []) + assert res == main() + +class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): + pass diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/quasiimmut.py @@ -0,0 +1,116 @@ +import weakref +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE +from pypy.rpython.lltypesystem import lltype, rclass +from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.jit.metainterp.history import AbstractDescr + + +def is_quasi_immutable(STRUCT, fieldname): + imm_fields = STRUCT._hints.get('immutable_fields') + return (imm_fields is not None and + imm_fields.fields.get(fieldname) is IR_QUASI_IMMUTABLE) + +def get_mutate_field_name(fieldname): + if fieldname.startswith('inst_'): # lltype + return 'mutate_' + fieldname[5:] + elif fieldname.startswith('o'): # ootype + return 'mutate_' + fieldname[1:] + else: + raise AssertionError(fieldname) + +def get_current_qmut_instance(cpu, gcref, mutatefielddescr): + """Returns the current QuasiImmut instance in the field, + possibly creating one. 
+ """ + # XXX this is broken on x86 + qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) + if qmut_gcref: + qmut = QuasiImmut.show(cpu, qmut_gcref) + else: + qmut = QuasiImmut(cpu) + cpu.bh_setfield_gc_r(gcref, mutatefielddescr, qmut.hide()) + return qmut + +def make_invalidation_function(STRUCT, mutatefieldname): + # + def _invalidate_now(p): + qmut_ptr = getattr(p, mutatefieldname) + setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) + qmut = cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) + qmut.invalidate() + _invalidate_now._dont_inline_ = True + # + def invalidation(p): + if getattr(p, mutatefieldname): + _invalidate_now(p) + # + return invalidation + + +class QuasiImmut(object): + llopaque = True + + def __init__(self, cpu): + self.cpu = cpu + # list of weakrefs to the LoopTokens that must be invalidated if + # this value ever changes + self.looptokens_wrefs = [] + self.compress_limit = 30 + + def hide(self): + qmut_ptr = self.cpu.ts.cast_instance_to_base_ref(self) + return self.cpu.ts.cast_to_ref(qmut_ptr) + + @staticmethod + def show(cpu, qmut_gcref): + qmut_ptr = cpu.ts.cast_to_baseclass(qmut_gcref) + return cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) + + def register_loop_token(self, wref_looptoken): + if len(self.looptokens_wrefs) > self.compress_limit: + self.compress_looptokens_list() + self.looptokens_wrefs.append(wref_looptoken) + + def compress_looptokens_list(self): + self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs + if wref() is not None] + self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 + + def invalidate(self): + # When this is called, all the loops that we record become + # invalid and must not be called again, nor returned to. 
+ wrefs = self.looptokens_wrefs + self.looptokens_wrefs = [] + for wref in wrefs: + looptoken = wref() + if looptoken is not None: + self.cpu.invalidate_loop(looptoken) + + +class QuasiImmutDescr(AbstractDescr): + def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): + self.cpu = cpu + self.structbox = structbox + self.fielddescr = fielddescr + self.mutatefielddescr = mutatefielddescr + gcref = structbox.getref_base() + self.qmut = get_current_qmut_instance(cpu, gcref, mutatefielddescr) + self.constantfieldbox = self.get_current_constant_fieldvalue() + + def get_current_constant_fieldvalue(self): + from pypy.jit.metainterp import executor + from pypy.jit.metainterp.resoperation import rop + fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC, + self.fielddescr, self.structbox) + return fieldbox.constbox() + + def is_still_valid(self): + cpu = self.cpu + gcref = self.structbox.getref_base() + qmut = get_current_qmut_instance(cpu, gcref, self.mutatefielddescr) + if qmut is not self.qmut: + return False + else: + currentbox = self.get_current_constant_fieldvalue() + assert self.constantfieldbox.same_constant(currentbox) + return True diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,6 +2,7 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -45,7 +46,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : "", 
'inst_node' : ""}) + XY, {'inst_x' : IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -210,7 +211,8 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) + XY2, {'inst_x' : IR_IMMUTABLE, + 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -41,7 +41,8 @@ # during preamble but to keep it during the loop optimizations.append(o) - if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: + if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts + or 'heap' not in enable_opts): optimizations.append(OptSimplify()) if inline_short_preamble: diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -7,8 +7,9 @@ from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.codewriter.policy import log +from pypy.jit.codewriter.policy import log, check_skip_operation from pypy.jit.metainterp.typesystem import deref, arrayItem +from pypy.jit.metainterp import quasiimmut from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj @@ -561,7 +562,8 @@ arraydescr) return [] # check for _immutable_fields_ hints 
- if v_inst.concretetype.TO._immutable_field(c_fieldname.value): + immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): @@ -574,10 +576,21 @@ descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) kind = getkind(RESULT)[0] - return SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), - [v_inst, descr], op.result) + op1 = SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), + [v_inst, descr], op.result) + # + if immut is quasiimmut.IR_QUASI_IMMUTABLE: + descr1 = self.cpu.fielddescrof( + v_inst.concretetype.TO, + quasiimmut.get_mutate_field_name(c_fieldname.value)) + op1 = [SpaceOperation('-live-', [], None), + SpaceOperation('record_quasiimmut_field', + [v_inst, descr, descr1], None), + op1] + return op1 def rewrite_op_setfield(self, op): + check_skip_operation(op) # just to check it doesn't raise if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -257,6 +257,7 @@ self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False + self.quasi_immutable_deps = None self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results @@ -309,6 +310,7 @@ new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None + new.quasi_immutable_deps = self.quasi_immutable_deps return new @@ -410,6 +412,7 @@ self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations + self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) diff 
--git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5717,8 +5717,35 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() - - + def test_quasi_immut(self): + ops = """ + [p0, p1, i0] + quasiimmut_field(p0, descr=quasiimmutdescr) + guard_not_invalidated() [] + i1 = getfield_gc(p0, descr=quasifielddescr) + jump(p1, p0, i1) + """ + expected = """ + [p0, p1, i0] + i1 = getfield_gc(p0, descr=quasifielddescr) + jump(p1, p0, i1) + """ + self.optimize_loop(ops, expected) + + def test_quasi_immut_2(self): + ops = """ + [] + quasiimmut_field(ConstPtr(myptr), descr=quasiimmutdescr) + guard_not_invalidated() [] + i1 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) + jump() + """ + expected = """ + [] + guard_not_invalidated() [] + jump() + """ + self.optimize_loop(ops, expected, expected) ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/rpython/test/test_annlowlevel.py b/pypy/rpython/test/test_annlowlevel.py --- a/pypy/rpython/test/test_annlowlevel.py +++ b/pypy/rpython/test/test_annlowlevel.py @@ -4,9 +4,12 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode +from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, oostr from pypy.rpython.annlowlevel import hlunicode, llunicode +from pypy.rpython import annlowlevel + class TestLLType(BaseRtypingTest, LLRtypeMixin): def test_hlstr(self): @@ -53,6 +56,15 @@ res = self.interpret(f, [self.unicode_to_ll(u"abc")]) assert res == 3 + def test_cast_instance_to_base_ptr(self): + class X(object): + pass + x = X() + ptr = annlowlevel.cast_instance_to_base_ptr(x) + assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() + y = 
annlowlevel.cast_base_ptr_to_instance(X, ptr) + assert y is x + class TestOOType(BaseRtypingTest, OORtypeMixin): def test_hlstr(self): @@ -71,3 +83,12 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 + + def test_cast_instance_to_base_obj(self): + class X(object): + pass + x = X() + obj = annlowlevel.cast_instance_to_base_obj(x) + assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() + y = annlowlevel.cast_base_ptr_to_instance(X, obj) + assert y is x diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -286,6 +286,10 @@ raise ValueError("CALL_ASSEMBLER not supported") llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) + def invalidate_loop(self, looptoken): + for loop in looptoken.compiled_loop_token.loop_and_bridges: + loop._obj.externalobj.invalid = True + # ---------- def sizeof(self, S): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE +from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, @@ -12,6 +13,7 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int from pypy.jit.tool.oparser import parse +from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -62,6 +64,18 @@ nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') + accessor = 
FieldListAccessor() + accessor.initialize(None, {'inst_field': IR_QUASI_IMMUTABLE}) + QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed), + ('mutate_field', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + quasi = lltype.malloc(QUASI, immortal=True) + quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field') + quasibox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, quasi)) + quasiimmutdescr = QuasiImmutDescr(cpu, quasibox, + quasifielddescr, + cpu.fielddescrof(QUASI, 'mutate_field')) + NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT), ('ref', lltype.Ptr(OBJECT))) nodeobj = lltype.malloc(NODEOBJ) diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,6 +1,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -10,7 +11,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo: +class VirtualizableInfo(object): TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -33,11 +34,13 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, suffix in all_fields.iteritems(): - if suffix == '[*]': + for name, tp in all_fields.iteritems(): + if tp == IR_ARRAY_IMMUTABLE: array_fields.append(name) + elif tp == IR_IMMUTABLE: + static_fields.append(name) else: - static_fields.append(name) + raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py 
--- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -492,6 +492,8 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) + consider_guard_not_invalidated = consider_guard_no_exception + def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1293,6 +1293,28 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') + def test_opaque_tagged_pointers(self): + from pypy.rpython.annlowlevel import cast_base_ptr_to_instance + from pypy.rpython.annlowlevel import cast_instance_to_base_ptr + from pypy.rpython.lltypesystem import rclass + + class Opaque(object): + llopaque = True + + def hide(self): + ptr = cast_instance_to_base_ptr(self) + return lltype.cast_opaque_ptr(llmemory.GCREF, ptr) + + @staticmethod + def show(gcref): + ptr = lltype.cast_opaque_ptr(lltype.Ptr(rclass.OBJECT), gcref) + return cast_base_ptr_to_instance(Opaque, ptr) + + opaque = Opaque() + round = ctypes2lltype(llmemory.GCREF, lltype2ctypes(opaque.hide())) + assert Opaque.show(round) is opaque + + class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -119,6 +119,7 @@ self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} + self._remove_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -378,6 +379,43 @@ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) + def optimize_QUASIIMMUT_FIELD(self, op): + # 
Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) + # x = GETFIELD_GC(s, descr='inst_x') + # If 's' is a constant (after optimizations), then we make 's.inst_x' + # a constant too, and we rely on the rest of the optimizations to + # constant-fold the following getfield_gc. + structvalue = self.getvalue(op.getarg(0)) + if not structvalue.is_constant(): + self._remove_guard_not_invalidated = True + return # not a constant at all; ignore QUASIIMMUT_FIELD + # + from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr + qmutdescr = op.getdescr() + assert isinstance(qmutdescr, QuasiImmutDescr) + # check that the value is still correct; it could have changed + # already between the tracing and now. In this case, we are + # simply ignoring the QUASIIMMUT_FIELD hint and compiling it + # as a regular getfield. + if not qmutdescr.is_still_valid(): + self._remove_guard_not_invalidated = True + return + # record as an out-of-line guard + if self.optimizer.quasi_immutable_deps is None: + self.optimizer.quasi_immutable_deps = {} + self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None + # perform the replacement in the list of operations + fieldvalue = self.getvalue(qmutdescr.constantfieldbox) + cf = self.field_cache(qmutdescr.fielddescr) + cf.remember_field_value(structvalue, fieldvalue) + self._remove_guard_not_invalidated = False + + def optimize_GUARD_NOT_INVALIDATED(self, op): + if self._remove_guard_not_invalidated: + return + self._remove_guard_not_invalidated = False + self.emit_operation(op) + def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,6 +312,7 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git 
a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,7 +3,8 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor +from pypy.rpython.rmodel import Repr, getgcflavor, inputconst +from pypy.rpython.lltypesystem.lltype import Void class FieldListAccessor(object): @@ -12,6 +13,8 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields + for x in fields.itervalues(): + assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -19,6 +22,20 @@ def _freeze_(self): return True +class ImmutableRanking(object): + def __init__(self, name, is_immutable): + self.name = name + self.is_immutable = is_immutable + def __nonzero__(self): + return self.is_immutable + def __repr__(self): + return '<%s>' % self.name + +IR_MUTABLE = ImmutableRanking('mutable', False) +IR_IMMUTABLE = ImmutableRanking('immutable', True) +IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) +IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) + class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -155,7 +172,8 @@ self.classdef = classdef def _setup_repr(self): - pass + if self.classdef is None: + self.immutable_field_set = set() def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -167,13 +185,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_list = [] # unless overwritten below + self.immutable_field_set = set() # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - 
self.immutable_field_list = immutable_fields.value + self.immutable_field_set = set(immutable_fields.value) accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -201,33 +219,35 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = [] + immutable_fields = set() rbase = self while rbase.classdef is not None: - immutable_fields += rbase.immutable_field_list + immutable_fields.update(rbase.immutable_field_set) rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - with_suffix = {} + ranking = {} for name in fields: - if name.endswith('[*]'): + if name.endswith('[*]'): # for virtualizables' lists name = name[:-3] - suffix = '[*]' - else: - suffix = '' + rank = IR_ARRAY_IMMUTABLE + elif name.endswith('?'): # a quasi-immutable field + name = name[:-1] + rank = IR_QUASI_IMMUTABLE + else: # a regular immutable/green field + rank = IR_IMMUTABLE try: mangled_name, r = self._get_field(name) except KeyError: continue - with_suffix[mangled_name] = suffix - accessor.initialize(self.object_type, with_suffix) - return with_suffix + ranking[mangled_name] = rank + accessor.initialize(self.object_type, ranking) + return ranking def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable - from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -248,12 +268,30 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if fieldname in self.immutable_field_list: + if (fieldname in self.immutable_field_set or + (fieldname + '?') in self.immutable_field_set): raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + + def hook_setfield(self, vinst, fieldname, llops): + if self.is_quasi_immutable(fieldname): + c_fieldname = inputconst(Void, 'mutate_' + fieldname) + llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) + + def is_quasi_immutable(self, fieldname): + search = fieldname + '?' 
+ rbase = self + while rbase.classdef is not None: + if search in rbase.immutable_field_set: + return True + rbase = rbase.rbase + return False + def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,5 +1,5 @@ from pypy.translator.simplify import get_funcobj -from pypy.jit.metainterp import history +from pypy.jit.metainterp import history, quasiimmut from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir @@ -85,12 +85,20 @@ getkind(v.concretetype, supports_floats, supports_longlong) v = op.result getkind(v.concretetype, supports_floats, supports_longlong) + check_skip_operation(op) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) return True return False +def check_skip_operation(op): + if op.opname == 'setfield': + if quasiimmut.is_quasi_immutable(op.args[0].concretetype.TO, + op.args[1].value): + raise NotImplementedError("write to quasi-immutable field %r" + % (op.args[1].value,)) + # ____________________________________________________________ class StopAtXPolicy(JitPolicy): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -167,6 +167,7 @@ class CompiledLoop(object): has_been_freed = False + invalid = False def __init__(self): self.inputargs = [] @@ -933,6 +934,9 @@ if forced: raise GuardFailed + def op_guard_not_invalidated(self, descr): + if self.loop.invalid: + raise GuardFailed class OOFrame(Frame): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -947,3 +947,43 @@ assert op1.args[1] == 'calldescr-%d' % 
effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) assert op1.args[3] == ListOfKind('ref', [v1, v2]) + +def test_quasi_immutable(): + from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + v2 = varoftype(lltype.Signed) + STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], + v2) + tr = Transformer(FakeCPU()) + [_, op1, op2] = tr.rewrite_operation(op) + assert op1.opname == 'record_quasiimmut_field' + assert len(op1.args) == 3 + assert op1.args[0] == v_x + assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') + assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') + assert op1.result is None + assert op2.opname == 'getfield_gc_i' + assert len(op2.args) == 2 + assert op2.args[0] == v_x + assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') + assert op2.result is op.result + +def test_quasi_immutable_setfield(): + from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE + accessor = FieldListAccessor() + accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) + v1 = varoftype(lltype.Signed) + STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), + ('mutate_x', rclass.OBJECTPTR), + hints={'immutable_fields': accessor}) + for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('setfield', + [v_x, Constant('inst_x', lltype.Void), v1], + varoftype(lltype.Void)) + tr = Transformer(FakeCPU()) + raises(NotImplementedError, tr.rewrite_operation, op) diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -171,7 +171,8 @@ class 
VirtualizableAnalyzer(BoolGraphAnalyzer): def analyze_simple_operation(self, op, graphinfo): return op.opname in ('jit_force_virtualizable', - 'jit_force_virtual') + 'jit_force_virtual', + 'jit_force_quasi_immutable') # ____________________________________________________________ diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,6 +555,16 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any + @arguments("box", "descr", "descr", "orgpc") + def opimpl_record_quasiimmut_field(self, box, fielddescr, + mutatefielddescr, orgpc): + from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr + cpu = self.metainterp.cpu + descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) + self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], + None, descr=descr) + self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) + def _nonstandard_virtualizable(self, pc, box): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] @@ -1076,6 +1086,8 @@ if opnum == rop.GUARD_NOT_FORCED: resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, metainterp.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() guard_op = metainterp.history.record(opnum, moreargs, None, @@ -1848,6 +1860,9 @@ self.handle_possible_exception() except ChangeFrame: pass + elif opnum == rop.GUARD_NOT_INVALIDATED: + pass # XXX we want to do something special in resume descr, + # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected self.execute_raised(OverflowError(), constant=True) try: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -76,6 +76,11 @@ 
op.setdescr(None) # clear reference, mostly for tests if not we_are_translated(): op._jumptarget_number = descr.number + # record this looptoken on the QuasiImmut used in the code + if loop.quasi_immutable_deps is not None: + for qmut in loop.quasi_immutable_deps: + qmut.register_loop_token(wref) + # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): @@ -396,6 +401,12 @@ self.copy_all_attributes_into(res) return res +class ResumeGuardNotInvalidated(ResumeGuardDescr): + def _clone_if_mutable(self): + res = ResumeGuardNotInvalidated() + self.copy_all_attributes_into(res) + return res + class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,6 +54,7 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant + from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -85,38 +86,50 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) - assert not llop.getfield.is_pure([v_s3, Constant('y')]) + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + 
rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') + # + for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, + rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: + # + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': kind}) + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: + assert llop.getfield(lltype.Signed, s3, 'x') == 46 
+ assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + else: + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'x') + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') + py.test.raises(TypeError, llop.getinteriorfield, + lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -145,6 +145,14 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) + def invalidate_loop(self, looptoken): + from pypy.jit.backend.x86 import codebuf + + for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: + mc = codebuf.MachineCodeBlockWrapper() + mc.JMP_l(tgt) + mc.copy_to_raw_memory(addr - 1) + class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,6 +525,9 @@ def op_jit_force_virtual(x): return x +def op_jit_force_quasi_immutable(*args): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,6 +5,7 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -116,8 +117,8 
@@ TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1' : "", - self.prefix + 'v2': "[*]"} + assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, + self.prefix + 'v2': IR_ARRAY_IMMUTABLE} # def fn2(n): Base().base1 = 42 diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1166,6 +1166,11 @@ def bhimpl_setfield_raw_f(cpu, struct, fielddescr, newvalue): cpu.bh_setfield_raw_f(struct, fielddescr, newvalue) + @arguments("cpu", "r", "d", "d") + def bhimpl_record_quasiimmut_field(self, struct, fielddescr, + mutatefielddescr): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) @@ -1287,6 +1292,8 @@ # We get here because it used to overflow, but now it no longer # does. pass + elif opnum == rop.GUARD_NOT_INVALIDATED: + pass else: from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,6 +433,7 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), + 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/jit/backend/x86/test/test_quasiimmut.py b/pypy/jit/backend/x86/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/x86/test/test_quasiimmut.py @@ -0,0 +1,9 @@ + +import py +from pypy.jit.backend.x86.test.test_basic import Jit386Mixin +from pypy.jit.metainterp.test import test_quasiimmut + +class TestLoopSpec(Jit386Mixin, test_quasiimmut.QuasiImmutTests): 
+ # for the individual tests see + # ====> ../../../metainterp/test/test_loop.py + pass diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if cname.value in self.my_redirected_fields: + if self.my_redirected_fields.get(cname.value): cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -380,6 +380,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', + 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -475,6 +476,7 @@ 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', + 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,15 +794,8 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':''})}) - assert S._immutable_field('x') == True - # - class FieldListAccessor(object): - def __init__(self, fields): - self.fields = fields - S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) - assert S._immutable_field('x') == '[*]' + hints={'immutable_fields': FieldListAccessor({'x': 1234})}) + 
assert S._immutable_field('x') == 1234 def test_typedef(): T = Typedef(Signed, 'T') diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -267,6 +267,8 @@ virtual_state = modifier.get_virtual_state(jump_args) loop.preamble.operations = self.optimizer.newoperations + loop.preamble.quasi_immutable_deps = ( + self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.reconstruct_for_next_iteration() inputargs = self.inline(self.cloned_operations, loop.inputargs, jump_args) @@ -276,6 +278,7 @@ loop.preamble.operations.append(jmp) loop.operations = self.optimizer.newoperations + loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() assert isinstance(start_resumedescr, ResumeGuardDescr) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,6 +85,7 @@ 'nslots', 'instancetypedef', 'terminator', + '_version_tag?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -131,6 +131,16 @@ def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') +def find_force_quasi_immutable(graphs): + results = [] + for graph in graphs: + for block in graph.iterblocks(): + for i in range(len(block.operations)): + op = block.operations[i] + if op.opname == 'jit_force_quasi_immutable': + results.append((graph, block, i)) + return results + def get_stats(): return pyjitpl._warmrunnerdesc.stats @@ -187,6 +197,7 @@ self.rewrite_can_enter_jits() self.rewrite_set_param() self.rewrite_force_virtual(vrefinfo) + self.rewrite_force_quasi_immutable() self.add_finish() 
self.metainterp_sd.finish_setup(self.codewriter) @@ -842,6 +853,28 @@ all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) + def replace_force_quasiimmut_with_direct_call(self, op): + ARG = op.args[0].concretetype + mutatefieldname = op.args[1].value + key = (ARG, mutatefieldname) + if key in self._cache_force_quasiimmed_funcs: + cptr = self._cache_force_quasiimmed_funcs[key] + else: + from pypy.jit.metainterp import quasiimmut + func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) + FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) + llptr = self.helper_func(FUNC, func) + cptr = Constant(llptr, FUNC) + self._cache_force_quasiimmed_funcs[key] = cptr + op.opname = 'direct_call' + op.args = [cptr, op.args[0]] + + def rewrite_force_quasi_immutable(self): + self._cache_force_quasiimmed_funcs = {} + graphs = self.translator.graphs + for graph, block, i in find_force_quasi_immutable(graphs): + self.replace_force_quasiimmut_with_direct_call(block.operations[i]) + # ____________________________________________________________ def execute_token(self, loop_token): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,6 +322,7 @@ # before they are fully built, to avoid strange bugs in case # of recursion where other code would uses these # partially-initialized dicts. 
+ AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -370,6 +371,11 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True + + for name, attrdef in attrs: + if not attrdef.readonly and self.is_quasi_immutable(name): + llfields.append(('mutate_' + name, OBJECTPTR)) + object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -488,6 +494,7 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) + self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -495,9 +502,6 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -480,7 +480,26 @@ # ____________________________________________________________ def cast_object_to_ptr(PTR, object): - raise NotImplementedError("cast_object_to_ptr") + """NOT_RPYTHON: hack. The object may be disguised as a PTR now. + Limited to casting a given object to a single type. 
+ """ + if isinstance(PTR, lltype.Ptr): + TO = PTR.TO + else: + TO = PTR + if not hasattr(object, '_carry_around_for_tests'): + assert not hasattr(object, '_TYPE') + object._carry_around_for_tests = True + object._TYPE = TO + else: + assert object._TYPE == TO + # + if isinstance(PTR, lltype.Ptr): + return lltype._ptr(PTR, object, True) + elif isinstance(PTR, ootype.Instance): + return object + else: + raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) def cast_instance_to_base_ptr(instance): return cast_object_to_ptr(base_ptr_lltype(), instance) @@ -535,7 +554,13 @@ # ____________________________________________________________ def cast_base_ptr_to_instance(Class, ptr): - raise NotImplementedError("cast_base_ptr_to_instance") + """NOT_RPYTHON: hack. Reverse the hacking done in cast_object_to_ptr().""" + if isinstance(lltype.typeOf(ptr), lltype.Ptr): + ptr = ptr._as_obj() + if not isinstance(ptr, Class): + raise NotImplementedError("cast_base_ptr_to_instance: casting %r to %r" + % (ptr, Class)) + return ptr class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry): _about_ = cast_base_ptr_to_instance diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -20,6 +20,9 @@ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) self.emit_operation(op) + def optimize_QUASIIMMUT_FIELD(self, op): + pass + def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,13 +268,14 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): + if self._hints.get('immutable'): + return True if 'immutable_fields' in self._hints: try: - s = 
self._hints['immutable_fields'].fields[field] - return s or True + return self._hints['immutable_fields'].fields[field] except KeyError: pass - return self._hints.get('immutable', False) + return False class SpecializableType(OOType): diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,6 +5,8 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE +from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -746,8 +748,10 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ - accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_ARRAY_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_ARRAY_IMMUTABLE} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -765,8 +769,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : ""} or \ - accessor.fields == {"ox" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -785,8 +789,10 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ - 
accessor.fields == {"ox" : "", "oy" : ""} # for ootype + assert accessor.fields == {"inst_x": IR_IMMUTABLE, + "inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -804,8 +810,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y" : ""} or \ - accessor.fields == {"oy" : ""} # for ootype + assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ + accessor.fields == {"oy": IR_IMMUTABLE} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -849,8 +855,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v" : ""} or \ - accessor.fields == {"ov" : ""} # for ootype + assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ + accessor.fields == {"ov": IR_IMMUTABLE} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -895,6 +901,37 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] + def test_quasi_immutable(self): + from pypy.jit.metainterp.typesystem import deref + class A(object): + _immutable_fields_ = ['x', 'y', 'a?', 'b?'] + class B(A): + pass + def f(): + a = A() + a.x = 42 + a.a = 142 + b = B() + b.x = 43 + b.y = 41 + b.a = 44 + b.b = 45 + return B() + t, typer, graph = self.gengraph(f, []) + B_TYPE = deref(graph.getreturnvar().concretetype) + accessor = B_TYPE._hints["immutable_fields"] + assert accessor.fields == {"inst_y": IR_IMMUTABLE, + "inst_b": IR_QUASI_IMMUTABLE} or \ + accessor.fields == {"ox": IR_IMMUTABLE, + "oy": IR_IMMUTABLE, + "oa": IR_QUASI_IMMUTABLE, + "ob": IR_QUASI_IMMUTABLE} # for ootype + found = [] + for op in 
graph.startblock.operations: + if op.opname == 'jit_force_quasi_immutable': + found.append(op.args[1].value) + assert found == ['mutate_a', 'mutate_a', 'mutate_b'] + class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -48,11 +48,12 @@ class GuardToken(object): - def __init__(self, faildescr, failargs, fail_locs, exc): + def __init__(self, faildescr, failargs, fail_locs, exc, has_jump): self.faildescr = faildescr self.failargs = failargs self.fail_locs = fail_locs self.exc = exc + self.has_jump = has_jump DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) @@ -133,6 +134,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" self.current_clt = looptoken.compiled_loop_token + self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -141,6 +143,7 @@ allblocks) def teardown(self): + self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -435,15 +438,24 @@ # tok.faildescr._x86_adr_jump_offset to contain the raw address of # the 4-byte target field in the JMP/Jcond instruction, and patch # the field in question to point (initially) to the recovery stub + inv_counter = 0 + clt = self.current_clt for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset tok.faildescr._x86_adr_jump_offset = addr relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(relative_target) - mc.copy_to_raw_memory(addr) + if tok.has_jump: + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(relative_target) + mc.copy_to_raw_memory(addr) + else: + # guard not invalidate, patch where it jumps + pos, _ = 
self.invalidate_positions[inv_counter] + clt.invalidate_positions.append((pos + rawstart, + relative_target)) + inv_counter += 1 def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1447,6 +1459,13 @@ self.mc.CMP(heap(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') + def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, + locs, ign_2): + pos = self.mc.get_relative_pos() + 1 # after potential jmp + guard_token.pos_jump_offset = pos + self.invalidate_positions.append((pos, 0)) + self.pending_guard_tokens.append(guard_token) + def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] @@ -1545,7 +1564,8 @@ exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return GuardToken(faildescr, failargs, fail_locs, exc) + return GuardToken(faildescr, failargs, fail_locs, exc, has_jump= + guard_opnum != rop.GUARD_NOT_INVALIDATED) def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. 
We try to From commits-noreply at bitbucket.org Sun Apr 17 01:55:42 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 17 Apr 2011 01:55:42 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: closing merged branch Message-ID: <20110416235542.C72CD2A2052@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43415:ba4ae81e2bfb Date: 2011-04-17 01:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ba4ae81e2bfb/ Log: closing merged branch From commits-noreply at bitbucket.org Sun Apr 17 01:55:45 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 17 Apr 2011 01:55:45 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: closing merged branch Message-ID: <20110416235545.D54492A205A@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43415:ba4ae81e2bfb Date: 2011-04-17 01:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ba4ae81e2bfb/ Log: closing merged branch From commits-noreply at bitbucket.org Sun Apr 17 10:12:50 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:12:50 +0200 (CEST) Subject: [pypy-svn] pypy default: CPython 2.5 compatibility. Message-ID: <20110417081250.6D5DB282B90@codespeak.net> Author: Armin Rigo Branch: Changeset: r43416:91646c654940 Date: 2011-04-17 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/91646c654940/ Log: CPython 2.5 compatibility. 
diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -31,22 +31,28 @@ def checkdecode(self, s, encoding): decoder = self.getdecoder(encoding) - if isinstance(s, str): - trueresult = s.decode(encoding) - else: - trueresult = s - s = s.encode(encoding) + try: + if isinstance(s, str): + trueresult = s.decode(encoding) + else: + trueresult = s + s = s.encode(encoding) + except LookupError, e: + py.test.skip(e) result, consumed = decoder(s, len(s), True) assert consumed == len(s) self.typeequals(trueresult, result) def checkencode(self, s, encoding): encoder = self.getencoder(encoding) - if isinstance(s, unicode): - trueresult = s.encode(encoding) - else: - trueresult = s - s = s.decode(encoding) + try: + if isinstance(s, unicode): + trueresult = s.encode(encoding) + else: + trueresult = s + s = s.decode(encoding) + except LookupError, e: + py.test.skip(e) result = encoder(s, len(s), True) self.typeequals(trueresult, result) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -112,7 +112,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0)): # or (ordch1 == 0xed and ordch2 > 0x9f) # second byte invalid, take the first and continue @@ -130,7 +130,7 @@ break elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): # second byte invalid, take the first and continue @@ -139,7 +139,7 @@ s, pos, pos+1) result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf-8', 'invalid continuation 
byte', @@ -165,21 +165,21 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0b10: + if ordch2>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00011111) << 6) + - (ordch2 & 0b00111111))) + result.append(unichr(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F))) # 0b00111111 pos += 2 elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. @@ -190,23 +190,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00001111) << 12) + - ((ordch2 & 0b00111111) << 6) + - (ordch3 & 0b00111111))) + result.append(unichr(((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 pos += 3 elif n == 4: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): r, pos = errorhandler(errors, 'utf-8', @@ -214,23 +214,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif ordch4>>6 != 0b10: + elif ordch4>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+3) result.append(r) continue # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz - c = (((ordch1 & 
0b00000111) << 18) + - ((ordch2 & 0b00111111) << 12) + - ((ordch3 & 0b00111111) << 6) + - (ordch4 & 0b00111111)) + c = (((ordch1 & 0x07) << 18) + # 0b00000111 + ((ordch2 & 0x3F) << 12) + # 0b00111111 + ((ordch3 & 0x3F) << 6) + # 0b00111111 + (ordch4 & 0x3F)) # 0b00111111 if c <= MAXUNICODE: result.append(UNICHR(c)) else: From commits-noreply at bitbucket.org Sun Apr 17 10:12:51 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:12:51 +0200 (CEST) Subject: [pypy-svn] pypy default: Silence the warnings about missing '$memofield_xxx'. Message-ID: <20110417081251.54251282B90@codespeak.net> Author: Armin Rigo Branch: Changeset: r43417:8991573a5314 Date: 2011-04-17 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8991573a5314/ Log: Silence the warnings about missing '$memofield_xxx'. diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -880,6 +880,11 @@ except AttributeError: return False + def warn_missing_attribute(self, attr): + # only warn for missing attribute names whose name doesn't start + # with '$', to silence the warnings about '$memofield_xxx'. + return not self.has_attribute(attr) and not attr.startswith('$') + def read_attribute(self, attr): try: return self.attrcache[attr] diff --git a/pypy/rpython/rpbc.py b/pypy/rpython/rpbc.py --- a/pypy/rpython/rpbc.py +++ b/pypy/rpython/rpbc.py @@ -485,7 +485,7 @@ try: thisattrvalue = frozendesc.attrcache[attr] except KeyError: - if not frozendesc.has_attribute(attr): + if frozendesc.warn_missing_attribute(attr): warning("Desc %r has no attribute %r" % (frozendesc, attr)) continue llvalue = r_value.convert_const(thisattrvalue) From commits-noreply at bitbucket.org Sun Apr 17 10:12:52 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:12:52 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: CPython 2.5 compatibility. 
Message-ID: <20110417081252.973C2282BAD@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43418:96f314c7f779 Date: 2011-04-17 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/96f314c7f779/ Log: CPython 2.5 compatibility. diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -31,22 +31,28 @@ def checkdecode(self, s, encoding): decoder = self.getdecoder(encoding) - if isinstance(s, str): - trueresult = s.decode(encoding) - else: - trueresult = s - s = s.encode(encoding) + try: + if isinstance(s, str): + trueresult = s.decode(encoding) + else: + trueresult = s + s = s.encode(encoding) + except LookupError, e: + py.test.skip(e) result, consumed = decoder(s, len(s), True) assert consumed == len(s) self.typeequals(trueresult, result) def checkencode(self, s, encoding): encoder = self.getencoder(encoding) - if isinstance(s, unicode): - trueresult = s.encode(encoding) - else: - trueresult = s - s = s.decode(encoding) + try: + if isinstance(s, unicode): + trueresult = s.encode(encoding) + else: + trueresult = s + s = s.decode(encoding) + except LookupError, e: + py.test.skip(e) result = encoder(s, len(s), True) self.typeequals(trueresult, result) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -112,7 +112,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0)): # or (ordch1 == 0xed and ordch2 > 0x9f) # second byte invalid, take the first and continue @@ -130,7 +130,7 @@ break elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): # second byte invalid, take the first and continue @@ -139,7 +139,7 @@ s, pos, pos+1) 
result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', @@ -165,21 +165,21 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0b10: + if ordch2>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00011111) << 6) + - (ordch2 & 0b00111111))) + result.append(unichr(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F))) # 0b00111111 pos += 2 elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. @@ -190,23 +190,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00001111) << 12) + - ((ordch2 & 0b00111111) << 6) + - (ordch3 & 0b00111111))) + result.append(unichr(((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 pos += 3 elif n == 4: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): r, pos = errorhandler(errors, 'utf-8', @@ -214,23 +214,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif 
ordch4>>6 != 0b10: + elif ordch4>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+3) result.append(r) continue # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz - c = (((ordch1 & 0b00000111) << 18) + - ((ordch2 & 0b00111111) << 12) + - ((ordch3 & 0b00111111) << 6) + - (ordch4 & 0b00111111)) + c = (((ordch1 & 0x07) << 18) + # 0b00000111 + ((ordch2 & 0x3F) << 12) + # 0b00111111 + ((ordch3 & 0x3F) << 6) + # 0b00111111 + (ordch4 & 0x3F)) # 0b00111111 if c <= MAXUNICODE: result.append(UNICHR(c)) else: From commits-noreply at bitbucket.org Sun Apr 17 10:12:58 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:12:58 +0200 (CEST) Subject: [pypy-svn] pypy default: CPython 2.5 compatibility. Message-ID: <20110417081258.CCD5C282BA1@codespeak.net> Author: Armin Rigo Branch: Changeset: r43416:91646c654940 Date: 2011-04-17 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/91646c654940/ Log: CPython 2.5 compatibility. 
diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -31,22 +31,28 @@ def checkdecode(self, s, encoding): decoder = self.getdecoder(encoding) - if isinstance(s, str): - trueresult = s.decode(encoding) - else: - trueresult = s - s = s.encode(encoding) + try: + if isinstance(s, str): + trueresult = s.decode(encoding) + else: + trueresult = s + s = s.encode(encoding) + except LookupError, e: + py.test.skip(e) result, consumed = decoder(s, len(s), True) assert consumed == len(s) self.typeequals(trueresult, result) def checkencode(self, s, encoding): encoder = self.getencoder(encoding) - if isinstance(s, unicode): - trueresult = s.encode(encoding) - else: - trueresult = s - s = s.decode(encoding) + try: + if isinstance(s, unicode): + trueresult = s.encode(encoding) + else: + trueresult = s + s = s.decode(encoding) + except LookupError, e: + py.test.skip(e) result = encoder(s, len(s), True) self.typeequals(trueresult, result) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -112,7 +112,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0)): # or (ordch1 == 0xed and ordch2 > 0x9f) # second byte invalid, take the first and continue @@ -130,7 +130,7 @@ break elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): # second byte invalid, take the first and continue @@ -139,7 +139,7 @@ s, pos, pos+1) result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf-8', 'invalid continuation 
byte', @@ -165,21 +165,21 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0b10: + if ordch2>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00011111) << 6) + - (ordch2 & 0b00111111))) + result.append(unichr(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F))) # 0b00111111 pos += 2 elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. @@ -190,23 +190,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00001111) << 12) + - ((ordch2 & 0b00111111) << 6) + - (ordch3 & 0b00111111))) + result.append(unichr(((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 pos += 3 elif n == 4: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): r, pos = errorhandler(errors, 'utf-8', @@ -214,23 +214,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif ordch4>>6 != 0b10: + elif ordch4>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+3) result.append(r) continue # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz - c = (((ordch1 & 
0b00000111) << 18) + - ((ordch2 & 0b00111111) << 12) + - ((ordch3 & 0b00111111) << 6) + - (ordch4 & 0b00111111)) + c = (((ordch1 & 0x07) << 18) + # 0b00000111 + ((ordch2 & 0x3F) << 12) + # 0b00111111 + ((ordch3 & 0x3F) << 6) + # 0b00111111 + (ordch4 & 0x3F)) # 0b00111111 if c <= MAXUNICODE: result.append(UNICHR(c)) else: From commits-noreply at bitbucket.org Sun Apr 17 10:13:00 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:13:00 +0200 (CEST) Subject: [pypy-svn] pypy default: Silence the warnings about missing '$memofield_xxx'. Message-ID: <20110417081300.1F3D8282BA1@codespeak.net> Author: Armin Rigo Branch: Changeset: r43417:8991573a5314 Date: 2011-04-17 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8991573a5314/ Log: Silence the warnings about missing '$memofield_xxx'. diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -880,6 +880,11 @@ except AttributeError: return False + def warn_missing_attribute(self, attr): + # only warn for missing attribute names whose name doesn't start + # with '$', to silence the warnings about '$memofield_xxx'. + return not self.has_attribute(attr) and not attr.startswith('$') + def read_attribute(self, attr): try: return self.attrcache[attr] diff --git a/pypy/rpython/rpbc.py b/pypy/rpython/rpbc.py --- a/pypy/rpython/rpbc.py +++ b/pypy/rpython/rpbc.py @@ -485,7 +485,7 @@ try: thisattrvalue = frozendesc.attrcache[attr] except KeyError: - if not frozendesc.has_attribute(attr): + if frozendesc.warn_missing_attribute(attr): warning("Desc %r has no attribute %r" % (frozendesc, attr)) continue llvalue = r_value.convert_const(thisattrvalue) From commits-noreply at bitbucket.org Sun Apr 17 10:13:01 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:13:01 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: CPython 2.5 compatibility. 
Message-ID: <20110417081301.C36B3282BD7@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43418:96f314c7f779 Date: 2011-04-17 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/96f314c7f779/ Log: CPython 2.5 compatibility. diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -31,22 +31,28 @@ def checkdecode(self, s, encoding): decoder = self.getdecoder(encoding) - if isinstance(s, str): - trueresult = s.decode(encoding) - else: - trueresult = s - s = s.encode(encoding) + try: + if isinstance(s, str): + trueresult = s.decode(encoding) + else: + trueresult = s + s = s.encode(encoding) + except LookupError, e: + py.test.skip(e) result, consumed = decoder(s, len(s), True) assert consumed == len(s) self.typeequals(trueresult, result) def checkencode(self, s, encoding): encoder = self.getencoder(encoding) - if isinstance(s, unicode): - trueresult = s.encode(encoding) - else: - trueresult = s - s = s.decode(encoding) + try: + if isinstance(s, unicode): + trueresult = s.encode(encoding) + else: + trueresult = s + s = s.decode(encoding) + except LookupError, e: + py.test.skip(e) result = encoder(s, len(s), True) self.typeequals(trueresult, result) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -112,7 +112,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0)): # or (ordch1 == 0xed and ordch2 > 0x9f) # second byte invalid, take the first and continue @@ -130,7 +130,7 @@ break elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): # second byte invalid, take the first and continue @@ -139,7 +139,7 @@ s, pos, pos+1) 
result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', @@ -165,21 +165,21 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0b10: + if ordch2>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00011111) << 6) + - (ordch2 & 0b00111111))) + result.append(unichr(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F))) # 0b00111111 pos += 2 elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. @@ -190,23 +190,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00001111) << 12) + - ((ordch2 & 0b00111111) << 6) + - (ordch3 & 0b00111111))) + result.append(unichr(((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 pos += 3 elif n == 4: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): r, pos = errorhandler(errors, 'utf-8', @@ -214,23 +214,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif 
ordch4>>6 != 0b10: + elif ordch4>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+3) result.append(r) continue # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz - c = (((ordch1 & 0b00000111) << 18) + - ((ordch2 & 0b00111111) << 12) + - ((ordch3 & 0b00111111) << 6) + - (ordch4 & 0b00111111)) + c = (((ordch1 & 0x07) << 18) + # 0b00000111 + ((ordch2 & 0x3F) << 12) + # 0b00111111 + ((ordch3 & 0x3F) << 6) + # 0b00111111 + (ordch4 & 0x3F)) # 0b00111111 if c <= MAXUNICODE: result.append(UNICHR(c)) else: From commits-noreply at bitbucket.org Sun Apr 17 10:28:03 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 17 Apr 2011 10:28:03 +0200 (CEST) Subject: [pypy-svn] pypy default: Backed out changeset 124ebb7828dd, merge of out-of-line-guards-2. It seems Message-ID: <20110417082803.013D7282B90@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43419:671bbe385458 Date: 2011-04-17 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/671bbe385458/ Log: Backed out changeset 124ebb7828dd, merge of out-of-line-guards-2. 
It seems to have broken benchmarks, investigation needed diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,10 +262,6 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() - for name, attrdef in selfattrs.iteritems(): - if not attrdef.readonly and self.is_quasi_immutable(name): - ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) - classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -480,9 +476,11 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -291,7 +291,6 @@ # that belong to this loop or to a bridge attached to it. # Filled by the frontend calling record_faildescr_index(). 
self.faildescr_indices = [] - self.invalidate_positions = [] debug_start("jit-mem-looptoken-alloc") debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -791,7 +791,6 @@ operations = None token = None call_pure_results = None - quasi_immutable_deps = None def __init__(self, name): self.name = name diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -341,14 +341,13 @@ return _struct(self, n, initialization='example') def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class RttiStruct(Struct): _runtime_type_info = None @@ -1030,8 +1029,6 @@ return None # null pointer if type(p._obj0) is int: return p # a pointer obtained by cast_int_to_ptr - if getattr(p._obj0, '_carry_around_for_tests', False): - return p # a pointer obtained by cast_instance_to_base_ptr container = obj._normalizedcontainer() if type(container) is int: # this must be an opaque ptr originating from an integer @@ -1884,8 +1881,8 @@ if self.__class__ is not other.__class__: return NotImplemented if hasattr(self, 'container') and hasattr(other, 'container'): - obj1 = self._normalizedcontainer() - obj2 = other._normalizedcontainer() + obj1 = self.container._normalizedcontainer() + obj2 = other.container._normalizedcontainer() return obj1 == obj2 else: return self is other @@ -1909,8 +1906,6 @@ # an integer, cast to a ptr, cast to an opaque if type(self.container) is int: return self.container - if getattr(self.container, 
'_carry_around_for_tests', False): - return self.container return self.container._normalizedcontainer() else: return _parentable._normalizedcontainer(self) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -578,7 +578,6 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None -_opaque_objs = [None] def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -617,10 +616,6 @@ T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with # the same valu - if getattr(container, 'llopaque', None): - no = len(_opaque_objs) - _opaque_objs.append(container) - return no * 2 + 1 else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -769,14 +764,10 @@ if isinstance(T, lltype.Typedef): T = T.OF if isinstance(T, lltype.Ptr): - ptrval = ctypes.cast(cobj, ctypes.c_void_p).value - if not cobj or not ptrval: # NULL pointer + if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): - if ptrval & 1: # a tagged pointer - gcref = _opaque_objs[ptrval // 2].hide() - return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) @@ -1237,9 +1228,7 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): - if self.intval & 1: - return _opaque_objs[self.intval // 2] - return force_cast(PTRTYPE, self.intval) + return force_cast(PTRTYPE, self.intval) ## def _cast_to_int(self): ## return self.intval diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ /dev/null @@ -1,266 +0,0 @@ - -import py - -from pypy.rpython.lltypesystem import lltype, 
llmemory, rclass -from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE -from pypy.jit.metainterp import typesystem -from pypy.jit.metainterp.quasiimmut import QuasiImmut -from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance -from pypy.jit.metainterp.test.test_basic import LLJitMixin -from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside - - -def test_get_current_qmut_instance(): - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - foo = lltype.malloc(STRUCT, zero=True) - foo.inst_x = 42 - assert not foo.mutate_x - - class FakeCPU: - ts = typesystem.llhelper - - def bh_getfield_gc_r(self, gcref, fielddescr): - assert fielddescr == mutatefielddescr - foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) - result = foo.mutate_x - return lltype.cast_opaque_ptr(llmemory.GCREF, result) - - def bh_setfield_gc_r(self, gcref, fielddescr, newvalue_gcref): - assert fielddescr == mutatefielddescr - foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) - newvalue = lltype.cast_opaque_ptr(rclass.OBJECTPTR, newvalue_gcref) - foo.mutate_x = newvalue - - cpu = FakeCPU() - mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') - - foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) - qmut1 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) - assert isinstance(qmut1, QuasiImmut) - qmut2 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) - assert qmut1 is qmut2 - - -class QuasiImmutTests(object): - - def test_simple_1(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a 
quasi-immutable field out of a Constant - total += foo.a - x -= 1 - return total - # - res = self.meta_interp(f, [100, 7]) - assert res == 700 - self.check_loops(getfield_gc=0, everywhere=True) - # - from pypy.jit.metainterp.warmspot import get_stats - loops = get_stats().loops - for loop in loops: - assert len(loop.quasi_immutable_deps) == 1 - assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) - - def test_nonopt_1(self): - myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def setup(x): - return [Foo(100 + i) for i in range(x)] - def f(a, x): - lst = setup(x) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(lst=lst, x=x, total=total) - # read a quasi-immutable field out of a variable - x -= 1 - total += lst[x].a - return total - # - assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7]) - assert res == 721 - self.check_loops(getfield_gc=1) - # - from pypy.jit.metainterp.warmspot import get_stats - loops = get_stats().loops - for loop in loops: - assert loop.quasi_immutable_deps is None - - def test_change_during_tracing_1(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - @dont_look_inside - def residual_call(foo): - foo.a += 1 - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - residual_call(foo) - x -= 1 - return total - # - assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7]) - assert res == 721 - self.check_loops(getfield_gc=1) - - def test_change_during_tracing_2(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - @dont_look_inside - def residual_call(foo, difference): - foo.a += difference - def 
f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - residual_call(foo, +1) - residual_call(foo, -1) - x -= 1 - return total - # - assert f(100, 7) == 700 - res = self.meta_interp(f, [100, 7]) - assert res == 700 - self.check_loops(getfield_gc=1) - - def test_change_invalidate_reentering(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def f(foo, x): - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - x -= 1 - return total - def g(a, x): - foo = Foo(a) - res1 = f(foo, x) - foo.a += 1 # invalidation, while the jit is not running - res2 = f(foo, x) # should still mark the loop as invalid - return res1 * 1000 + res2 - # - assert g(100, 7) == 700707 - res = self.meta_interp(g, [100, 7]) - assert res == 700707 - self.check_loops(getfield_gc=0) - - def test_invalidate_while_running(self): - jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - - def external(foo, v): - if v: - foo.a = 2 - - def f(foo): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(i=i, foo=foo, total=total) - external(foo, i > 7) - i += 1 - total += foo.a - return total - - def g(): - return f(Foo(1)) - - assert self.meta_interp(g, [], policy=StopAtXPolicy(external)) == g() - - def test_invalidate_by_setfield(self): - py.test.skip("Not implemented") - jitdriver = JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - - def f(foo, bc): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(bc=bc, i=i, foo=foo, total=total) - if bc == 0: - f(foo, 1) - if bc == 1: - 
foo.a = int(i > 5) - i += 1 - total += foo.a - return total - - def g(): - return f(Foo(1), 0) - - assert self.meta_interp(g, []) == g() - - def test_invalidate_bridge(self): - jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - - def f(foo): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(i=i, total=total, foo=foo) - if i > 5: - total += foo.a - else: - total += 2*foo.a - i += 1 - return total - - def main(): - foo = Foo() - foo.a = 1 - total = f(foo) - foo.a = 2 - total += f(foo) - return total - - res = self.meta_interp(main, []) - assert res == main() - -class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py deleted file mode 100644 --- a/pypy/jit/metainterp/quasiimmut.py +++ /dev/null @@ -1,116 +0,0 @@ -import weakref -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE -from pypy.rpython.lltypesystem import lltype, rclass -from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.jit.metainterp.history import AbstractDescr - - -def is_quasi_immutable(STRUCT, fieldname): - imm_fields = STRUCT._hints.get('immutable_fields') - return (imm_fields is not None and - imm_fields.fields.get(fieldname) is IR_QUASI_IMMUTABLE) - -def get_mutate_field_name(fieldname): - if fieldname.startswith('inst_'): # lltype - return 'mutate_' + fieldname[5:] - elif fieldname.startswith('o'): # ootype - return 'mutate_' + fieldname[1:] - else: - raise AssertionError(fieldname) - -def get_current_qmut_instance(cpu, gcref, mutatefielddescr): - """Returns the current QuasiImmut instance in the field, - possibly creating one. 
- """ - # XXX this is broken on x86 - qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) - if qmut_gcref: - qmut = QuasiImmut.show(cpu, qmut_gcref) - else: - qmut = QuasiImmut(cpu) - cpu.bh_setfield_gc_r(gcref, mutatefielddescr, qmut.hide()) - return qmut - -def make_invalidation_function(STRUCT, mutatefieldname): - # - def _invalidate_now(p): - qmut_ptr = getattr(p, mutatefieldname) - setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) - qmut = cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) - qmut.invalidate() - _invalidate_now._dont_inline_ = True - # - def invalidation(p): - if getattr(p, mutatefieldname): - _invalidate_now(p) - # - return invalidation - - -class QuasiImmut(object): - llopaque = True - - def __init__(self, cpu): - self.cpu = cpu - # list of weakrefs to the LoopTokens that must be invalidated if - # this value ever changes - self.looptokens_wrefs = [] - self.compress_limit = 30 - - def hide(self): - qmut_ptr = self.cpu.ts.cast_instance_to_base_ref(self) - return self.cpu.ts.cast_to_ref(qmut_ptr) - - @staticmethod - def show(cpu, qmut_gcref): - qmut_ptr = cpu.ts.cast_to_baseclass(qmut_gcref) - return cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) - - def register_loop_token(self, wref_looptoken): - if len(self.looptokens_wrefs) > self.compress_limit: - self.compress_looptokens_list() - self.looptokens_wrefs.append(wref_looptoken) - - def compress_looptokens_list(self): - self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs - if wref() is not None] - self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 - - def invalidate(self): - # When this is called, all the loops that we record become - # invalid and must not be called again, nor returned to. 
- wrefs = self.looptokens_wrefs - self.looptokens_wrefs = [] - for wref in wrefs: - looptoken = wref() - if looptoken is not None: - self.cpu.invalidate_loop(looptoken) - - -class QuasiImmutDescr(AbstractDescr): - def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): - self.cpu = cpu - self.structbox = structbox - self.fielddescr = fielddescr - self.mutatefielddescr = mutatefielddescr - gcref = structbox.getref_base() - self.qmut = get_current_qmut_instance(cpu, gcref, mutatefielddescr) - self.constantfieldbox = self.get_current_constant_fieldvalue() - - def get_current_constant_fieldvalue(self): - from pypy.jit.metainterp import executor - from pypy.jit.metainterp.resoperation import rop - fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC, - self.fielddescr, self.structbox) - return fieldbox.constbox() - - def is_still_valid(self): - cpu = self.cpu - gcref = self.structbox.getref_base() - qmut = get_current_qmut_instance(cpu, gcref, self.mutatefielddescr) - if qmut is not self.qmut: - return False - else: - currentbox = self.get_current_constant_fieldvalue() - assert self.constantfieldbox.same_constant(currentbox) - return True diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,7 +2,6 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -46,7 +45,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : 
IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) + XY, {'inst_x' : "", 'inst_node' : ""}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -211,8 +210,7 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : IR_IMMUTABLE, - 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) + XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5717,35 +5717,8 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() - def test_quasi_immut(self): - ops = """ - [p0, p1, i0] - quasiimmut_field(p0, descr=quasiimmutdescr) - guard_not_invalidated() [] - i1 = getfield_gc(p0, descr=quasifielddescr) - jump(p1, p0, i1) - """ - expected = """ - [p0, p1, i0] - i1 = getfield_gc(p0, descr=quasifielddescr) - jump(p1, p0, i1) - """ - self.optimize_loop(ops, expected) - - def test_quasi_immut_2(self): - ops = """ - [] - quasiimmut_field(ConstPtr(myptr), descr=quasiimmutdescr) - guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) - jump() - """ - expected = """ - [] - guard_not_invalidated() [] - jump() - """ - self.optimize_loop(ops, expected, expected) + + ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -7,9 +7,8 @@ from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from 
pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.codewriter.policy import log, check_skip_operation +from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem -from pypy.jit.metainterp import quasiimmut from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj @@ -562,8 +561,7 @@ arraydescr) return [] # check for _immutable_fields_ hints - immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) - if immut: + if v_inst.concretetype.TO._immutable_field(c_fieldname.value): if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): @@ -576,21 +574,10 @@ descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) kind = getkind(RESULT)[0] - op1 = SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), - [v_inst, descr], op.result) - # - if immut is quasiimmut.IR_QUASI_IMMUTABLE: - descr1 = self.cpu.fielddescrof( - v_inst.concretetype.TO, - quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), - SpaceOperation('record_quasiimmut_field', - [v_inst, descr, descr1], None), - op1] - return op1 + return SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), + [v_inst, descr], op.result) def rewrite_op_setfield(self, op): - check_skip_operation(op) # just to check it doesn't raise if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -257,7 +257,6 @@ self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False - self.quasi_immutable_deps = None self.newoperations = [] if loop 
is not None: self.call_pure_results = loop.call_pure_results @@ -310,7 +309,6 @@ new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None - new.quasi_immutable_deps = self.quasi_immutable_deps return new @@ -412,7 +410,6 @@ self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations - self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -41,8 +41,7 @@ # during preamble but to keep it during the loop optimizations.append(o) - if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: optimizations.append(OptSimplify()) if inline_short_preamble: diff --git a/pypy/rpython/test/test_annlowlevel.py b/pypy/rpython/test/test_annlowlevel.py --- a/pypy/rpython/test/test_annlowlevel.py +++ b/pypy/rpython/test/test_annlowlevel.py @@ -4,12 +4,9 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode -from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, oostr from pypy.rpython.annlowlevel import hlunicode, llunicode -from pypy.rpython import annlowlevel - class TestLLType(BaseRtypingTest, LLRtypeMixin): def test_hlstr(self): @@ -56,15 +53,6 @@ res = self.interpret(f, [self.unicode_to_ll(u"abc")]) assert res == 3 - def test_cast_instance_to_base_ptr(self): - class X(object): - pass - x = X() - ptr = annlowlevel.cast_instance_to_base_ptr(x) - assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() - y = 
annlowlevel.cast_base_ptr_to_instance(X, ptr) - assert y is x - class TestOOType(BaseRtypingTest, OORtypeMixin): def test_hlstr(self): @@ -83,12 +71,3 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 - - def test_cast_instance_to_base_obj(self): - class X(object): - pass - x = X() - obj = annlowlevel.cast_instance_to_base_obj(x) - assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() - y = annlowlevel.cast_base_ptr_to_instance(X, obj) - assert y is x diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -286,10 +286,6 @@ raise ValueError("CALL_ASSEMBLER not supported") llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) - def invalidate_loop(self, looptoken): - for loop in looptoken.compiled_loop_token.loop_and_bridges: - loop._obj.externalobj.invalid = True - # ---------- def sizeof(self, S): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -3,7 +3,6 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE -from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, @@ -13,7 +12,6 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int from pypy.jit.tool.oparser import parse -from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -64,18 +62,6 @@ nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') - accessor = 
FieldListAccessor() - accessor.initialize(None, {'inst_field': IR_QUASI_IMMUTABLE}) - QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed), - ('mutate_field', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - quasi = lltype.malloc(QUASI, immortal=True) - quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field') - quasibox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, quasi)) - quasiimmutdescr = QuasiImmutDescr(cpu, quasibox, - quasifielddescr, - cpu.fielddescrof(QUASI, 'mutate_field')) - NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT), ('ref', lltype.Ptr(OBJECT))) nodeobj = lltype.malloc(NODEOBJ) diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,7 +1,6 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -11,7 +10,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo(object): +class VirtualizableInfo: TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -34,13 +33,11 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, tp in all_fields.iteritems(): - if tp == IR_ARRAY_IMMUTABLE: + for name, suffix in all_fields.iteritems(): + if suffix == '[*]': array_fields.append(name) - elif tp == IR_IMMUTABLE: + else: static_fields.append(name) - else: - raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- 
a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -492,8 +492,6 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) - consider_guard_not_invalidated = consider_guard_no_exception - def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1293,28 +1293,6 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') - def test_opaque_tagged_pointers(self): - from pypy.rpython.annlowlevel import cast_base_ptr_to_instance - from pypy.rpython.annlowlevel import cast_instance_to_base_ptr - from pypy.rpython.lltypesystem import rclass - - class Opaque(object): - llopaque = True - - def hide(self): - ptr = cast_instance_to_base_ptr(self) - return lltype.cast_opaque_ptr(llmemory.GCREF, ptr) - - @staticmethod - def show(gcref): - ptr = lltype.cast_opaque_ptr(lltype.Ptr(rclass.OBJECT), gcref) - return cast_base_ptr_to_instance(Opaque, ptr) - - opaque = Opaque() - round = ctypes2lltype(llmemory.GCREF, lltype2ctypes(opaque.hide())) - assert Opaque.show(round) is opaque - - class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -119,7 +119,6 @@ self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - self._remove_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -379,43 +378,6 @@ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) - def optimize_QUASIIMMUT_FIELD(self, op): - # 
Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC(s, descr='inst_x') - # If 's' is a constant (after optimizations), then we make 's.inst_x' - # a constant too, and we rely on the rest of the optimizations to - # constant-fold the following getfield_gc. - structvalue = self.getvalue(op.getarg(0)) - if not structvalue.is_constant(): - self._remove_guard_not_invalidated = True - return # not a constant at all; ignore QUASIIMMUT_FIELD - # - from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr - qmutdescr = op.getdescr() - assert isinstance(qmutdescr, QuasiImmutDescr) - # check that the value is still correct; it could have changed - # already between the tracing and now. In this case, we are - # simply ignoring the QUASIIMMUT_FIELD hint and compiling it - # as a regular getfield. - if not qmutdescr.is_still_valid(): - self._remove_guard_not_invalidated = True - return - # record as an out-of-line guard - if self.optimizer.quasi_immutable_deps is None: - self.optimizer.quasi_immutable_deps = {} - self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None - # perform the replacement in the list of operations - fieldvalue = self.getvalue(qmutdescr.constantfieldbox) - cf = self.field_cache(qmutdescr.fielddescr) - cf.remember_field_value(structvalue, fieldvalue) - self._remove_guard_not_invalidated = False - - def optimize_GUARD_NOT_INVALIDATED(self, op): - if self._remove_guard_not_invalidated: - return - self._remove_guard_not_invalidated = False - self.emit_operation(op) - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,7 +312,6 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, - rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git 
a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,8 +3,7 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor, inputconst -from pypy.rpython.lltypesystem.lltype import Void +from pypy.rpython.rmodel import Repr, getgcflavor class FieldListAccessor(object): @@ -13,8 +12,6 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields - for x in fields.itervalues(): - assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -22,20 +19,6 @@ def _freeze_(self): return True -class ImmutableRanking(object): - def __init__(self, name, is_immutable): - self.name = name - self.is_immutable = is_immutable - def __nonzero__(self): - return self.is_immutable - def __repr__(self): - return '<%s>' % self.name - -IR_MUTABLE = ImmutableRanking('mutable', False) -IR_IMMUTABLE = ImmutableRanking('immutable', True) -IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) -IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) - class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -172,8 +155,7 @@ self.classdef = classdef def _setup_repr(self): - if self.classdef is None: - self.immutable_field_set = set() + pass def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -185,13 +167,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_set = set() # unless overwritten below + self.immutable_field_list = [] # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - 
self.immutable_field_set = set(immutable_fields.value) + self.immutable_field_list = immutable_fields.value accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -219,35 +201,33 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = set() + immutable_fields = [] rbase = self while rbase.classdef is not None: - immutable_fields.update(rbase.immutable_field_set) + immutable_fields += rbase.immutable_field_list rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - ranking = {} + with_suffix = {} for name in fields: - if name.endswith('[*]'): # for virtualizables' lists + if name.endswith('[*]'): name = name[:-3] - rank = IR_ARRAY_IMMUTABLE - elif name.endswith('?'): # a quasi-immutable field - name = name[:-1] - rank = IR_QUASI_IMMUTABLE - else: # a regular immutable/green field - rank = IR_IMMUTABLE + suffix = '[*]' + else: + suffix = '' try: mangled_name, r = self._get_field(name) except KeyError: continue - ranking[mangled_name] = rank - accessor.initialize(self.object_type, ranking) - return ranking + with_suffix[mangled_name] = suffix + accessor.initialize(self.object_type, with_suffix) + return with_suffix def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -268,30 +248,12 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if (fieldname in self.immutable_field_set or - (fieldname + '?') in self.immutable_field_set): + if fieldname in self.immutable_field_list: raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - - def hook_setfield(self, vinst, fieldname, llops): - if self.is_quasi_immutable(fieldname): - c_fieldname = inputconst(Void, 'mutate_' + fieldname) - llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) - - def is_quasi_immutable(self, fieldname): - search = fieldname + '?' 
- rbase = self - while rbase.classdef is not None: - if search in rbase.immutable_field_set: - return True - rbase = rbase.rbase - return False - def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,5 +1,5 @@ from pypy.translator.simplify import get_funcobj -from pypy.jit.metainterp import history, quasiimmut +from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir @@ -85,20 +85,12 @@ getkind(v.concretetype, supports_floats, supports_longlong) v = op.result getkind(v.concretetype, supports_floats, supports_longlong) - check_skip_operation(op) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) return True return False -def check_skip_operation(op): - if op.opname == 'setfield': - if quasiimmut.is_quasi_immutable(op.args[0].concretetype.TO, - op.args[1].value): - raise NotImplementedError("write to quasi-immutable field %r" - % (op.args[1].value,)) - # ____________________________________________________________ class StopAtXPolicy(JitPolicy): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -167,7 +167,6 @@ class CompiledLoop(object): has_been_freed = False - invalid = False def __init__(self): self.inputargs = [] @@ -934,9 +933,6 @@ if forced: raise GuardFailed - def op_guard_not_invalidated(self, descr): - if self.loop.invalid: - raise GuardFailed class OOFrame(Frame): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -947,43 +947,3 @@ assert op1.args[1] == 'calldescr-%d' % 
effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) assert op1.args[3] == ListOfKind('ref', [v1, v2]) - -def test_quasi_immutable(): - from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - v2 = varoftype(lltype.Signed) - STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], - v2) - tr = Transformer(FakeCPU()) - [_, op1, op2] = tr.rewrite_operation(op) - assert op1.opname == 'record_quasiimmut_field' - assert len(op1.args) == 3 - assert op1.args[0] == v_x - assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') - assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') - assert op1.result is None - assert op2.opname == 'getfield_gc_i' - assert len(op2.args) == 2 - assert op2.args[0] == v_x - assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') - assert op2.result is op.result - -def test_quasi_immutable_setfield(): - from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - v1 = varoftype(lltype.Signed) - STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('setfield', - [v_x, Constant('inst_x', lltype.Void), v1], - varoftype(lltype.Void)) - tr = Transformer(FakeCPU()) - raises(NotImplementedError, tr.rewrite_operation, op) diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -171,8 +171,7 @@ class 
VirtualizableAnalyzer(BoolGraphAnalyzer): def analyze_simple_operation(self, op, graphinfo): return op.opname in ('jit_force_virtualizable', - 'jit_force_virtual', - 'jit_force_quasi_immutable') + 'jit_force_virtual') # ____________________________________________________________ diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,16 +555,6 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any - @arguments("box", "descr", "descr", "orgpc") - def opimpl_record_quasiimmut_field(self, box, fielddescr, - mutatefielddescr, orgpc): - from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr - cpu = self.metainterp.cpu - descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) - self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], - None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] @@ -1086,8 +1076,6 @@ if opnum == rop.GUARD_NOT_FORCED: resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() guard_op = metainterp.history.record(opnum, moreargs, None, @@ -1860,9 +1848,6 @@ self.handle_possible_exception() except ChangeFrame: pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass # XXX we want to do something special in resume descr, - # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected self.execute_raised(OverflowError(), constant=True) try: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -76,11 +76,6 @@ 
op.setdescr(None) # clear reference, mostly for tests if not we_are_translated(): op._jumptarget_number = descr.number - # record this looptoken on the QuasiImmut used in the code - if loop.quasi_immutable_deps is not None: - for qmut in loop.quasi_immutable_deps: - qmut.register_loop_token(wref) - # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): @@ -401,12 +396,6 @@ self.copy_all_attributes_into(res) return res -class ResumeGuardNotInvalidated(ResumeGuardDescr): - def _clone_if_mutable(self): - res = ResumeGuardNotInvalidated() - self.copy_all_attributes_into(res) - return res - class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,7 +54,6 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant - from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -86,50 +85,38 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind 
- assert not llop.getfield.is_pure([v_s3, Constant('y')]) + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + assert llop.getfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - # - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - else: - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') - 
py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'x') - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') - py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'y') + assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -145,14 +145,6 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) - def invalidate_loop(self, looptoken): - from pypy.jit.backend.x86 import codebuf - - for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: - mc = codebuf.MachineCodeBlockWrapper() - mc.JMP_l(tgt) - mc.copy_to_raw_memory(addr - 1) - class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,9 +525,6 @@ def op_jit_force_virtual(x): return x -def op_jit_force_quasi_immutable(*args): - pass - def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,7 +5,6 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -117,8 +116,8 @@ 
TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, - self.prefix + 'v2': IR_ARRAY_IMMUTABLE} + assert accessor.fields == {self.prefix + 'v1' : "", + self.prefix + 'v2': "[*]"} # def fn2(n): Base().base1 = 42 diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1166,11 +1166,6 @@ def bhimpl_setfield_raw_f(cpu, struct, fielddescr, newvalue): cpu.bh_setfield_raw_f(struct, fielddescr, newvalue) - @arguments("cpu", "r", "d", "d") - def bhimpl_record_quasiimmut_field(self, struct, fielddescr, - mutatefielddescr): - pass - @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) @@ -1292,8 +1287,6 @@ # We get here because it used to overflow, but now it no longer # does. pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass else: from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,7 +433,6 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), - 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/jit/backend/x86/test/test_quasiimmut.py b/pypy/jit/backend/x86/test/test_quasiimmut.py deleted file mode 100644 --- a/pypy/jit/backend/x86/test/test_quasiimmut.py +++ /dev/null @@ -1,9 +0,0 @@ - -import py -from pypy.jit.backend.x86.test.test_basic import Jit386Mixin -from pypy.jit.metainterp.test import test_quasiimmut - -class TestLoopSpec(Jit386Mixin, 
test_quasiimmut.QuasiImmutTests): - # for the individual tests see - # ====> ../../../metainterp/test/test_loop.py - pass diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if self.my_redirected_fields.get(cname.value): + if cname.value in self.my_redirected_fields: cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -380,7 +380,6 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', - 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -476,7 +475,6 @@ 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', - 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,8 +794,15 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x': 1234})}) - assert S._immutable_field('x') == 1234 + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': 
FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' def test_typedef(): T = Typedef(Signed, 'T') diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -267,8 +267,6 @@ virtual_state = modifier.get_virtual_state(jump_args) loop.preamble.operations = self.optimizer.newoperations - loop.preamble.quasi_immutable_deps = ( - self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.reconstruct_for_next_iteration() inputargs = self.inline(self.cloned_operations, loop.inputargs, jump_args) @@ -278,7 +276,6 @@ loop.preamble.operations.append(jmp) loop.operations = self.optimizer.newoperations - loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() assert isinstance(start_resumedescr, ResumeGuardDescr) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,7 +85,6 @@ 'nslots', 'instancetypedef', 'terminator', - '_version_tag?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -131,16 +131,6 @@ def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') -def find_force_quasi_immutable(graphs): - results = [] - for graph in graphs: - for block in graph.iterblocks(): - for i in range(len(block.operations)): - op = block.operations[i] - if op.opname == 'jit_force_quasi_immutable': - results.append((graph, block, i)) - return results - def get_stats(): return pyjitpl._warmrunnerdesc.stats @@ -197,7 +187,6 @@ self.rewrite_can_enter_jits() self.rewrite_set_param() self.rewrite_force_virtual(vrefinfo) - self.rewrite_force_quasi_immutable() 
self.add_finish() self.metainterp_sd.finish_setup(self.codewriter) @@ -853,28 +842,6 @@ all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) - def replace_force_quasiimmut_with_direct_call(self, op): - ARG = op.args[0].concretetype - mutatefieldname = op.args[1].value - key = (ARG, mutatefieldname) - if key in self._cache_force_quasiimmed_funcs: - cptr = self._cache_force_quasiimmed_funcs[key] - else: - from pypy.jit.metainterp import quasiimmut - func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) - FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) - llptr = self.helper_func(FUNC, func) - cptr = Constant(llptr, FUNC) - self._cache_force_quasiimmed_funcs[key] = cptr - op.opname = 'direct_call' - op.args = [cptr, op.args[0]] - - def rewrite_force_quasi_immutable(self): - self._cache_force_quasiimmed_funcs = {} - graphs = self.translator.graphs - for graph, block, i in find_force_quasi_immutable(graphs): - self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - # ____________________________________________________________ def execute_token(self, loop_token): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,7 +322,6 @@ # before they are fully built, to avoid strange bugs in case # of recursion where other code would uses these # partially-initialized dicts. 
- AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -371,11 +370,6 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True - - for name, attrdef in attrs: - if not attrdef.readonly and self.is_quasi_immutable(name): - llfields.append(('mutate_' + name, OBJECTPTR)) - object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -494,7 +488,6 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -502,6 +495,9 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -480,26 +480,7 @@ # ____________________________________________________________ def cast_object_to_ptr(PTR, object): - """NOT_RPYTHON: hack. The object may be disguised as a PTR now. - Limited to casting a given object to a single type. 
- """ - if isinstance(PTR, lltype.Ptr): - TO = PTR.TO - else: - TO = PTR - if not hasattr(object, '_carry_around_for_tests'): - assert not hasattr(object, '_TYPE') - object._carry_around_for_tests = True - object._TYPE = TO - else: - assert object._TYPE == TO - # - if isinstance(PTR, lltype.Ptr): - return lltype._ptr(PTR, object, True) - elif isinstance(PTR, ootype.Instance): - return object - else: - raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) + raise NotImplementedError("cast_object_to_ptr") def cast_instance_to_base_ptr(instance): return cast_object_to_ptr(base_ptr_lltype(), instance) @@ -554,13 +535,7 @@ # ____________________________________________________________ def cast_base_ptr_to_instance(Class, ptr): - """NOT_RPYTHON: hack. Reverse the hacking done in cast_object_to_ptr().""" - if isinstance(lltype.typeOf(ptr), lltype.Ptr): - ptr = ptr._as_obj() - if not isinstance(ptr, Class): - raise NotImplementedError("cast_base_ptr_to_instance: casting %r to %r" - % (ptr, Class)) - return ptr + raise NotImplementedError("cast_base_ptr_to_instance") class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry): _about_ = cast_base_ptr_to_instance diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -20,9 +20,6 @@ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) self.emit_operation(op) - def optimize_QUASIIMMUT_FIELD(self, op): - pass - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,14 +268,13 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 
'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class SpecializableType(OOType): diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,8 +5,6 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -748,10 +746,8 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_ARRAY_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_ARRAY_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ + accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -769,8 +765,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : ""} or \ + accessor.fields == {"ox" : ""} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -789,10 +785,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert 
accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ + accessor.fields == {"ox" : "", "oy" : ""} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -810,8 +804,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_y" : ""} or \ + accessor.fields == {"oy" : ""} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -855,8 +849,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ - accessor.fields == {"ov": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -901,37 +895,6 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] - def test_quasi_immutable(self): - from pypy.jit.metainterp.typesystem import deref - class A(object): - _immutable_fields_ = ['x', 'y', 'a?', 'b?'] - class B(A): - pass - def f(): - a = A() - a.x = 42 - a.a = 142 - b = B() - b.x = 43 - b.y = 41 - b.a = 44 - b.b = 45 - return B() - t, typer, graph = self.gengraph(f, []) - B_TYPE = deref(graph.getreturnvar().concretetype) - accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE, - "inst_b": IR_QUASI_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE, - "oa": IR_QUASI_IMMUTABLE, - "ob": 
IR_QUASI_IMMUTABLE} # for ootype - found = [] - for op in graph.startblock.operations: - if op.opname == 'jit_force_quasi_immutable': - found.append(op.args[1].value) - assert found == ['mutate_a', 'mutate_a', 'mutate_b'] - class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -48,12 +48,11 @@ class GuardToken(object): - def __init__(self, faildescr, failargs, fail_locs, exc, has_jump): + def __init__(self, faildescr, failargs, fail_locs, exc): self.faildescr = faildescr self.failargs = failargs self.fail_locs = fail_locs self.exc = exc - self.has_jump = has_jump DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) @@ -134,7 +133,6 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" self.current_clt = looptoken.compiled_loop_token - self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -143,7 +141,6 @@ allblocks) def teardown(self): - self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -438,24 +435,15 @@ # tok.faildescr._x86_adr_jump_offset to contain the raw address of # the 4-byte target field in the JMP/Jcond instruction, and patch # the field in question to point (initially) to the recovery stub - inv_counter = 0 - clt = self.current_clt for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset tok.faildescr._x86_adr_jump_offset = addr relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - if tok.has_jump: - mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(relative_target) - mc.copy_to_raw_memory(addr) - else: - # guard not invalidate, patch where it jumps - pos, _ = self.invalidate_positions[inv_counter] - 
clt.invalidate_positions.append((pos + rawstart, - relative_target)) - inv_counter += 1 + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(relative_target) + mc.copy_to_raw_memory(addr) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1459,13 +1447,6 @@ self.mc.CMP(heap(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') - def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, - locs, ign_2): - pos = self.mc.get_relative_pos() + 1 # after potential jmp - guard_token.pos_jump_offset = pos - self.invalidate_positions.append((pos, 0)) - self.pending_guard_tokens.append(guard_token) - def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] @@ -1564,8 +1545,7 @@ exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return GuardToken(faildescr, failargs, fail_locs, exc, has_jump= - guard_opnum != rop.GUARD_NOT_INVALIDATED) + return GuardToken(faildescr, failargs, fail_locs, exc) def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. We try to From commits-noreply at bitbucket.org Sun Apr 17 10:28:07 2011 From: commits-noreply at bitbucket.org (fijal) Date: Sun, 17 Apr 2011 10:28:07 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110417082807.3AB1E282BAD@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43420:0d46e20ace72 Date: 2011-04-17 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/0d46e20ace72/ Log: merge diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -880,6 +880,11 @@ except AttributeError: return False + def warn_missing_attribute(self, attr): + # only warn for missing attribute names whose name doesn't start + # with '$', to silence the warnings about '$memofield_xxx'. 
+ return not self.has_attribute(attr) and not attr.startswith('$') + def read_attribute(self, attr): try: return self.attrcache[attr] diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -31,22 +31,28 @@ def checkdecode(self, s, encoding): decoder = self.getdecoder(encoding) - if isinstance(s, str): - trueresult = s.decode(encoding) - else: - trueresult = s - s = s.encode(encoding) + try: + if isinstance(s, str): + trueresult = s.decode(encoding) + else: + trueresult = s + s = s.encode(encoding) + except LookupError, e: + py.test.skip(e) result, consumed = decoder(s, len(s), True) assert consumed == len(s) self.typeequals(trueresult, result) def checkencode(self, s, encoding): encoder = self.getencoder(encoding) - if isinstance(s, unicode): - trueresult = s.encode(encoding) - else: - trueresult = s - s = s.decode(encoding) + try: + if isinstance(s, unicode): + trueresult = s.encode(encoding) + else: + trueresult = s + s = s.decode(encoding) + except LookupError, e: + py.test.skip(e) result = encoder(s, len(s), True) self.typeequals(trueresult, result) diff --git a/pypy/rpython/rpbc.py b/pypy/rpython/rpbc.py --- a/pypy/rpython/rpbc.py +++ b/pypy/rpython/rpbc.py @@ -485,7 +485,7 @@ try: thisattrvalue = frozendesc.attrcache[attr] except KeyError: - if not frozendesc.has_attribute(attr): + if frozendesc.warn_missing_attribute(attr): warning("Desc %r has no attribute %r" % (frozendesc, attr)) continue llvalue = r_value.convert_const(thisattrvalue) diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -112,7 +112,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0)): # or (ordch1 == 0xed and ordch2 > 0x9f) # second byte invalid, take the first and continue @@ -130,7 +130,7 @@ break 
elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): # second byte invalid, take the first and continue @@ -139,7 +139,7 @@ s, pos, pos+1) result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0b10: + elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', @@ -165,21 +165,21 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0b10: + if ordch2>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00011111) << 6) + - (ordch2 & 0b00111111))) + result.append(unichr(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F))) # 0b00111111 pos += 2 elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. 
@@ -190,23 +190,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz - result.append(unichr(((ordch1 & 0b00001111) << 12) + - ((ordch2 & 0b00111111) << 6) + - (ordch3 & 0b00111111))) + result.append(unichr(((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 pos += 3 elif n == 4: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0b10 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): r, pos = errorhandler(errors, 'utf-8', @@ -214,23 +214,23 @@ s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0b10: + elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif ordch4>>6 != 0b10: + elif ordch4>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf-8', 'invalid continuation byte', s, pos, pos+3) result.append(r) continue # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz - c = (((ordch1 & 0b00000111) << 18) + - ((ordch2 & 0b00111111) << 12) + - ((ordch3 & 0b00111111) << 6) + - (ordch4 & 0b00111111)) + c = (((ordch1 & 0x07) << 18) + # 0b00000111 + ((ordch2 & 0x3F) << 12) + # 0b00111111 + ((ordch3 & 0x3F) << 6) + # 0b00111111 + (ordch4 & 0x3F)) # 0b00111111 if c <= MAXUNICODE: result.append(UNICHR(c)) else: From commits-noreply at bitbucket.org Sun Apr 17 10:52:14 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:52:14 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test and fix. 
Message-ID: <20110417085214.811D4282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43421:18e3be70dc88 Date: 2011-04-17 09:27 +0200 http://bitbucket.org/pypy/pypy/changeset/18e3be70dc88/ Log: Test and fix. diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -520,7 +520,7 @@ unerase = unerase_item else: erase = lambda x: x - unerase = lambda x, t: x + unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -438,6 +438,14 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap + extras = {} + for attr in "cdefghijklmnopqrstuvwxyz": + extras[attr] = W_Root() + obj2.setdictvalue(space, attr, extras[attr]) + assert obj2.getdictvalue(space, attr) is extras[attr] + for attr in "cdefghijklmnopqrstuvwxyz": + assert obj2.getdictvalue(space, attr) is extras[attr] + def test_specialized_class_compressptr(): test_specialized_class(compressptr=True) From commits-noreply at bitbucket.org Sun Apr 17 10:52:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:52:15 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417085215.9DC69282BD4@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43422:cc9bea98a4e7 Date: 2011-04-17 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/cc9bea98a4e7/ Log: merge heads diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -520,7 +520,7 @@ unerase = unerase_item else: erase = lambda x: x - unerase = lambda x, t: x + unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 diff --git 
a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -438,6 +438,14 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap + extras = {} + for attr in "cdefghijklmnopqrstuvwxyz": + extras[attr] = W_Root() + obj2.setdictvalue(space, attr, extras[attr]) + assert obj2.getdictvalue(space, attr) is extras[attr] + for attr in "cdefghijklmnopqrstuvwxyz": + assert obj2.getdictvalue(space, attr) is extras[attr] + def test_specialized_class_compressptr(): test_specialized_class(compressptr=True) From commits-noreply at bitbucket.org Sun Apr 17 10:52:20 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:52:20 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test and fix. Message-ID: <20110417085220.01072282BAA@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43421:18e3be70dc88 Date: 2011-04-17 09:27 +0200 http://bitbucket.org/pypy/pypy/changeset/18e3be70dc88/ Log: Test and fix. 
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -520,7 +520,7 @@ unerase = unerase_item else: erase = lambda x: x - unerase = lambda x, t: x + unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -438,6 +438,14 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap + extras = {} + for attr in "cdefghijklmnopqrstuvwxyz": + extras[attr] = W_Root() + obj2.setdictvalue(space, attr, extras[attr]) + assert obj2.getdictvalue(space, attr) is extras[attr] + for attr in "cdefghijklmnopqrstuvwxyz": + assert obj2.getdictvalue(space, attr) is extras[attr] + def test_specialized_class_compressptr(): test_specialized_class(compressptr=True) From commits-noreply at bitbucket.org Sun Apr 17 10:52:21 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 10:52:21 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417085221.36C63282BDA@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43422:cc9bea98a4e7 Date: 2011-04-17 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/cc9bea98a4e7/ Log: merge heads diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -520,7 +520,7 @@ unerase = unerase_item else: erase = lambda x: x - unerase = lambda x, t: x + unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -438,6 +438,14 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap + 
extras = {} + for attr in "cdefghijklmnopqrstuvwxyz": + extras[attr] = W_Root() + obj2.setdictvalue(space, attr, extras[attr]) + assert obj2.getdictvalue(space, attr) is extras[attr] + for attr in "cdefghijklmnopqrstuvwxyz": + assert obj2.getdictvalue(space, attr) is extras[attr] + def test_specialized_class_compressptr(): test_specialized_class(compressptr=True) From commits-noreply at bitbucket.org Sun Apr 17 11:49:56 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:49:56 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test and fix. Message-ID: <20110417094956.BA76C282B90@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43423:f8d1f198cee3 Date: 2011-04-17 11:30 +0200 http://bitbucket.org/pypy/pypy/changeset/f8d1f198cee3/ Log: Test and fix. diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -4,6 +4,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import jit +from pypy.rlib.jit import JitDriver class TestRCompressed(LLJitMixin): @@ -45,6 +46,9 @@ res = self.interp_operations(f, [42]) assert res == 42063 + def test_store_load_array(self): + py.test.skip("write me") + def test_call_argument(self): # the issue here is that even if we wrote this test, for now, it's # not going to really test the interesting parts, which are in @@ -53,3 +57,28 @@ def test_call_result(self): py.test.skip("write me") + + def test_jit_merge_point(self): + jitdriver = JitDriver(greens=[], reds=['total', 'a']) + S = lltype.GcStruct('S', ('n', lltype.Signed)) + def main(n): + a = f(n) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + return s.n + def f(n): + s = lltype.malloc(S) + s.n = n + total = 0 + while s.n > 0: + a = llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + jitdriver.jit_merge_point(a=a, 
total=total) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + n = s.n + total += n + s = lltype.malloc(S) + s.n = n - 1 + s = lltype.malloc(S) + s.n = total + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + res = self.meta_interp(main, [8]) + assert res == 36 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,5 +1,6 @@ import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr @@ -30,21 +31,26 @@ elif INPUT is longlong.FLOATSTORAGE: assert TYPE is lltype.Float return longlong.getrealfloat(x) + elif TYPE == llmemory.HiddenGcRef32: + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) else: return lltype.cast_opaque_ptr(TYPE, x) @specialize.ll() def unspecialize_value(value): """Casts 'value' to a Signed, a GCREF or a FLOATSTORAGE.""" - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + if TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) return lltype.cast_opaque_ptr(llmemory.GCREF, value) else: adr = llmemory.cast_ptr_to_adr(value) return heaptracker.adr2int(adr) - elif isinstance(lltype.typeOf(value), ootype.OOType): + elif isinstance(TYPE, ootype.OOType): return ootype.cast_to_object(value) - elif isinstance(value, float): + elif TYPE == lltype.Float: return longlong.getfloatstorage(value) else: return lltype.cast_primitive(lltype.Signed, value) @@ -64,34 +70,23 @@ @specialize.ll() def wrap(cpu, value, in_const_box=False): - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 
'gc': - value = lltype.cast_opaque_ptr(llmemory.GCREF, value) - if in_const_box: - return history.ConstPtr(value) - else: - return history.BoxPtr(value) + value = unspecialize_value(value) + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if in_const_box: + return history.ConstPtr(value) else: - adr = llmemory.cast_ptr_to_adr(value) - value = heaptracker.adr2int(adr) - # fall through to the end of the function - elif isinstance(lltype.typeOf(value), ootype.OOType): - value = ootype.cast_to_object(value) + return history.BoxPtr(value) + if isinstance(TYPE, ootype.OOType): if in_const_box: return history.ConstObj(value) else: return history.BoxObj(value) - elif isinstance(value, float): - value = longlong.getfloatstorage(value) + if TYPE == lltype.Float: if in_const_box: return history.ConstFloat(value) else: return history.BoxFloat(value) - elif isinstance(value, str) or isinstance(value, unicode): - assert len(value) == 1 # must be a character - value = ord(value) - else: - value = intmask(value) if in_const_box: return history.ConstInt(value) else: From commits-noreply at bitbucket.org Sun Apr 17 11:49:57 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:49:57 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417094957.6CAEE282B90@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43424:ed5ecec37b3d Date: 2011-04-17 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/ed5ecec37b3d/ Log: merge heads diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -64,34 +64,23 @@ @specialize.ll() def wrap(cpu, value, in_const_box=False): - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': - value = lltype.cast_opaque_ptr(llmemory.GCREF, value) - if in_const_box: - return history.ConstPtr(value) - else: - return 
history.BoxPtr(value) + value = unspecialize_value(value) + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if in_const_box: + return history.ConstPtr(value) else: - adr = llmemory.cast_ptr_to_adr(value) - value = heaptracker.adr2int(adr) - # fall through to the end of the function - elif isinstance(lltype.typeOf(value), ootype.OOType): - value = ootype.cast_to_object(value) + return history.BoxPtr(value) + if isinstance(TYPE, ootype.OOType): if in_const_box: return history.ConstObj(value) else: return history.BoxObj(value) - elif isinstance(value, float): - value = longlong.getfloatstorage(value) + if TYPE == lltype.Float: if in_const_box: return history.ConstFloat(value) else: return history.BoxFloat(value) - elif isinstance(value, str) or isinstance(value, unicode): - assert len(value) == 1 # must be a character - value = ord(value) - else: - value = intmask(value) if in_const_box: return history.ConstInt(value) else: From commits-noreply at bitbucket.org Sun Apr 17 11:49:58 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:49:58 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417094958.487AD282B90@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43425:023210fffbc7 Date: 2011-04-17 11:32 +0200 http://bitbucket.org/pypy/pypy/changeset/023210fffbc7/ Log: merge heads diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -4,6 +4,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import jit +from pypy.rlib.jit import JitDriver class TestRCompressed(LLJitMixin): @@ -45,6 +46,9 @@ res = self.interp_operations(f, [42]) assert res == 42063 + def test_store_load_array(self): + py.test.skip("write me") + def test_call_argument(self): # the 
issue here is that even if we wrote this test, for now, it's # not going to really test the interesting parts, which are in @@ -53,3 +57,28 @@ def test_call_result(self): py.test.skip("write me") + + def test_jit_merge_point(self): + jitdriver = JitDriver(greens=[], reds=['total', 'a']) + S = lltype.GcStruct('S', ('n', lltype.Signed)) + def main(n): + a = f(n) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + return s.n + def f(n): + s = lltype.malloc(S) + s.n = n + total = 0 + while s.n > 0: + a = llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + jitdriver.jit_merge_point(a=a, total=total) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + n = s.n + total += n + s = lltype.malloc(S) + s.n = n - 1 + s = lltype.malloc(S) + s.n = total + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + res = self.meta_interp(main, [8]) + assert res == 36 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,5 +1,6 @@ import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr @@ -30,21 +31,26 @@ elif INPUT is longlong.FLOATSTORAGE: assert TYPE is lltype.Float return longlong.getrealfloat(x) + elif TYPE == llmemory.HiddenGcRef32: + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) else: return lltype.cast_opaque_ptr(TYPE, x) @specialize.ll() def unspecialize_value(value): """Casts 'value' to a Signed, a GCREF or a FLOATSTORAGE.""" - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + if TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) return 
lltype.cast_opaque_ptr(llmemory.GCREF, value) else: adr = llmemory.cast_ptr_to_adr(value) return heaptracker.adr2int(adr) - elif isinstance(lltype.typeOf(value), ootype.OOType): + elif isinstance(TYPE, ootype.OOType): return ootype.cast_to_object(value) - elif isinstance(value, float): + elif TYPE == lltype.Float: return longlong.getfloatstorage(value) else: return lltype.cast_primitive(lltype.Signed, value) From commits-noreply at bitbucket.org Sun Apr 17 11:50:05 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:05 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test and fix. Message-ID: <20110417095005.67C7F282BDA@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43423:f8d1f198cee3 Date: 2011-04-17 11:30 +0200 http://bitbucket.org/pypy/pypy/changeset/f8d1f198cee3/ Log: Test and fix. diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -4,6 +4,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import jit +from pypy.rlib.jit import JitDriver class TestRCompressed(LLJitMixin): @@ -45,6 +46,9 @@ res = self.interp_operations(f, [42]) assert res == 42063 + def test_store_load_array(self): + py.test.skip("write me") + def test_call_argument(self): # the issue here is that even if we wrote this test, for now, it's # not going to really test the interesting parts, which are in @@ -53,3 +57,28 @@ def test_call_result(self): py.test.skip("write me") + + def test_jit_merge_point(self): + jitdriver = JitDriver(greens=[], reds=['total', 'a']) + S = lltype.GcStruct('S', ('n', lltype.Signed)) + def main(n): + a = f(n) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + return s.n + def f(n): + s = lltype.malloc(S) + s.n = n + total = 0 + while s.n > 0: + a = 
llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + jitdriver.jit_merge_point(a=a, total=total) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + n = s.n + total += n + s = lltype.malloc(S) + s.n = n - 1 + s = lltype.malloc(S) + s.n = total + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + res = self.meta_interp(main, [8]) + assert res == 36 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,5 +1,6 @@ import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr @@ -30,21 +31,26 @@ elif INPUT is longlong.FLOATSTORAGE: assert TYPE is lltype.Float return longlong.getrealfloat(x) + elif TYPE == llmemory.HiddenGcRef32: + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) else: return lltype.cast_opaque_ptr(TYPE, x) @specialize.ll() def unspecialize_value(value): """Casts 'value' to a Signed, a GCREF or a FLOATSTORAGE.""" - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + if TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) return lltype.cast_opaque_ptr(llmemory.GCREF, value) else: adr = llmemory.cast_ptr_to_adr(value) return heaptracker.adr2int(adr) - elif isinstance(lltype.typeOf(value), ootype.OOType): + elif isinstance(TYPE, ootype.OOType): return ootype.cast_to_object(value) - elif isinstance(value, float): + elif TYPE == lltype.Float: return longlong.getfloatstorage(value) else: return lltype.cast_primitive(lltype.Signed, value) @@ -64,34 +70,23 @@ @specialize.ll() def wrap(cpu, value, in_const_box=False): - if 
isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': - value = lltype.cast_opaque_ptr(llmemory.GCREF, value) - if in_const_box: - return history.ConstPtr(value) - else: - return history.BoxPtr(value) + value = unspecialize_value(value) + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if in_const_box: + return history.ConstPtr(value) else: - adr = llmemory.cast_ptr_to_adr(value) - value = heaptracker.adr2int(adr) - # fall through to the end of the function - elif isinstance(lltype.typeOf(value), ootype.OOType): - value = ootype.cast_to_object(value) + return history.BoxPtr(value) + if isinstance(TYPE, ootype.OOType): if in_const_box: return history.ConstObj(value) else: return history.BoxObj(value) - elif isinstance(value, float): - value = longlong.getfloatstorage(value) + if TYPE == lltype.Float: if in_const_box: return history.ConstFloat(value) else: return history.BoxFloat(value) - elif isinstance(value, str) or isinstance(value, unicode): - assert len(value) == 1 # must be a character - value = ord(value) - else: - value = intmask(value) if in_const_box: return history.ConstInt(value) else: From commits-noreply at bitbucket.org Sun Apr 17 11:50:06 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:06 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417095006.C5A04282BAA@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43424:ed5ecec37b3d Date: 2011-04-17 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/ed5ecec37b3d/ Log: merge heads diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -64,34 +64,23 @@ @specialize.ll() def wrap(cpu, value, in_const_box=False): - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': - value = lltype.cast_opaque_ptr(llmemory.GCREF, value) - 
if in_const_box: - return history.ConstPtr(value) - else: - return history.BoxPtr(value) + value = unspecialize_value(value) + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if in_const_box: + return history.ConstPtr(value) else: - adr = llmemory.cast_ptr_to_adr(value) - value = heaptracker.adr2int(adr) - # fall through to the end of the function - elif isinstance(lltype.typeOf(value), ootype.OOType): - value = ootype.cast_to_object(value) + return history.BoxPtr(value) + if isinstance(TYPE, ootype.OOType): if in_const_box: return history.ConstObj(value) else: return history.BoxObj(value) - elif isinstance(value, float): - value = longlong.getfloatstorage(value) + if TYPE == lltype.Float: if in_const_box: return history.ConstFloat(value) else: return history.BoxFloat(value) - elif isinstance(value, str) or isinstance(value, unicode): - assert len(value) == 1 # must be a character - value = ord(value) - else: - value = intmask(value) if in_const_box: return history.ConstInt(value) else: From commits-noreply at bitbucket.org Sun Apr 17 11:50:08 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:08 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417095008.C10B9282BAA@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43425:023210fffbc7 Date: 2011-04-17 11:32 +0200 http://bitbucket.org/pypy/pypy/changeset/023210fffbc7/ Log: merge heads diff --git a/pypy/jit/metainterp/test/test_rcompressed.py b/pypy/jit/metainterp/test/test_rcompressed.py --- a/pypy/jit/metainterp/test/test_rcompressed.py +++ b/pypy/jit/metainterp/test/test_rcompressed.py @@ -4,6 +4,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import jit +from pypy.rlib.jit import JitDriver class TestRCompressed(LLJitMixin): @@ -45,6 +46,9 @@ res = self.interp_operations(f, [42]) assert res == 42063 + def test_store_load_array(self): + 
py.test.skip("write me") + def test_call_argument(self): # the issue here is that even if we wrote this test, for now, it's # not going to really test the interesting parts, which are in @@ -53,3 +57,28 @@ def test_call_result(self): py.test.skip("write me") + + def test_jit_merge_point(self): + jitdriver = JitDriver(greens=[], reds=['total', 'a']) + S = lltype.GcStruct('S', ('n', lltype.Signed)) + def main(n): + a = f(n) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + return s.n + def f(n): + s = lltype.malloc(S) + s.n = n + total = 0 + while s.n > 0: + a = llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + jitdriver.jit_merge_point(a=a, total=total) + s = llop.show_from_ptr32(lltype.Ptr(S), a) + n = s.n + total += n + s = lltype.malloc(S) + s.n = n - 1 + s = lltype.malloc(S) + s.n = total + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, s) + res = self.meta_interp(main, [8]) + assert res == 36 diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py --- a/pypy/jit/metainterp/warmstate.py +++ b/pypy/jit/metainterp/warmstate.py @@ -1,5 +1,6 @@ import sys, weakref from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, cast_base_ptr_to_instance from pypy.rpython.annlowlevel import cast_object_to_ptr @@ -30,21 +31,26 @@ elif INPUT is longlong.FLOATSTORAGE: assert TYPE is lltype.Float return longlong.getrealfloat(x) + elif TYPE == llmemory.HiddenGcRef32: + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, x) else: return lltype.cast_opaque_ptr(TYPE, x) @specialize.ll() def unspecialize_value(value): """Casts 'value' to a Signed, a GCREF or a FLOATSTORAGE.""" - if isinstance(lltype.typeOf(value), lltype.Ptr): - if lltype.typeOf(value).TO._gckind == 'gc': + TYPE = lltype.typeOf(value) + if isinstance(TYPE, lltype.Ptr): + if TYPE.TO._gckind == 'gc': + if TYPE == 
llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) return lltype.cast_opaque_ptr(llmemory.GCREF, value) else: adr = llmemory.cast_ptr_to_adr(value) return heaptracker.adr2int(adr) - elif isinstance(lltype.typeOf(value), ootype.OOType): + elif isinstance(TYPE, ootype.OOType): return ootype.cast_to_object(value) - elif isinstance(value, float): + elif TYPE == lltype.Float: return longlong.getfloatstorage(value) else: return lltype.cast_primitive(lltype.Signed, value) From commits-noreply at bitbucket.org Sun Apr 17 11:50:40 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:40 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Passing tests for rewrite_assembler. Message-ID: <20110417095040.946E9282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43426:6dde296107bb Date: 2011-04-17 02:44 -0700 http://bitbucket.org/pypy/pypy/changeset/6dde296107bb/ Log: Passing tests for rewrite_assembler. diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -298,6 +298,7 @@ gcrootfinder = 'asmgcc' gctransformer = 'framework' gcremovetypeptr = False + compressptr = True class FakeTranslator(object): config = config_ class FakeCPU(object): @@ -305,6 +306,9 @@ ptr = llmemory.cast_adr_to_ptr(adr, gc_ll_descr.WB_FUNCPTR) assert ptr._obj._callable == llop1._write_barrier_failing_case return 42 + class FakeWriteBarrierDescr(AbstractDescr): + def __eq__(self, other): + return 'WriteBarrierDescr' in repr(other) gcdescr = get_description(config_) translator = FakeTranslator() llop1 = FakeLLOp() @@ -314,6 +318,7 @@ self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() + self.writebarrierdescr = FakeWriteBarrierDescr() def test_args_for_new(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -597,5 +602,111 @@ 
self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) equaloplists(ops.operations, expected.operations) + def test_rewrite_assembler_hidden_getfield_gc(self): + S = lltype.GcStruct('S', ('x', llmemory.HiddenGcRef32)) + xdescr = get_field_descr(self.gc_ll_descr, S, 'x') + ops = parse(""" + [p0] + p1 = getfield_gc(p0, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0] + i1 = getfield_gc(p0, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_getfield_gc_pure(self): + S = lltype.GcStruct('S', ('x', llmemory.HiddenGcRef32)) + xdescr = get_field_descr(self.gc_ll_descr, S, 'x') + ops = parse(""" + [p0] + p1 = getfield_gc_pure(p0, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0] + i1 = getfield_gc_pure(p0, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_setfield_gc(self): + S = lltype.GcStruct('S', ('x', llmemory.HiddenGcRef32)) + xdescr = get_field_descr(self.gc_ll_descr, S, 'x') + writebarrierdescr = self.writebarrierdescr + ops = parse(""" + [p0, p1] + setfield_gc(p0, p1, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, p1] + cond_call_gc_wb(p0, p1, descr=writebarrierdescr) + i1 = hide_into_ptr32(p1) + setfield_raw(p0, i1, descr=xdescr) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_getarrayitem_gc(self): + A = lltype.GcArray(llmemory.HiddenGcRef32) + xdescr = get_array_descr(self.gc_ll_descr, A) + ops = parse(""" + [p0, i2] + p1 = getarrayitem_gc(p0, i2, 
descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, i2] + i1 = getarrayitem_gc(p0, i2, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_getarrayitem_gc_pure(self): + A = lltype.GcArray(llmemory.HiddenGcRef32) + xdescr = get_array_descr(self.gc_ll_descr, A) + ops = parse(""" + [p0, i2] + p1 = getarrayitem_gc_pure(p0, i2, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, i2] + i1 = getarrayitem_gc_pure(p0, i2, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_setarrayitem_gc(self): + A = lltype.GcArray(llmemory.HiddenGcRef32) + xdescr = get_array_descr(self.gc_ll_descr, A) + writebarrierdescr = self.writebarrierdescr + ops = parse(""" + [p0, p1, i2] + setarrayitem_gc(p0, i2, p1, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, p1, i2] + cond_call_gc_wb(p0, p1, descr=writebarrierdescr) + i1 = hide_into_ptr32(p1) + setarrayitem_raw(p0, i2, i1, descr=xdescr) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + class TestFrameworkMiniMark(TestFramework): gc = 'minimark' From commits-noreply at bitbucket.org Sun Apr 17 11:50:41 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:41 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test the call handling too. 
Message-ID: <20110417095041.1E5B3282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43427:9542bd1acfa1 Date: 2011-04-17 02:48 -0700 http://bitbucket.org/pypy/pypy/changeset/9542bd1acfa1/ Log: Test the call handling too. diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -708,5 +708,50 @@ self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) equaloplists(ops.operations, expected.operations) + def test_rewrite_assembler_hidden_callarg(self): + cdescr = get_call_descr(self.gc_ll_descr, [llmemory.HiddenGcRef32], + lltype.Void) + ops = parse(""" + [p0] + call(100, p0, descr=cdescr) + call_assembler(101, p0, descr=cdescr) + call_may_force(102, p0, descr=cdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0] + i0 = hide_into_ptr32(p0) + call(100, i0, descr=cdescr) + i1 = hide_into_ptr32(p0) + call_assembler(101, i1, descr=cdescr) + i2 = hide_into_ptr32(p0) + call_may_force(102, i2, descr=cdescr) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_callresult(self): + cdescr = get_call_descr(self.gc_ll_descr, [], llmemory.HiddenGcRef32) + ops = parse(""" + [] + p0 = call(100, descr=cdescr) + p1 = call_assembler(101, descr=cdescr) + p2 = call_may_force(102, descr=cdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [] + i0 = call(100, descr=cdescr) + p0 = show_from_ptr32(i0) + i1 = call_assembler(101, descr=cdescr) + p1 = show_from_ptr32(i1) + i2 = call_may_force(102, descr=cdescr) + p2 = show_from_ptr32(i2) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + class TestFrameworkMiniMark(TestFramework): gc = 
'minimark' From commits-noreply at bitbucket.org Sun Apr 17 11:50:41 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:41 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417095041.F301D282BA1@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43428:7d65297ee4b3 Date: 2011-04-17 02:49 -0700 http://bitbucket.org/pypy/pypy/changeset/7d65297ee4b3/ Log: merge heads diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -520,7 +520,7 @@ unerase = unerase_item else: erase = lambda x: x - unerase = lambda x, t: x + unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -438,6 +438,14 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap + extras = {} + for attr in "cdefghijklmnopqrstuvwxyz": + extras[attr] = W_Root() + obj2.setdictvalue(space, attr, extras[attr]) + assert obj2.getdictvalue(space, attr) is extras[attr] + for attr in "cdefghijklmnopqrstuvwxyz": + assert obj2.getdictvalue(space, attr) is extras[attr] + def test_specialized_class_compressptr(): test_specialized_class(compressptr=True) From commits-noreply at bitbucket.org Sun Apr 17 11:50:43 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:43 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417095043.8EED0282BDC@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43429:8bf7421532e0 Date: 2011-04-17 02:49 -0700 http://bitbucket.org/pypy/pypy/changeset/8bf7421532e0/ Log: merge heads From commits-noreply at bitbucket.org Sun Apr 17 11:50:49 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:49 +0200 
(CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Passing tests for rewrite_assembler. Message-ID: <20110417095049.6A2F4282BD8@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43426:6dde296107bb Date: 2011-04-17 02:44 -0700 http://bitbucket.org/pypy/pypy/changeset/6dde296107bb/ Log: Passing tests for rewrite_assembler. diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -298,6 +298,7 @@ gcrootfinder = 'asmgcc' gctransformer = 'framework' gcremovetypeptr = False + compressptr = True class FakeTranslator(object): config = config_ class FakeCPU(object): @@ -305,6 +306,9 @@ ptr = llmemory.cast_adr_to_ptr(adr, gc_ll_descr.WB_FUNCPTR) assert ptr._obj._callable == llop1._write_barrier_failing_case return 42 + class FakeWriteBarrierDescr(AbstractDescr): + def __eq__(self, other): + return 'WriteBarrierDescr' in repr(other) gcdescr = get_description(config_) translator = FakeTranslator() llop1 = FakeLLOp() @@ -314,6 +318,7 @@ self.llop1 = llop1 self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() + self.writebarrierdescr = FakeWriteBarrierDescr() def test_args_for_new(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -597,5 +602,111 @@ self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) equaloplists(ops.operations, expected.operations) + def test_rewrite_assembler_hidden_getfield_gc(self): + S = lltype.GcStruct('S', ('x', llmemory.HiddenGcRef32)) + xdescr = get_field_descr(self.gc_ll_descr, S, 'x') + ops = parse(""" + [p0] + p1 = getfield_gc(p0, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0] + i1 = getfield_gc(p0, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def 
test_rewrite_assembler_hidden_getfield_gc_pure(self): + S = lltype.GcStruct('S', ('x', llmemory.HiddenGcRef32)) + xdescr = get_field_descr(self.gc_ll_descr, S, 'x') + ops = parse(""" + [p0] + p1 = getfield_gc_pure(p0, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0] + i1 = getfield_gc_pure(p0, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_setfield_gc(self): + S = lltype.GcStruct('S', ('x', llmemory.HiddenGcRef32)) + xdescr = get_field_descr(self.gc_ll_descr, S, 'x') + writebarrierdescr = self.writebarrierdescr + ops = parse(""" + [p0, p1] + setfield_gc(p0, p1, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, p1] + cond_call_gc_wb(p0, p1, descr=writebarrierdescr) + i1 = hide_into_ptr32(p1) + setfield_raw(p0, i1, descr=xdescr) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_getarrayitem_gc(self): + A = lltype.GcArray(llmemory.HiddenGcRef32) + xdescr = get_array_descr(self.gc_ll_descr, A) + ops = parse(""" + [p0, i2] + p1 = getarrayitem_gc(p0, i2, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, i2] + i1 = getarrayitem_gc(p0, i2, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_getarrayitem_gc_pure(self): + A = lltype.GcArray(llmemory.HiddenGcRef32) + xdescr = get_array_descr(self.gc_ll_descr, A) + ops = parse(""" + [p0, i2] + p1 = getarrayitem_gc_pure(p0, i2, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, i2] + i1 = 
getarrayitem_gc_pure(p0, i2, descr=xdescr) + p1 = show_from_ptr32(i1) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_setarrayitem_gc(self): + A = lltype.GcArray(llmemory.HiddenGcRef32) + xdescr = get_array_descr(self.gc_ll_descr, A) + writebarrierdescr = self.writebarrierdescr + ops = parse(""" + [p0, p1, i2] + setarrayitem_gc(p0, i2, p1, descr=xdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0, p1, i2] + cond_call_gc_wb(p0, p1, descr=writebarrierdescr) + i1 = hide_into_ptr32(p1) + setarrayitem_raw(p0, i2, i1, descr=xdescr) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + class TestFrameworkMiniMark(TestFramework): gc = 'minimark' From commits-noreply at bitbucket.org Sun Apr 17 11:50:50 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:50 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Test the call handling too. Message-ID: <20110417095050.14E7F282BD8@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43427:9542bd1acfa1 Date: 2011-04-17 02:48 -0700 http://bitbucket.org/pypy/pypy/changeset/9542bd1acfa1/ Log: Test the call handling too. 
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -708,5 +708,50 @@ self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) equaloplists(ops.operations, expected.operations) + def test_rewrite_assembler_hidden_callarg(self): + cdescr = get_call_descr(self.gc_ll_descr, [llmemory.HiddenGcRef32], + lltype.Void) + ops = parse(""" + [p0] + call(100, p0, descr=cdescr) + call_assembler(101, p0, descr=cdescr) + call_may_force(102, p0, descr=cdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [p0] + i0 = hide_into_ptr32(p0) + call(100, i0, descr=cdescr) + i1 = hide_into_ptr32(p0) + call_assembler(101, i1, descr=cdescr) + i2 = hide_into_ptr32(p0) + call_may_force(102, i2, descr=cdescr) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + + def test_rewrite_assembler_hidden_callresult(self): + cdescr = get_call_descr(self.gc_ll_descr, [], llmemory.HiddenGcRef32) + ops = parse(""" + [] + p0 = call(100, descr=cdescr) + p1 = call_assembler(101, descr=cdescr) + p2 = call_may_force(102, descr=cdescr) + jump() + """, namespace=locals()) + expected = parse(""" + [] + i0 = call(100, descr=cdescr) + p0 = show_from_ptr32(i0) + i1 = call_assembler(101, descr=cdescr) + p1 = show_from_ptr32(i1) + i2 = call_may_force(102, descr=cdescr) + p2 = show_from_ptr32(i2) + jump() + """, namespace=locals()) + self.gc_ll_descr.rewrite_assembler(self.fake_cpu, ops.operations) + equaloplists(ops.operations, expected.operations) + class TestFrameworkMiniMark(TestFramework): gc = 'minimark' From commits-noreply at bitbucket.org Sun Apr 17 11:50:50 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:50 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: 
<20110417095050.D6672282BD7@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43428:7d65297ee4b3 Date: 2011-04-17 02:49 -0700 http://bitbucket.org/pypy/pypy/changeset/7d65297ee4b3/ Log: merge heads diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -520,7 +520,7 @@ unerase = unerase_item else: erase = lambda x: x - unerase = lambda x, t: x + unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -438,6 +438,14 @@ assert obj2.getdictvalue(space, "b") is w6 assert obj2.map is abmap + extras = {} + for attr in "cdefghijklmnopqrstuvwxyz": + extras[attr] = W_Root() + obj2.setdictvalue(space, attr, extras[attr]) + assert obj2.getdictvalue(space, attr) is extras[attr] + for attr in "cdefghijklmnopqrstuvwxyz": + assert obj2.getdictvalue(space, attr) is extras[attr] + def test_specialized_class_compressptr(): test_specialized_class(compressptr=True) From commits-noreply at bitbucket.org Sun Apr 17 11:50:51 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 11:50:51 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: merge heads Message-ID: <20110417095051.CD862282BD7@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43429:8bf7421532e0 Date: 2011-04-17 02:49 -0700 http://bitbucket.org/pypy/pypy/changeset/8bf7421532e0/ Log: merge heads From commits-noreply at bitbucket.org Sun Apr 17 12:04:00 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 12:04:00 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Unify a bit various helpers. 
Message-ID: <20110417100400.80A13282B9C@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43430:099f3bef8fd7 Date: 2011-04-17 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/099f3bef8fd7/ Log: Unify a bit various helpers. diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -369,7 +369,8 @@ return self.value def getref(self, PTR): - return lltype.cast_opaque_ptr(PTR, self.getref_base()) + from pypy.jit.metainterp.typesystem import llhelper + return llhelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def _get_hash_(self): @@ -426,7 +427,8 @@ return self.value def getref(self, OBJ): - return ootype.cast_from_object(OBJ, self.getref_base()) + from pypy.jit.metainterp.typesystem import oohelper + return oohelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def _get_hash_(self): @@ -611,7 +613,8 @@ return self.value def getref(self, PTR): - return lltype.cast_opaque_ptr(PTR, self.getref_base()) + from pypy.jit.metainterp.typesystem import llhelper + return llhelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def getaddr(self): @@ -658,7 +661,8 @@ return self.value def getref(self, OBJ): - return ootype.cast_from_object(OBJ, self.getref_base()) + from pypy.jit.metainterp.typesystem import oohelper + return oohelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def _get_hash_(self): diff --git a/pypy/jit/metainterp/typesystem.py b/pypy/jit/metainterp/typesystem.py --- a/pypy/jit/metainterp/typesystem.py +++ b/pypy/jit/metainterp/typesystem.py @@ -123,13 +123,28 @@ return heaptracker.adr2int(adr) def cast_from_ref(self, TYPE, value): + if isinstance(TYPE.TO, lltype.GcOpaqueType): + if TYPE == llmemory.GCREF: + return value + elif TYPE == llmemory.HiddenGcRef32: + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, value) + 
else: + raise TypeError(TYPE) return lltype.cast_opaque_ptr(TYPE, value) cast_from_ref._annspecialcase_ = 'specialize:arg(1)' def cast_to_ref(self, value): + TYPE = lltype.typeOf(value) + if isinstance(TYPE.TO, lltype.GcOpaqueType): + if TYPE == llmemory.GCREF: + return value + elif TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) + else: + raise TypeError(TYPE) return lltype.cast_opaque_ptr(llmemory.GCREF, value) cast_to_ref._annspecialcase_ = 'specialize:ll' - + def getaddr_for_box(self, box): return box.getaddr() diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -36,16 +36,10 @@ return result def cast_to_gcref(value): - TYPE = lltype.typeOf(value) - if isinstance(TYPE.TO, lltype.GcOpaqueType): - if TYPE == llmemory.GCREF: - return value - elif TYPE == llmemory.HiddenGcRef32: - return llop.show_from_ptr32(llmemory.GCREF, value) - else: - raise TypeError(TYPE) - else: - return lltype.cast_opaque_ptr(llmemory.GCREF, value) + # XXX unify a bit more + from pypy.jit.metainterp.typesystem import llhelper + return llhelper.cast_to_ref(value) +cast_to_gcref._annspecialcase_ = 'specialize:ll' # ____________________________________________________________ From commits-noreply at bitbucket.org Sun Apr 17 12:04:04 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 12:04:04 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Unify a bit various helpers. Message-ID: <20110417100404.A3972282BD7@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43430:099f3bef8fd7 Date: 2011-04-17 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/099f3bef8fd7/ Log: Unify a bit various helpers. 
diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -369,7 +369,8 @@ return self.value def getref(self, PTR): - return lltype.cast_opaque_ptr(PTR, self.getref_base()) + from pypy.jit.metainterp.typesystem import llhelper + return llhelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def _get_hash_(self): @@ -426,7 +427,8 @@ return self.value def getref(self, OBJ): - return ootype.cast_from_object(OBJ, self.getref_base()) + from pypy.jit.metainterp.typesystem import oohelper + return oohelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def _get_hash_(self): @@ -611,7 +613,8 @@ return self.value def getref(self, PTR): - return lltype.cast_opaque_ptr(PTR, self.getref_base()) + from pypy.jit.metainterp.typesystem import llhelper + return llhelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def getaddr(self): @@ -658,7 +661,8 @@ return self.value def getref(self, OBJ): - return ootype.cast_from_object(OBJ, self.getref_base()) + from pypy.jit.metainterp.typesystem import oohelper + return oohelper.cast_from_ref(PTR, self.value) getref._annspecialcase_ = 'specialize:arg(1)' def _get_hash_(self): diff --git a/pypy/jit/metainterp/typesystem.py b/pypy/jit/metainterp/typesystem.py --- a/pypy/jit/metainterp/typesystem.py +++ b/pypy/jit/metainterp/typesystem.py @@ -123,13 +123,28 @@ return heaptracker.adr2int(adr) def cast_from_ref(self, TYPE, value): + if isinstance(TYPE.TO, lltype.GcOpaqueType): + if TYPE == llmemory.GCREF: + return value + elif TYPE == llmemory.HiddenGcRef32: + return llop.hide_into_ptr32(llmemory.HiddenGcRef32, value) + else: + raise TypeError(TYPE) return lltype.cast_opaque_ptr(TYPE, value) cast_from_ref._annspecialcase_ = 'specialize:arg(1)' def cast_to_ref(self, value): + TYPE = lltype.typeOf(value) + if isinstance(TYPE.TO, lltype.GcOpaqueType): + if TYPE == 
llmemory.GCREF: + return value + elif TYPE == llmemory.HiddenGcRef32: + return llop.show_from_ptr32(llmemory.GCREF, value) + else: + raise TypeError(TYPE) return lltype.cast_opaque_ptr(llmemory.GCREF, value) cast_to_ref._annspecialcase_ = 'specialize:ll' - + def getaddr_for_box(self, box): return box.getaddr() diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -36,16 +36,10 @@ return result def cast_to_gcref(value): - TYPE = lltype.typeOf(value) - if isinstance(TYPE.TO, lltype.GcOpaqueType): - if TYPE == llmemory.GCREF: - return value - elif TYPE == llmemory.HiddenGcRef32: - return llop.show_from_ptr32(llmemory.GCREF, value) - else: - raise TypeError(TYPE) - else: - return lltype.cast_opaque_ptr(llmemory.GCREF, value) + # XXX unify a bit more + from pypy.jit.metainterp.typesystem import llhelper + return llhelper.cast_to_ref(value) +cast_to_gcref._annspecialcase_ = 'specialize:ll' # ____________________________________________________________ From commits-noreply at bitbucket.org Sun Apr 17 12:11:45 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 12:11:45 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix imports. Message-ID: <20110417101145.E46EC282B9C@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43431:9a42106d158e Date: 2011-04-17 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/9a42106d158e/ Log: Fix imports. 
diff --git a/pypy/jit/metainterp/typesystem.py b/pypy/jit/metainterp/typesystem.py --- a/pypy/jit/metainterp/typesystem.py +++ b/pypy/jit/metainterp/typesystem.py @@ -1,4 +1,5 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance, llstr, oostr from pypy.rpython.annlowlevel import cast_instance_to_base_ptr diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -1,5 +1,4 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass -from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated From commits-noreply at bitbucket.org Sun Apr 17 12:11:49 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 12:11:49 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix imports. Message-ID: <20110417101149.7EBA5282BAD@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43431:9a42106d158e Date: 2011-04-17 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/9a42106d158e/ Log: Fix imports. 
diff --git a/pypy/jit/metainterp/typesystem.py b/pypy/jit/metainterp/typesystem.py --- a/pypy/jit/metainterp/typesystem.py +++ b/pypy/jit/metainterp/typesystem.py @@ -1,4 +1,5 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass +from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance, llstr, oostr from pypy.rpython.annlowlevel import cast_instance_to_base_ptr diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -1,5 +1,4 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass -from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated From commits-noreply at bitbucket.org Sun Apr 17 18:51:33 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 18:51:33 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Bah :-( Message-ID: <20110417165133.47A41282B9C@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43432:50f0de807154 Date: 2011-04-17 18:48 +0200 http://bitbucket.org/pypy/pypy/changeset/50f0de807154/ Log: Bah :-( diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -523,7 +523,7 @@ while i >= 0: newarray[i] = self._callshapes[i] i -= 1 - lltype.free(self._callshapes, flavor='raw') + lltype.free(self._callshapes, flavor='raw', track_allocation=False) self._callshapes = newarray self._callshapes_maxlength = newlength From commits-noreply at bitbucket.org Sun Apr 17 18:51:33 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 18:51:33 +0200 (CEST) Subject: [pypy-svn] pypy default: Bah :-( Message-ID: <20110417165133.C8E67282B9C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43433:f7145e3e6182 Date: 
2011-04-17 18:48 +0200 http://bitbucket.org/pypy/pypy/changeset/f7145e3e6182/ Log: Bah :-( diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -511,7 +511,7 @@ while i >= 0: newarray[i] = self._callshapes[i] i -= 1 - lltype.free(self._callshapes, flavor='raw') + lltype.free(self._callshapes, flavor='raw', track_allocation=False) self._callshapes = newarray self._callshapes_maxlength = newlength From commits-noreply at bitbucket.org Sun Apr 17 18:51:51 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 18:51:51 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110417165151.224DB282B9C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43434:6493811adb56 Date: 2011-04-17 18:51 +0200 http://bitbucket.org/pypy/pypy/changeset/6493811adb56/ Log: merge heads diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,10 +262,6 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() - for name, attrdef in selfattrs.iteritems(): - if not attrdef.readonly and self.is_quasi_immutable(name): - ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) - classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -480,9 +476,11 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git 
a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -291,7 +291,6 @@ # that belong to this loop or to a bridge attached to it. # Filled by the frontend calling record_faildescr_index(). self.faildescr_indices = [] - self.invalidate_positions = [] debug_start("jit-mem-looptoken-alloc") debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -791,7 +791,6 @@ operations = None token = None call_pure_results = None - quasi_immutable_deps = None def __init__(self, name): self.name = name diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -341,14 +341,13 @@ return _struct(self, n, initialization='example') def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class RttiStruct(Struct): _runtime_type_info = None @@ -1030,8 +1029,6 @@ return None # null pointer if type(p._obj0) is int: return p # a pointer obtained by cast_int_to_ptr - if getattr(p._obj0, '_carry_around_for_tests', False): - return p # a pointer obtained by cast_instance_to_base_ptr container = obj._normalizedcontainer() if type(container) is int: # this must be an opaque ptr originating from an integer @@ -1884,8 +1881,8 @@ if self.__class__ is not other.__class__: return NotImplemented if hasattr(self, 'container') and hasattr(other, 'container'): - obj1 = self._normalizedcontainer() - obj2 = other._normalizedcontainer() + obj1 = 
self.container._normalizedcontainer() + obj2 = other.container._normalizedcontainer() return obj1 == obj2 else: return self is other @@ -1909,8 +1906,6 @@ # an integer, cast to a ptr, cast to an opaque if type(self.container) is int: return self.container - if getattr(self.container, '_carry_around_for_tests', False): - return self.container return self.container._normalizedcontainer() else: return _parentable._normalizedcontainer(self) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -578,7 +578,6 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None -_opaque_objs = [None] def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -617,10 +616,6 @@ T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with # the same valu - if getattr(container, 'llopaque', None): - no = len(_opaque_objs) - _opaque_objs.append(container) - return no * 2 + 1 else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -769,14 +764,10 @@ if isinstance(T, lltype.Typedef): T = T.OF if isinstance(T, lltype.Ptr): - ptrval = ctypes.cast(cobj, ctypes.c_void_p).value - if not cobj or not ptrval: # NULL pointer + if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): - if ptrval & 1: # a tagged pointer - gcref = _opaque_objs[ptrval // 2].hide() - return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) @@ -1237,9 +1228,7 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): - if self.intval & 1: - return _opaque_objs[self.intval // 2] - return force_cast(PTRTYPE, self.intval) + return force_cast(PTRTYPE, self.intval) ## def _cast_to_int(self): ## return 
self.intval diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ /dev/null @@ -1,266 +0,0 @@ - -import py - -from pypy.rpython.lltypesystem import lltype, llmemory, rclass -from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE -from pypy.jit.metainterp import typesystem -from pypy.jit.metainterp.quasiimmut import QuasiImmut -from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance -from pypy.jit.metainterp.test.test_basic import LLJitMixin -from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside - - -def test_get_current_qmut_instance(): - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - foo = lltype.malloc(STRUCT, zero=True) - foo.inst_x = 42 - assert not foo.mutate_x - - class FakeCPU: - ts = typesystem.llhelper - - def bh_getfield_gc_r(self, gcref, fielddescr): - assert fielddescr == mutatefielddescr - foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) - result = foo.mutate_x - return lltype.cast_opaque_ptr(llmemory.GCREF, result) - - def bh_setfield_gc_r(self, gcref, fielddescr, newvalue_gcref): - assert fielddescr == mutatefielddescr - foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) - newvalue = lltype.cast_opaque_ptr(rclass.OBJECTPTR, newvalue_gcref) - foo.mutate_x = newvalue - - cpu = FakeCPU() - mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') - - foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) - qmut1 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) - assert isinstance(qmut1, QuasiImmut) - qmut2 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) - assert qmut1 is qmut2 - - -class QuasiImmutTests(object): - - def 
test_simple_1(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - x -= 1 - return total - # - res = self.meta_interp(f, [100, 7]) - assert res == 700 - self.check_loops(getfield_gc=0, everywhere=True) - # - from pypy.jit.metainterp.warmspot import get_stats - loops = get_stats().loops - for loop in loops: - assert len(loop.quasi_immutable_deps) == 1 - assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) - - def test_nonopt_1(self): - myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def setup(x): - return [Foo(100 + i) for i in range(x)] - def f(a, x): - lst = setup(x) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(lst=lst, x=x, total=total) - # read a quasi-immutable field out of a variable - x -= 1 - total += lst[x].a - return total - # - assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7]) - assert res == 721 - self.check_loops(getfield_gc=1) - # - from pypy.jit.metainterp.warmspot import get_stats - loops = get_stats().loops - for loop in loops: - assert loop.quasi_immutable_deps is None - - def test_change_during_tracing_1(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - @dont_look_inside - def residual_call(foo): - foo.a += 1 - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - residual_call(foo) - x -= 1 - return total - # - assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7]) - assert res == 721 - 
self.check_loops(getfield_gc=1) - - def test_change_during_tracing_2(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - @dont_look_inside - def residual_call(foo, difference): - foo.a += difference - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - residual_call(foo, +1) - residual_call(foo, -1) - x -= 1 - return total - # - assert f(100, 7) == 700 - res = self.meta_interp(f, [100, 7]) - assert res == 700 - self.check_loops(getfield_gc=1) - - def test_change_invalidate_reentering(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def f(foo, x): - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - x -= 1 - return total - def g(a, x): - foo = Foo(a) - res1 = f(foo, x) - foo.a += 1 # invalidation, while the jit is not running - res2 = f(foo, x) # should still mark the loop as invalid - return res1 * 1000 + res2 - # - assert g(100, 7) == 700707 - res = self.meta_interp(g, [100, 7]) - assert res == 700707 - self.check_loops(getfield_gc=0) - - def test_invalidate_while_running(self): - jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - - def external(foo, v): - if v: - foo.a = 2 - - def f(foo): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(i=i, foo=foo, total=total) - external(foo, i > 7) - i += 1 - total += foo.a - return total - - def g(): - return f(Foo(1)) - - assert self.meta_interp(g, [], policy=StopAtXPolicy(external)) == g() - - def test_invalidate_by_setfield(self): - py.test.skip("Not implemented") - jitdriver = 
JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - - def f(foo, bc): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(bc=bc, i=i, foo=foo, total=total) - if bc == 0: - f(foo, 1) - if bc == 1: - foo.a = int(i > 5) - i += 1 - total += foo.a - return total - - def g(): - return f(Foo(1), 0) - - assert self.meta_interp(g, []) == g() - - def test_invalidate_bridge(self): - jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - - def f(foo): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(i=i, total=total, foo=foo) - if i > 5: - total += foo.a - else: - total += 2*foo.a - i += 1 - return total - - def main(): - foo = Foo() - foo.a = 1 - total = f(foo) - foo.a = 2 - total += f(foo) - return total - - res = self.meta_interp(main, []) - assert res == main() - -class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py deleted file mode 100644 --- a/pypy/jit/metainterp/quasiimmut.py +++ /dev/null @@ -1,116 +0,0 @@ -import weakref -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE -from pypy.rpython.lltypesystem import lltype, rclass -from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.jit.metainterp.history import AbstractDescr - - -def is_quasi_immutable(STRUCT, fieldname): - imm_fields = STRUCT._hints.get('immutable_fields') - return (imm_fields is not None and - imm_fields.fields.get(fieldname) is IR_QUASI_IMMUTABLE) - -def get_mutate_field_name(fieldname): - if fieldname.startswith('inst_'): # lltype - return 'mutate_' + fieldname[5:] - elif fieldname.startswith('o'): # ootype - return 'mutate_' + fieldname[1:] - else: - raise AssertionError(fieldname) - -def get_current_qmut_instance(cpu, gcref, mutatefielddescr): - """Returns the current QuasiImmut instance in the field, - 
possibly creating one. - """ - # XXX this is broken on x86 - qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) - if qmut_gcref: - qmut = QuasiImmut.show(cpu, qmut_gcref) - else: - qmut = QuasiImmut(cpu) - cpu.bh_setfield_gc_r(gcref, mutatefielddescr, qmut.hide()) - return qmut - -def make_invalidation_function(STRUCT, mutatefieldname): - # - def _invalidate_now(p): - qmut_ptr = getattr(p, mutatefieldname) - setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) - qmut = cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) - qmut.invalidate() - _invalidate_now._dont_inline_ = True - # - def invalidation(p): - if getattr(p, mutatefieldname): - _invalidate_now(p) - # - return invalidation - - -class QuasiImmut(object): - llopaque = True - - def __init__(self, cpu): - self.cpu = cpu - # list of weakrefs to the LoopTokens that must be invalidated if - # this value ever changes - self.looptokens_wrefs = [] - self.compress_limit = 30 - - def hide(self): - qmut_ptr = self.cpu.ts.cast_instance_to_base_ref(self) - return self.cpu.ts.cast_to_ref(qmut_ptr) - - @staticmethod - def show(cpu, qmut_gcref): - qmut_ptr = cpu.ts.cast_to_baseclass(qmut_gcref) - return cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) - - def register_loop_token(self, wref_looptoken): - if len(self.looptokens_wrefs) > self.compress_limit: - self.compress_looptokens_list() - self.looptokens_wrefs.append(wref_looptoken) - - def compress_looptokens_list(self): - self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs - if wref() is not None] - self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 - - def invalidate(self): - # When this is called, all the loops that we record become - # invalid and must not be called again, nor returned to. 
- wrefs = self.looptokens_wrefs - self.looptokens_wrefs = [] - for wref in wrefs: - looptoken = wref() - if looptoken is not None: - self.cpu.invalidate_loop(looptoken) - - -class QuasiImmutDescr(AbstractDescr): - def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): - self.cpu = cpu - self.structbox = structbox - self.fielddescr = fielddescr - self.mutatefielddescr = mutatefielddescr - gcref = structbox.getref_base() - self.qmut = get_current_qmut_instance(cpu, gcref, mutatefielddescr) - self.constantfieldbox = self.get_current_constant_fieldvalue() - - def get_current_constant_fieldvalue(self): - from pypy.jit.metainterp import executor - from pypy.jit.metainterp.resoperation import rop - fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC, - self.fielddescr, self.structbox) - return fieldbox.constbox() - - def is_still_valid(self): - cpu = self.cpu - gcref = self.structbox.getref_base() - qmut = get_current_qmut_instance(cpu, gcref, self.mutatefielddescr) - if qmut is not self.qmut: - return False - else: - currentbox = self.get_current_constant_fieldvalue() - assert self.constantfieldbox.same_constant(currentbox) - return True diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,7 +2,6 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -46,7 +45,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : 
IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) + XY, {'inst_x' : "", 'inst_node' : ""}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -211,8 +210,7 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : IR_IMMUTABLE, - 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) + XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -41,8 +41,7 @@ # during preamble but to keep it during the loop optimizations.append(o) - if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: optimizations.append(OptSimplify()) if inline_short_preamble: diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -7,9 +7,8 @@ from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.codewriter.policy import log, check_skip_operation +from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem -from pypy.jit.metainterp import quasiimmut from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj @@ -562,8 +561,7 @@ arraydescr) return [] # check for _immutable_fields_ 
hints - immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) - if immut: + if v_inst.concretetype.TO._immutable_field(c_fieldname.value): if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): @@ -576,21 +574,10 @@ descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) kind = getkind(RESULT)[0] - op1 = SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), - [v_inst, descr], op.result) - # - if immut is quasiimmut.IR_QUASI_IMMUTABLE: - descr1 = self.cpu.fielddescrof( - v_inst.concretetype.TO, - quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), - SpaceOperation('record_quasiimmut_field', - [v_inst, descr, descr1], None), - op1] - return op1 + return SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), + [v_inst, descr], op.result) def rewrite_op_setfield(self, op): - check_skip_operation(op) # just to check it doesn't raise if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -257,7 +257,6 @@ self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False - self.quasi_immutable_deps = None self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results @@ -310,7 +309,6 @@ new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None - new.quasi_immutable_deps = self.quasi_immutable_deps return new @@ -412,7 +410,6 @@ self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations - self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) 
diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5717,35 +5717,8 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() - def test_quasi_immut(self): - ops = """ - [p0, p1, i0] - quasiimmut_field(p0, descr=quasiimmutdescr) - guard_not_invalidated() [] - i1 = getfield_gc(p0, descr=quasifielddescr) - jump(p1, p0, i1) - """ - expected = """ - [p0, p1, i0] - i1 = getfield_gc(p0, descr=quasifielddescr) - jump(p1, p0, i1) - """ - self.optimize_loop(ops, expected) - - def test_quasi_immut_2(self): - ops = """ - [] - quasiimmut_field(ConstPtr(myptr), descr=quasiimmutdescr) - guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) - jump() - """ - expected = """ - [] - guard_not_invalidated() [] - jump() - """ - self.optimize_loop(ops, expected, expected) + + ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/rpython/test/test_annlowlevel.py b/pypy/rpython/test/test_annlowlevel.py --- a/pypy/rpython/test/test_annlowlevel.py +++ b/pypy/rpython/test/test_annlowlevel.py @@ -4,12 +4,9 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode -from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, oostr from pypy.rpython.annlowlevel import hlunicode, llunicode -from pypy.rpython import annlowlevel - class TestLLType(BaseRtypingTest, LLRtypeMixin): def test_hlstr(self): @@ -56,15 +53,6 @@ res = self.interpret(f, [self.unicode_to_ll(u"abc")]) assert res == 3 - def test_cast_instance_to_base_ptr(self): - class X(object): - pass - x = X() - ptr = annlowlevel.cast_instance_to_base_ptr(x) - assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() - y = 
annlowlevel.cast_base_ptr_to_instance(X, ptr) - assert y is x - class TestOOType(BaseRtypingTest, OORtypeMixin): def test_hlstr(self): @@ -83,12 +71,3 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 - - def test_cast_instance_to_base_obj(self): - class X(object): - pass - x = X() - obj = annlowlevel.cast_instance_to_base_obj(x) - assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() - y = annlowlevel.cast_base_ptr_to_instance(X, obj) - assert y is x diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -286,10 +286,6 @@ raise ValueError("CALL_ASSEMBLER not supported") llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) - def invalidate_loop(self, looptoken): - for loop in looptoken.compiled_loop_token.loop_and_bridges: - loop._obj.externalobj.invalid = True - # ---------- def sizeof(self, S): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -3,7 +3,6 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE -from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, @@ -13,7 +12,6 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int from pypy.jit.tool.oparser import parse -from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -64,18 +62,6 @@ nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') - accessor = 
FieldListAccessor() - accessor.initialize(None, {'inst_field': IR_QUASI_IMMUTABLE}) - QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed), - ('mutate_field', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - quasi = lltype.malloc(QUASI, immortal=True) - quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field') - quasibox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, quasi)) - quasiimmutdescr = QuasiImmutDescr(cpu, quasibox, - quasifielddescr, - cpu.fielddescrof(QUASI, 'mutate_field')) - NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT), ('ref', lltype.Ptr(OBJECT))) nodeobj = lltype.malloc(NODEOBJ) diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,7 +1,6 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -11,7 +10,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo(object): +class VirtualizableInfo: TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -34,13 +33,11 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, tp in all_fields.iteritems(): - if tp == IR_ARRAY_IMMUTABLE: + for name, suffix in all_fields.iteritems(): + if suffix == '[*]': array_fields.append(name) - elif tp == IR_IMMUTABLE: + else: static_fields.append(name) - else: - raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- 
a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -492,8 +492,6 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) - consider_guard_not_invalidated = consider_guard_no_exception - def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1293,28 +1293,6 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') - def test_opaque_tagged_pointers(self): - from pypy.rpython.annlowlevel import cast_base_ptr_to_instance - from pypy.rpython.annlowlevel import cast_instance_to_base_ptr - from pypy.rpython.lltypesystem import rclass - - class Opaque(object): - llopaque = True - - def hide(self): - ptr = cast_instance_to_base_ptr(self) - return lltype.cast_opaque_ptr(llmemory.GCREF, ptr) - - @staticmethod - def show(gcref): - ptr = lltype.cast_opaque_ptr(lltype.Ptr(rclass.OBJECT), gcref) - return cast_base_ptr_to_instance(Opaque, ptr) - - opaque = Opaque() - round = ctypes2lltype(llmemory.GCREF, lltype2ctypes(opaque.hide())) - assert Opaque.show(round) is opaque - - class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -119,7 +119,6 @@ self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - self._remove_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -379,43 +378,6 @@ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) - def optimize_QUASIIMMUT_FIELD(self, op): - # 
Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC(s, descr='inst_x') - # If 's' is a constant (after optimizations), then we make 's.inst_x' - # a constant too, and we rely on the rest of the optimizations to - # constant-fold the following getfield_gc. - structvalue = self.getvalue(op.getarg(0)) - if not structvalue.is_constant(): - self._remove_guard_not_invalidated = True - return # not a constant at all; ignore QUASIIMMUT_FIELD - # - from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr - qmutdescr = op.getdescr() - assert isinstance(qmutdescr, QuasiImmutDescr) - # check that the value is still correct; it could have changed - # already between the tracing and now. In this case, we are - # simply ignoring the QUASIIMMUT_FIELD hint and compiling it - # as a regular getfield. - if not qmutdescr.is_still_valid(): - self._remove_guard_not_invalidated = True - return - # record as an out-of-line guard - if self.optimizer.quasi_immutable_deps is None: - self.optimizer.quasi_immutable_deps = {} - self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None - # perform the replacement in the list of operations - fieldvalue = self.getvalue(qmutdescr.constantfieldbox) - cf = self.field_cache(qmutdescr.fielddescr) - cf.remember_field_value(structvalue, fieldvalue) - self._remove_guard_not_invalidated = False - - def optimize_GUARD_NOT_INVALIDATED(self, op): - if self._remove_guard_not_invalidated: - return - self._remove_guard_not_invalidated = False - self.emit_operation(op) - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,7 +312,6 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, - rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git 
a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,8 +3,7 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor, inputconst -from pypy.rpython.lltypesystem.lltype import Void +from pypy.rpython.rmodel import Repr, getgcflavor class FieldListAccessor(object): @@ -13,8 +12,6 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields - for x in fields.itervalues(): - assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -22,20 +19,6 @@ def _freeze_(self): return True -class ImmutableRanking(object): - def __init__(self, name, is_immutable): - self.name = name - self.is_immutable = is_immutable - def __nonzero__(self): - return self.is_immutable - def __repr__(self): - return '<%s>' % self.name - -IR_MUTABLE = ImmutableRanking('mutable', False) -IR_IMMUTABLE = ImmutableRanking('immutable', True) -IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) -IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) - class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -172,8 +155,7 @@ self.classdef = classdef def _setup_repr(self): - if self.classdef is None: - self.immutable_field_set = set() + pass def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -185,13 +167,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_set = set() # unless overwritten below + self.immutable_field_list = [] # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - 
self.immutable_field_set = set(immutable_fields.value) + self.immutable_field_list = immutable_fields.value accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -219,35 +201,33 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = set() + immutable_fields = [] rbase = self while rbase.classdef is not None: - immutable_fields.update(rbase.immutable_field_set) + immutable_fields += rbase.immutable_field_list rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - ranking = {} + with_suffix = {} for name in fields: - if name.endswith('[*]'): # for virtualizables' lists + if name.endswith('[*]'): name = name[:-3] - rank = IR_ARRAY_IMMUTABLE - elif name.endswith('?'): # a quasi-immutable field - name = name[:-1] - rank = IR_QUASI_IMMUTABLE - else: # a regular immutable/green field - rank = IR_IMMUTABLE + suffix = '[*]' + else: + suffix = '' try: mangled_name, r = self._get_field(name) except KeyError: continue - ranking[mangled_name] = rank - accessor.initialize(self.object_type, ranking) - return ranking + with_suffix[mangled_name] = suffix + accessor.initialize(self.object_type, with_suffix) + return with_suffix def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -268,30 +248,12 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if (fieldname in self.immutable_field_set or - (fieldname + '?') in self.immutable_field_set): + if fieldname in self.immutable_field_list: raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - - def hook_setfield(self, vinst, fieldname, llops): - if self.is_quasi_immutable(fieldname): - c_fieldname = inputconst(Void, 'mutate_' + fieldname) - llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) - - def is_quasi_immutable(self, fieldname): - search = fieldname + '?' 
- rbase = self - while rbase.classdef is not None: - if search in rbase.immutable_field_set: - return True - rbase = rbase.rbase - return False - def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,5 +1,5 @@ from pypy.translator.simplify import get_funcobj -from pypy.jit.metainterp import history, quasiimmut +from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir @@ -85,20 +85,12 @@ getkind(v.concretetype, supports_floats, supports_longlong) v = op.result getkind(v.concretetype, supports_floats, supports_longlong) - check_skip_operation(op) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) return True return False -def check_skip_operation(op): - if op.opname == 'setfield': - if quasiimmut.is_quasi_immutable(op.args[0].concretetype.TO, - op.args[1].value): - raise NotImplementedError("write to quasi-immutable field %r" - % (op.args[1].value,)) - # ____________________________________________________________ class StopAtXPolicy(JitPolicy): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -167,7 +167,6 @@ class CompiledLoop(object): has_been_freed = False - invalid = False def __init__(self): self.inputargs = [] @@ -934,9 +933,6 @@ if forced: raise GuardFailed - def op_guard_not_invalidated(self, descr): - if self.loop.invalid: - raise GuardFailed class OOFrame(Frame): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -947,43 +947,3 @@ assert op1.args[1] == 'calldescr-%d' % 
effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) assert op1.args[3] == ListOfKind('ref', [v1, v2]) - -def test_quasi_immutable(): - from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - v2 = varoftype(lltype.Signed) - STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], - v2) - tr = Transformer(FakeCPU()) - [_, op1, op2] = tr.rewrite_operation(op) - assert op1.opname == 'record_quasiimmut_field' - assert len(op1.args) == 3 - assert op1.args[0] == v_x - assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') - assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') - assert op1.result is None - assert op2.opname == 'getfield_gc_i' - assert len(op2.args) == 2 - assert op2.args[0] == v_x - assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') - assert op2.result is op.result - -def test_quasi_immutable_setfield(): - from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - v1 = varoftype(lltype.Signed) - STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('setfield', - [v_x, Constant('inst_x', lltype.Void), v1], - varoftype(lltype.Void)) - tr = Transformer(FakeCPU()) - raises(NotImplementedError, tr.rewrite_operation, op) diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -171,8 +171,7 @@ class 
VirtualizableAnalyzer(BoolGraphAnalyzer): def analyze_simple_operation(self, op, graphinfo): return op.opname in ('jit_force_virtualizable', - 'jit_force_virtual', - 'jit_force_quasi_immutable') + 'jit_force_virtual') # ____________________________________________________________ diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,16 +555,6 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any - @arguments("box", "descr", "descr", "orgpc") - def opimpl_record_quasiimmut_field(self, box, fielddescr, - mutatefielddescr, orgpc): - from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr - cpu = self.metainterp.cpu - descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) - self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], - None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] @@ -1086,8 +1076,6 @@ if opnum == rop.GUARD_NOT_FORCED: resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() guard_op = metainterp.history.record(opnum, moreargs, None, @@ -1860,9 +1848,6 @@ self.handle_possible_exception() except ChangeFrame: pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass # XXX we want to do something special in resume descr, - # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected self.execute_raised(OverflowError(), constant=True) try: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -76,11 +76,6 @@ 
op.setdescr(None) # clear reference, mostly for tests if not we_are_translated(): op._jumptarget_number = descr.number - # record this looptoken on the QuasiImmut used in the code - if loop.quasi_immutable_deps is not None: - for qmut in loop.quasi_immutable_deps: - qmut.register_loop_token(wref) - # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): @@ -401,12 +396,6 @@ self.copy_all_attributes_into(res) return res -class ResumeGuardNotInvalidated(ResumeGuardDescr): - def _clone_if_mutable(self): - res = ResumeGuardNotInvalidated() - self.copy_all_attributes_into(res) - return res - class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,7 +54,6 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant - from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -86,50 +85,38 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind 
- assert not llop.getfield.is_pure([v_s3, Constant('y')]) + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + assert llop.getfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - # - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - else: - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') - 
py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'x') - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') - py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'y') + assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -145,14 +145,6 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) - def invalidate_loop(self, looptoken): - from pypy.jit.backend.x86 import codebuf - - for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: - mc = codebuf.MachineCodeBlockWrapper() - mc.JMP_l(tgt) - mc.copy_to_raw_memory(addr - 1) - class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,9 +525,6 @@ def op_jit_force_virtual(x): return x -def op_jit_force_quasi_immutable(*args): - pass - def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,7 +5,6 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -117,8 +116,8 @@ 
TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, - self.prefix + 'v2': IR_ARRAY_IMMUTABLE} + assert accessor.fields == {self.prefix + 'v1' : "", + self.prefix + 'v2': "[*]"} # def fn2(n): Base().base1 = 42 diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1166,11 +1166,6 @@ def bhimpl_setfield_raw_f(cpu, struct, fielddescr, newvalue): cpu.bh_setfield_raw_f(struct, fielddescr, newvalue) - @arguments("cpu", "r", "d", "d") - def bhimpl_record_quasiimmut_field(self, struct, fielddescr, - mutatefielddescr): - pass - @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) @@ -1292,8 +1287,6 @@ # We get here because it used to overflow, but now it no longer # does. pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass else: from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,7 +433,6 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), - 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/jit/backend/x86/test/test_quasiimmut.py b/pypy/jit/backend/x86/test/test_quasiimmut.py deleted file mode 100644 --- a/pypy/jit/backend/x86/test/test_quasiimmut.py +++ /dev/null @@ -1,9 +0,0 @@ - -import py -from pypy.jit.backend.x86.test.test_basic import Jit386Mixin -from pypy.jit.metainterp.test import test_quasiimmut - -class TestLoopSpec(Jit386Mixin, 
test_quasiimmut.QuasiImmutTests): - # for the individual tests see - # ====> ../../../metainterp/test/test_loop.py - pass diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if self.my_redirected_fields.get(cname.value): + if cname.value in self.my_redirected_fields: cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -380,7 +380,6 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', - 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -476,7 +475,6 @@ 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', - 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,8 +794,15 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x': 1234})}) - assert S._immutable_field('x') == 1234 + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': 
FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' def test_typedef(): T = Typedef(Signed, 'T') diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -267,8 +267,6 @@ virtual_state = modifier.get_virtual_state(jump_args) loop.preamble.operations = self.optimizer.newoperations - loop.preamble.quasi_immutable_deps = ( - self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.reconstruct_for_next_iteration() inputargs = self.inline(self.cloned_operations, loop.inputargs, jump_args) @@ -278,7 +276,6 @@ loop.preamble.operations.append(jmp) loop.operations = self.optimizer.newoperations - loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() assert isinstance(start_resumedescr, ResumeGuardDescr) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,7 +85,6 @@ 'nslots', 'instancetypedef', 'terminator', - '_version_tag?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -131,16 +131,6 @@ def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') -def find_force_quasi_immutable(graphs): - results = [] - for graph in graphs: - for block in graph.iterblocks(): - for i in range(len(block.operations)): - op = block.operations[i] - if op.opname == 'jit_force_quasi_immutable': - results.append((graph, block, i)) - return results - def get_stats(): return pyjitpl._warmrunnerdesc.stats @@ -197,7 +187,6 @@ self.rewrite_can_enter_jits() self.rewrite_set_param() self.rewrite_force_virtual(vrefinfo) - self.rewrite_force_quasi_immutable() 
self.add_finish() self.metainterp_sd.finish_setup(self.codewriter) @@ -853,28 +842,6 @@ all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) - def replace_force_quasiimmut_with_direct_call(self, op): - ARG = op.args[0].concretetype - mutatefieldname = op.args[1].value - key = (ARG, mutatefieldname) - if key in self._cache_force_quasiimmed_funcs: - cptr = self._cache_force_quasiimmed_funcs[key] - else: - from pypy.jit.metainterp import quasiimmut - func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) - FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) - llptr = self.helper_func(FUNC, func) - cptr = Constant(llptr, FUNC) - self._cache_force_quasiimmed_funcs[key] = cptr - op.opname = 'direct_call' - op.args = [cptr, op.args[0]] - - def rewrite_force_quasi_immutable(self): - self._cache_force_quasiimmed_funcs = {} - graphs = self.translator.graphs - for graph, block, i in find_force_quasi_immutable(graphs): - self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - # ____________________________________________________________ def execute_token(self, loop_token): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,7 +322,6 @@ # before they are fully built, to avoid strange bugs in case # of recursion where other code would uses these # partially-initialized dicts. 
- AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -371,11 +370,6 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True - - for name, attrdef in attrs: - if not attrdef.readonly and self.is_quasi_immutable(name): - llfields.append(('mutate_' + name, OBJECTPTR)) - object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -494,7 +488,6 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -502,6 +495,9 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -480,26 +480,7 @@ # ____________________________________________________________ def cast_object_to_ptr(PTR, object): - """NOT_RPYTHON: hack. The object may be disguised as a PTR now. - Limited to casting a given object to a single type. 
- """ - if isinstance(PTR, lltype.Ptr): - TO = PTR.TO - else: - TO = PTR - if not hasattr(object, '_carry_around_for_tests'): - assert not hasattr(object, '_TYPE') - object._carry_around_for_tests = True - object._TYPE = TO - else: - assert object._TYPE == TO - # - if isinstance(PTR, lltype.Ptr): - return lltype._ptr(PTR, object, True) - elif isinstance(PTR, ootype.Instance): - return object - else: - raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) + raise NotImplementedError("cast_object_to_ptr") def cast_instance_to_base_ptr(instance): return cast_object_to_ptr(base_ptr_lltype(), instance) @@ -554,13 +535,7 @@ # ____________________________________________________________ def cast_base_ptr_to_instance(Class, ptr): - """NOT_RPYTHON: hack. Reverse the hacking done in cast_object_to_ptr().""" - if isinstance(lltype.typeOf(ptr), lltype.Ptr): - ptr = ptr._as_obj() - if not isinstance(ptr, Class): - raise NotImplementedError("cast_base_ptr_to_instance: casting %r to %r" - % (ptr, Class)) - return ptr + raise NotImplementedError("cast_base_ptr_to_instance") class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry): _about_ = cast_base_ptr_to_instance diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -20,9 +20,6 @@ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) self.emit_operation(op) - def optimize_QUASIIMMUT_FIELD(self, op): - pass - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,14 +268,13 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 
'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class SpecializableType(OOType): diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,8 +5,6 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -748,10 +746,8 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_ARRAY_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_ARRAY_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ + accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -769,8 +765,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : ""} or \ + accessor.fields == {"ox" : ""} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -789,10 +785,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert 
accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ + accessor.fields == {"ox" : "", "oy" : ""} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -810,8 +804,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_y" : ""} or \ + accessor.fields == {"oy" : ""} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -855,8 +849,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ - accessor.fields == {"ov": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -901,37 +895,6 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] - def test_quasi_immutable(self): - from pypy.jit.metainterp.typesystem import deref - class A(object): - _immutable_fields_ = ['x', 'y', 'a?', 'b?'] - class B(A): - pass - def f(): - a = A() - a.x = 42 - a.a = 142 - b = B() - b.x = 43 - b.y = 41 - b.a = 44 - b.b = 45 - return B() - t, typer, graph = self.gengraph(f, []) - B_TYPE = deref(graph.getreturnvar().concretetype) - accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE, - "inst_b": IR_QUASI_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE, - "oa": IR_QUASI_IMMUTABLE, - "ob": 
IR_QUASI_IMMUTABLE} # for ootype - found = [] - for op in graph.startblock.operations: - if op.opname == 'jit_force_quasi_immutable': - found.append(op.args[1].value) - assert found == ['mutate_a', 'mutate_a', 'mutate_b'] - class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -48,12 +48,11 @@ class GuardToken(object): - def __init__(self, faildescr, failargs, fail_locs, exc, has_jump): + def __init__(self, faildescr, failargs, fail_locs, exc): self.faildescr = faildescr self.failargs = failargs self.fail_locs = fail_locs self.exc = exc - self.has_jump = has_jump DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) @@ -134,7 +133,6 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" self.current_clt = looptoken.compiled_loop_token - self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -143,7 +141,6 @@ allblocks) def teardown(self): - self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -438,24 +435,15 @@ # tok.faildescr._x86_adr_jump_offset to contain the raw address of # the 4-byte target field in the JMP/Jcond instruction, and patch # the field in question to point (initially) to the recovery stub - inv_counter = 0 - clt = self.current_clt for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset tok.faildescr._x86_adr_jump_offset = addr relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - if tok.has_jump: - mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(relative_target) - mc.copy_to_raw_memory(addr) - else: - # guard not invalidate, patch where it jumps - pos, _ = self.invalidate_positions[inv_counter] - 
clt.invalidate_positions.append((pos + rawstart, - relative_target)) - inv_counter += 1 + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(relative_target) + mc.copy_to_raw_memory(addr) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1459,13 +1447,6 @@ self.mc.CMP(heap(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') - def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, - locs, ign_2): - pos = self.mc.get_relative_pos() + 1 # after potential jmp - guard_token.pos_jump_offset = pos - self.invalidate_positions.append((pos, 0)) - self.pending_guard_tokens.append(guard_token) - def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] @@ -1564,8 +1545,7 @@ exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return GuardToken(faildescr, failargs, fail_locs, exc, has_jump= - guard_opnum != rop.GUARD_NOT_INVALIDATED) + return GuardToken(faildescr, failargs, fail_locs, exc) def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. 
We try to From commits-noreply at bitbucket.org Sun Apr 17 18:51:55 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 18:51:55 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Bah :-( Message-ID: <20110417165155.70CE72A202C@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43432:50f0de807154 Date: 2011-04-17 18:48 +0200 http://bitbucket.org/pypy/pypy/changeset/50f0de807154/ Log: Bah :-( diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -523,7 +523,7 @@ while i >= 0: newarray[i] = self._callshapes[i] i -= 1 - lltype.free(self._callshapes, flavor='raw') + lltype.free(self._callshapes, flavor='raw', track_allocation=False) self._callshapes = newarray self._callshapes_maxlength = newlength From commits-noreply at bitbucket.org Sun Apr 17 18:51:56 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 18:51:56 +0200 (CEST) Subject: [pypy-svn] pypy default: Bah :-( Message-ID: <20110417165156.19EF82A202C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43433:f7145e3e6182 Date: 2011-04-17 18:48 +0200 http://bitbucket.org/pypy/pypy/changeset/f7145e3e6182/ Log: Bah :-( diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -511,7 +511,7 @@ while i >= 0: newarray[i] = self._callshapes[i] i -= 1 - lltype.free(self._callshapes, flavor='raw') + lltype.free(self._callshapes, flavor='raw', track_allocation=False) self._callshapes = newarray self._callshapes_maxlength = newlength From commits-noreply at bitbucket.org Sun Apr 17 18:52:09 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sun, 17 Apr 2011 18:52:09 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110417165209.A1305282B9C@codespeak.net> Author: Armin Rigo Branch: Changeset: r43434:6493811adb56 Date: 
2011-04-17 18:51 +0200 http://bitbucket.org/pypy/pypy/changeset/6493811adb56/ Log: merge heads diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py --- a/pypy/rpython/ootypesystem/rclass.py +++ b/pypy/rpython/ootypesystem/rclass.py @@ -262,10 +262,6 @@ self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef) self.rbase.setup() - for name, attrdef in selfattrs.iteritems(): - if not attrdef.readonly and self.is_quasi_immutable(name): - ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT}) - classattributes = {} baseInstance = self.lowleveltype._superclass classrepr = getclassrepr(self.rtyper, self.classdef) @@ -480,9 +476,11 @@ mangled_name = mangle(attr, self.rtyper.getconfig()) cname = inputconst(ootype.Void, mangled_name) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('oosetfield', [vinst, cname, vvalue]) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def rtype_is_true(self, hop): vinst, = hop.inputargs(self) return hop.genop('oononnull', [vinst], resulttype=ootype.Bool) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -291,7 +291,6 @@ # that belong to this loop or to a bridge attached to it. # Filled by the frontend calling record_faildescr_index(). 
self.faildescr_indices = [] - self.invalidate_positions = [] debug_start("jit-mem-looptoken-alloc") debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -791,7 +791,6 @@ operations = None token = None call_pure_results = None - quasi_immutable_deps = None def __init__(self, name): self.name = name diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -341,14 +341,13 @@ return _struct(self, n, initialization='example') def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class RttiStruct(Struct): _runtime_type_info = None @@ -1030,8 +1029,6 @@ return None # null pointer if type(p._obj0) is int: return p # a pointer obtained by cast_int_to_ptr - if getattr(p._obj0, '_carry_around_for_tests', False): - return p # a pointer obtained by cast_instance_to_base_ptr container = obj._normalizedcontainer() if type(container) is int: # this must be an opaque ptr originating from an integer @@ -1884,8 +1881,8 @@ if self.__class__ is not other.__class__: return NotImplemented if hasattr(self, 'container') and hasattr(other, 'container'): - obj1 = self._normalizedcontainer() - obj2 = other._normalizedcontainer() + obj1 = self.container._normalizedcontainer() + obj2 = other.container._normalizedcontainer() return obj1 == obj2 else: return self is other @@ -1909,8 +1906,6 @@ # an integer, cast to a ptr, cast to an opaque if type(self.container) is int: return self.container - if getattr(self.container, 
'_carry_around_for_tests', False): - return self.container return self.container._normalizedcontainer() else: return _parentable._normalizedcontainer(self) diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -578,7 +578,6 @@ _all_callbacks_results = [] _int2obj = {} _callback_exc_info = None -_opaque_objs = [None] def get_rtyper(): llinterp = LLInterpreter.current_interpreter @@ -617,10 +616,6 @@ T = lltype.Ptr(lltype.typeOf(container)) # otherwise it came from integer and we want a c_void_p with # the same valu - if getattr(container, 'llopaque', None): - no = len(_opaque_objs) - _opaque_objs.append(container) - return no * 2 + 1 else: container = llobj._obj if isinstance(T.TO, lltype.FuncType): @@ -769,14 +764,10 @@ if isinstance(T, lltype.Typedef): T = T.OF if isinstance(T, lltype.Ptr): - ptrval = ctypes.cast(cobj, ctypes.c_void_p).value - if not cobj or not ptrval: # NULL pointer + if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 return lltype.nullptr(T.TO) if isinstance(T.TO, lltype.Struct): - if ptrval & 1: # a tagged pointer - gcref = _opaque_objs[ptrval // 2].hide() - return lltype.cast_opaque_ptr(T, gcref) REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) @@ -1237,9 +1228,7 @@ return not self == other def _cast_to_ptr(self, PTRTYPE): - if self.intval & 1: - return _opaque_objs[self.intval // 2] - return force_cast(PTRTYPE, self.intval) + return force_cast(PTRTYPE, self.intval) ## def _cast_to_int(self): ## return self.intval diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ /dev/null @@ -1,266 +0,0 @@ - -import py - -from pypy.rpython.lltypesystem import lltype, 
llmemory, rclass -from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE -from pypy.jit.metainterp import typesystem -from pypy.jit.metainterp.quasiimmut import QuasiImmut -from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance -from pypy.jit.metainterp.test.test_basic import LLJitMixin -from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.rlib.jit import JitDriver, dont_look_inside - - -def test_get_current_qmut_instance(): - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - STRUCT = lltype.GcStruct('Foo', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - foo = lltype.malloc(STRUCT, zero=True) - foo.inst_x = 42 - assert not foo.mutate_x - - class FakeCPU: - ts = typesystem.llhelper - - def bh_getfield_gc_r(self, gcref, fielddescr): - assert fielddescr == mutatefielddescr - foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) - result = foo.mutate_x - return lltype.cast_opaque_ptr(llmemory.GCREF, result) - - def bh_setfield_gc_r(self, gcref, fielddescr, newvalue_gcref): - assert fielddescr == mutatefielddescr - foo = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), gcref) - newvalue = lltype.cast_opaque_ptr(rclass.OBJECTPTR, newvalue_gcref) - foo.mutate_x = newvalue - - cpu = FakeCPU() - mutatefielddescr = ('fielddescr', STRUCT, 'mutate_x') - - foo_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) - qmut1 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) - assert isinstance(qmut1, QuasiImmut) - qmut2 = get_current_qmut_instance(cpu, foo_gcref, mutatefielddescr) - assert qmut1 is qmut2 - - -class QuasiImmutTests(object): - - def test_simple_1(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a 
quasi-immutable field out of a Constant - total += foo.a - x -= 1 - return total - # - res = self.meta_interp(f, [100, 7]) - assert res == 700 - self.check_loops(getfield_gc=0, everywhere=True) - # - from pypy.jit.metainterp.warmspot import get_stats - loops = get_stats().loops - for loop in loops: - assert len(loop.quasi_immutable_deps) == 1 - assert isinstance(loop.quasi_immutable_deps.keys()[0], QuasiImmut) - - def test_nonopt_1(self): - myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def setup(x): - return [Foo(100 + i) for i in range(x)] - def f(a, x): - lst = setup(x) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(lst=lst, x=x, total=total) - # read a quasi-immutable field out of a variable - x -= 1 - total += lst[x].a - return total - # - assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7]) - assert res == 721 - self.check_loops(getfield_gc=1) - # - from pypy.jit.metainterp.warmspot import get_stats - loops = get_stats().loops - for loop in loops: - assert loop.quasi_immutable_deps is None - - def test_change_during_tracing_1(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - @dont_look_inside - def residual_call(foo): - foo.a += 1 - def f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - residual_call(foo) - x -= 1 - return total - # - assert f(100, 7) == 721 - res = self.meta_interp(f, [100, 7]) - assert res == 721 - self.check_loops(getfield_gc=1) - - def test_change_during_tracing_2(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - @dont_look_inside - def residual_call(foo, difference): - foo.a += difference - def 
f(a, x): - foo = Foo(a) - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - residual_call(foo, +1) - residual_call(foo, -1) - x -= 1 - return total - # - assert f(100, 7) == 700 - res = self.meta_interp(f, [100, 7]) - assert res == 700 - self.check_loops(getfield_gc=1) - - def test_change_invalidate_reentering(self): - myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) - class Foo: - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - def f(foo, x): - total = 0 - while x > 0: - myjitdriver.jit_merge_point(foo=foo, x=x, total=total) - # read a quasi-immutable field out of a Constant - total += foo.a - x -= 1 - return total - def g(a, x): - foo = Foo(a) - res1 = f(foo, x) - foo.a += 1 # invalidation, while the jit is not running - res2 = f(foo, x) # should still mark the loop as invalid - return res1 * 1000 + res2 - # - assert g(100, 7) == 700707 - res = self.meta_interp(g, [100, 7]) - assert res == 700707 - self.check_loops(getfield_gc=0) - - def test_invalidate_while_running(self): - jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - - def external(foo, v): - if v: - foo.a = 2 - - def f(foo): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(i=i, foo=foo, total=total) - external(foo, i > 7) - i += 1 - total += foo.a - return total - - def g(): - return f(Foo(1)) - - assert self.meta_interp(g, [], policy=StopAtXPolicy(external)) == g() - - def test_invalidate_by_setfield(self): - py.test.skip("Not implemented") - jitdriver = JitDriver(greens=['bc', 'foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - def __init__(self, a): - self.a = a - - def f(foo, bc): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(bc=bc, i=i, foo=foo, total=total) - if bc == 0: - f(foo, 1) - if bc == 1: - 
foo.a = int(i > 5) - i += 1 - total += foo.a - return total - - def g(): - return f(Foo(1), 0) - - assert self.meta_interp(g, []) == g() - - def test_invalidate_bridge(self): - jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) - - class Foo(object): - _immutable_fields_ = ['a?'] - - def f(foo): - i = 0 - total = 0 - while i < 10: - jitdriver.jit_merge_point(i=i, total=total, foo=foo) - if i > 5: - total += foo.a - else: - total += 2*foo.a - i += 1 - return total - - def main(): - foo = Foo() - foo.a = 1 - total = f(foo) - foo.a = 2 - total += f(foo) - return total - - res = self.meta_interp(main, []) - assert res == main() - -class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/quasiimmut.py b/pypy/jit/metainterp/quasiimmut.py deleted file mode 100644 --- a/pypy/jit/metainterp/quasiimmut.py +++ /dev/null @@ -1,116 +0,0 @@ -import weakref -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE -from pypy.rpython.lltypesystem import lltype, rclass -from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.jit.metainterp.history import AbstractDescr - - -def is_quasi_immutable(STRUCT, fieldname): - imm_fields = STRUCT._hints.get('immutable_fields') - return (imm_fields is not None and - imm_fields.fields.get(fieldname) is IR_QUASI_IMMUTABLE) - -def get_mutate_field_name(fieldname): - if fieldname.startswith('inst_'): # lltype - return 'mutate_' + fieldname[5:] - elif fieldname.startswith('o'): # ootype - return 'mutate_' + fieldname[1:] - else: - raise AssertionError(fieldname) - -def get_current_qmut_instance(cpu, gcref, mutatefielddescr): - """Returns the current QuasiImmut instance in the field, - possibly creating one. 
- """ - # XXX this is broken on x86 - qmut_gcref = cpu.bh_getfield_gc_r(gcref, mutatefielddescr) - if qmut_gcref: - qmut = QuasiImmut.show(cpu, qmut_gcref) - else: - qmut = QuasiImmut(cpu) - cpu.bh_setfield_gc_r(gcref, mutatefielddescr, qmut.hide()) - return qmut - -def make_invalidation_function(STRUCT, mutatefieldname): - # - def _invalidate_now(p): - qmut_ptr = getattr(p, mutatefieldname) - setattr(p, mutatefieldname, lltype.nullptr(rclass.OBJECT)) - qmut = cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) - qmut.invalidate() - _invalidate_now._dont_inline_ = True - # - def invalidation(p): - if getattr(p, mutatefieldname): - _invalidate_now(p) - # - return invalidation - - -class QuasiImmut(object): - llopaque = True - - def __init__(self, cpu): - self.cpu = cpu - # list of weakrefs to the LoopTokens that must be invalidated if - # this value ever changes - self.looptokens_wrefs = [] - self.compress_limit = 30 - - def hide(self): - qmut_ptr = self.cpu.ts.cast_instance_to_base_ref(self) - return self.cpu.ts.cast_to_ref(qmut_ptr) - - @staticmethod - def show(cpu, qmut_gcref): - qmut_ptr = cpu.ts.cast_to_baseclass(qmut_gcref) - return cast_base_ptr_to_instance(QuasiImmut, qmut_ptr) - - def register_loop_token(self, wref_looptoken): - if len(self.looptokens_wrefs) > self.compress_limit: - self.compress_looptokens_list() - self.looptokens_wrefs.append(wref_looptoken) - - def compress_looptokens_list(self): - self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs - if wref() is not None] - self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 - - def invalidate(self): - # When this is called, all the loops that we record become - # invalid and must not be called again, nor returned to. 
- wrefs = self.looptokens_wrefs - self.looptokens_wrefs = [] - for wref in wrefs: - looptoken = wref() - if looptoken is not None: - self.cpu.invalidate_loop(looptoken) - - -class QuasiImmutDescr(AbstractDescr): - def __init__(self, cpu, structbox, fielddescr, mutatefielddescr): - self.cpu = cpu - self.structbox = structbox - self.fielddescr = fielddescr - self.mutatefielddescr = mutatefielddescr - gcref = structbox.getref_base() - self.qmut = get_current_qmut_instance(cpu, gcref, mutatefielddescr) - self.constantfieldbox = self.get_current_constant_fieldvalue() - - def get_current_constant_fieldvalue(self): - from pypy.jit.metainterp import executor - from pypy.jit.metainterp.resoperation import rop - fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC, - self.fielddescr, self.structbox) - return fieldbox.constbox() - - def is_still_valid(self): - cpu = self.cpu - gcref = self.structbox.getref_base() - qmut = get_current_qmut_instance(cpu, gcref, self.mutatefielddescr) - if qmut is not self.qmut: - return False - else: - currentbox = self.get_current_constant_fieldvalue() - assert self.constantfieldbox.same_constant(currentbox) - return True diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,7 +2,6 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -46,7 +45,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : 
IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) + XY, {'inst_x' : "", 'inst_node' : ""}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -211,8 +210,7 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : IR_IMMUTABLE, - 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) + XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py --- a/pypy/jit/metainterp/optimizeopt/__init__.py +++ b/pypy/jit/metainterp/optimizeopt/__init__.py @@ -41,8 +41,7 @@ # during preamble but to keep it during the loop optimizations.append(o) - if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts - or 'heap' not in enable_opts): + if 'rewrite' not in enable_opts or 'virtualize' not in enable_opts: optimizations.append(OptSimplify()) if inline_short_preamble: diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -7,9 +7,8 @@ from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from pypy.jit.codewriter import support, heaptracker, longlong from pypy.jit.codewriter.effectinfo import EffectInfo -from pypy.jit.codewriter.policy import log, check_skip_operation +from pypy.jit.codewriter.policy import log from pypy.jit.metainterp.typesystem import deref, arrayItem -from pypy.jit.metainterp import quasiimmut from pypy.rlib import objectmodel from pypy.rlib.jit import _we_are_jitted from pypy.translator.simplify import get_funcobj @@ -562,8 +561,7 @@ arraydescr) return [] # check for _immutable_fields_ 
hints - immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) - if immut: + if v_inst.concretetype.TO._immutable_field(c_fieldname.value): if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): @@ -576,21 +574,10 @@ descr = self.cpu.fielddescrof(v_inst.concretetype.TO, c_fieldname.value) kind = getkind(RESULT)[0] - op1 = SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), - [v_inst, descr], op.result) - # - if immut is quasiimmut.IR_QUASI_IMMUTABLE: - descr1 = self.cpu.fielddescrof( - v_inst.concretetype.TO, - quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), - SpaceOperation('record_quasiimmut_field', - [v_inst, descr, descr1], None), - op1] - return op1 + return SpaceOperation('getfield_%s_%s%s' % (argname, kind, pure), + [v_inst, descr], op.result) def rewrite_op_setfield(self, op): - check_skip_operation(op) # just to check it doesn't raise if self.is_typeptr_getset(op): # ignore the operation completely -- instead, it's done by 'new' return diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -257,7 +257,6 @@ self.pendingfields = [] self.posponedop = None self.exception_might_have_happened = False - self.quasi_immutable_deps = None self.newoperations = [] if loop is not None: self.call_pure_results = loop.call_pure_results @@ -310,7 +309,6 @@ new.pure_operations = self.pure_operations new.producer = self.producer assert self.posponedop is None - new.quasi_immutable_deps = self.quasi_immutable_deps return new @@ -412,7 +410,6 @@ self.first_optimization.propagate_forward(op) self.i += 1 self.loop.operations = self.newoperations - self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters self.resumedata_memo.update_counters(self.metainterp_sd.profiler) 
diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5717,35 +5717,8 @@ # not obvious, because of the exception UnicodeDecodeError that # can be raised by ll_str2unicode() - def test_quasi_immut(self): - ops = """ - [p0, p1, i0] - quasiimmut_field(p0, descr=quasiimmutdescr) - guard_not_invalidated() [] - i1 = getfield_gc(p0, descr=quasifielddescr) - jump(p1, p0, i1) - """ - expected = """ - [p0, p1, i0] - i1 = getfield_gc(p0, descr=quasifielddescr) - jump(p1, p0, i1) - """ - self.optimize_loop(ops, expected) - - def test_quasi_immut_2(self): - ops = """ - [] - quasiimmut_field(ConstPtr(myptr), descr=quasiimmutdescr) - guard_not_invalidated() [] - i1 = getfield_gc(ConstPtr(myptr), descr=quasifielddescr) - jump() - """ - expected = """ - [] - guard_not_invalidated() [] - jump() - """ - self.optimize_loop(ops, expected, expected) + + ##class TestOOtype(OptimizeOptTest, OOtypeMixin): diff --git a/pypy/rpython/test/test_annlowlevel.py b/pypy/rpython/test/test_annlowlevel.py --- a/pypy/rpython/test/test_annlowlevel.py +++ b/pypy/rpython/test/test_annlowlevel.py @@ -4,12 +4,9 @@ from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode -from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import hlstr, llstr, oostr from pypy.rpython.annlowlevel import hlunicode, llunicode -from pypy.rpython import annlowlevel - class TestLLType(BaseRtypingTest, LLRtypeMixin): def test_hlstr(self): @@ -56,15 +53,6 @@ res = self.interpret(f, [self.unicode_to_ll(u"abc")]) assert res == 3 - def test_cast_instance_to_base_ptr(self): - class X(object): - pass - x = X() - ptr = annlowlevel.cast_instance_to_base_ptr(x) - assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() - y = 
annlowlevel.cast_base_ptr_to_instance(X, ptr) - assert y is x - class TestOOType(BaseRtypingTest, OORtypeMixin): def test_hlstr(self): @@ -83,12 +71,3 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 - - def test_cast_instance_to_base_obj(self): - class X(object): - pass - x = X() - obj = annlowlevel.cast_instance_to_base_obj(x) - assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() - y = annlowlevel.cast_base_ptr_to_instance(X, obj) - assert y is x diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -286,10 +286,6 @@ raise ValueError("CALL_ASSEMBLER not supported") llimpl.redirect_call_assembler(self, oldlooptoken, newlooptoken) - def invalidate_loop(self, looptoken): - for loop in looptoken.compiled_loop_token.loop_and_bridges: - loop._obj.externalobj.invalid = True - # ---------- def sizeof(self, S): diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -3,7 +3,6 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE -from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, @@ -13,7 +12,6 @@ from pypy.jit.codewriter.effectinfo import EffectInfo from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int from pypy.jit.tool.oparser import parse -from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -64,18 +62,6 @@ nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') - accessor = 
FieldListAccessor() - accessor.initialize(None, {'inst_field': IR_QUASI_IMMUTABLE}) - QUASI = lltype.GcStruct('QUASIIMMUT', ('inst_field', lltype.Signed), - ('mutate_field', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - quasi = lltype.malloc(QUASI, immortal=True) - quasifielddescr = cpu.fielddescrof(QUASI, 'inst_field') - quasibox = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, quasi)) - quasiimmutdescr = QuasiImmutDescr(cpu, quasibox, - quasifielddescr, - cpu.fielddescrof(QUASI, 'mutate_field')) - NODEOBJ = lltype.GcStruct('NODEOBJ', ('parent', OBJECT), ('ref', lltype.Ptr(OBJECT))) nodeobj = lltype.malloc(NODEOBJ) diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,7 +1,6 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance -from pypy.rpython.rclass import IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -11,7 +10,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo(object): +class VirtualizableInfo: TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -34,13 +33,11 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, tp in all_fields.iteritems(): - if tp == IR_ARRAY_IMMUTABLE: + for name, suffix in all_fields.iteritems(): + if suffix == '[*]': array_fields.append(name) - elif tp == IR_IMMUTABLE: + else: static_fields.append(name) - else: - raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- 
a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -492,8 +492,6 @@ def consider_guard_no_exception(self, op): self.perform_guard(op, [], None) - consider_guard_not_invalidated = consider_guard_no_exception - def consider_guard_exception(self, op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) box = TempBox() diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1293,28 +1293,6 @@ rffi.cast(SP, p).x = 0 lltype.free(chunk, flavor='raw') - def test_opaque_tagged_pointers(self): - from pypy.rpython.annlowlevel import cast_base_ptr_to_instance - from pypy.rpython.annlowlevel import cast_instance_to_base_ptr - from pypy.rpython.lltypesystem import rclass - - class Opaque(object): - llopaque = True - - def hide(self): - ptr = cast_instance_to_base_ptr(self) - return lltype.cast_opaque_ptr(llmemory.GCREF, ptr) - - @staticmethod - def show(gcref): - ptr = lltype.cast_opaque_ptr(lltype.Ptr(rclass.OBJECT), gcref) - return cast_base_ptr_to_instance(Opaque, ptr) - - opaque = Opaque() - round = ctypes2lltype(llmemory.GCREF, lltype2ctypes(opaque.hide())) - assert Opaque.show(round) is opaque - - class TestPlatform(object): def test_lib_on_libpaths(self): from pypy.translator.platform import platform diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -119,7 +119,6 @@ self._lazy_setfields = [] # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} - self._remove_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -379,43 +378,6 @@ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, write=True) - def optimize_QUASIIMMUT_FIELD(self, op): - # 
Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC(s, descr='inst_x') - # If 's' is a constant (after optimizations), then we make 's.inst_x' - # a constant too, and we rely on the rest of the optimizations to - # constant-fold the following getfield_gc. - structvalue = self.getvalue(op.getarg(0)) - if not structvalue.is_constant(): - self._remove_guard_not_invalidated = True - return # not a constant at all; ignore QUASIIMMUT_FIELD - # - from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr - qmutdescr = op.getdescr() - assert isinstance(qmutdescr, QuasiImmutDescr) - # check that the value is still correct; it could have changed - # already between the tracing and now. In this case, we are - # simply ignoring the QUASIIMMUT_FIELD hint and compiling it - # as a regular getfield. - if not qmutdescr.is_still_valid(): - self._remove_guard_not_invalidated = True - return - # record as an out-of-line guard - if self.optimizer.quasi_immutable_deps is None: - self.optimizer.quasi_immutable_deps = {} - self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None - # perform the replacement in the list of operations - fieldvalue = self.getvalue(qmutdescr.constantfieldbox) - cf = self.field_cache(qmutdescr.fielddescr) - cf.remember_field_value(structvalue, fieldvalue) - self._remove_guard_not_invalidated = False - - def optimize_GUARD_NOT_INVALIDATED(self, op): - if self._remove_guard_not_invalidated: - return - self._remove_guard_not_invalidated = False - self.emit_operation(op) - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -312,7 +312,6 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, - rop.QUASIIMMUT_FIELD, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git 
a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py --- a/pypy/rpython/rclass.py +++ b/pypy/rpython/rclass.py @@ -3,8 +3,7 @@ #from pypy.annotation.classdef import isclassdef from pypy.annotation import description from pypy.rpython.error import TyperError -from pypy.rpython.rmodel import Repr, getgcflavor, inputconst -from pypy.rpython.lltypesystem.lltype import Void +from pypy.rpython.rmodel import Repr, getgcflavor class FieldListAccessor(object): @@ -13,8 +12,6 @@ assert type(fields) is dict self.TYPE = TYPE self.fields = fields - for x in fields.itervalues(): - assert isinstance(x, ImmutableRanking) def __repr__(self): return '' % getattr(self, 'TYPE', '?') @@ -22,20 +19,6 @@ def _freeze_(self): return True -class ImmutableRanking(object): - def __init__(self, name, is_immutable): - self.name = name - self.is_immutable = is_immutable - def __nonzero__(self): - return self.is_immutable - def __repr__(self): - return '<%s>' % self.name - -IR_MUTABLE = ImmutableRanking('mutable', False) -IR_IMMUTABLE = ImmutableRanking('immutable', True) -IR_ARRAY_IMMUTABLE = ImmutableRanking('array_immutable', True) -IR_QUASI_IMMUTABLE = ImmutableRanking('quasi_immutable', False) - class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" @@ -172,8 +155,7 @@ self.classdef = classdef def _setup_repr(self): - if self.classdef is None: - self.immutable_field_set = set() + pass def _check_for_immutable_hints(self, hints): loc = self.classdef.classdesc.lookup('_immutable_') @@ -185,13 +167,13 @@ self.classdef,)) hints = hints.copy() hints['immutable'] = True - self.immutable_field_set = set() # unless overwritten below + self.immutable_field_list = [] # unless overwritten below if self.classdef.classdesc.lookup('_immutable_fields_') is not None: hints = hints.copy() immutable_fields = self.classdef.classdesc.classdict.get( '_immutable_fields_') if immutable_fields is not None: - 
self.immutable_field_set = set(immutable_fields.value) + self.immutable_field_list = immutable_fields.value accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints @@ -219,35 +201,33 @@ if "immutable_fields" in hints: accessor = hints["immutable_fields"] if not hasattr(accessor, 'fields'): - immutable_fields = set() + immutable_fields = [] rbase = self while rbase.classdef is not None: - immutable_fields.update(rbase.immutable_field_set) + immutable_fields += rbase.immutable_field_list rbase = rbase.rbase self._parse_field_list(immutable_fields, accessor) def _parse_field_list(self, fields, accessor): - ranking = {} + with_suffix = {} for name in fields: - if name.endswith('[*]'): # for virtualizables' lists + if name.endswith('[*]'): name = name[:-3] - rank = IR_ARRAY_IMMUTABLE - elif name.endswith('?'): # a quasi-immutable field - name = name[:-1] - rank = IR_QUASI_IMMUTABLE - else: # a regular immutable/green field - rank = IR_IMMUTABLE + suffix = '[*]' + else: + suffix = '' try: mangled_name, r = self._get_field(name) except KeyError: continue - ranking[mangled_name] = rank - accessor.initialize(self.object_type, ranking) - return ranking + with_suffix[mangled_name] = suffix + accessor.initialize(self.object_type, with_suffix) + return with_suffix def _check_for_immutable_conflicts(self): # check for conflicts, i.e. 
a field that is defined normally as # mutable in some parent class but that is now declared immutable + from pypy.rpython.lltypesystem.lltype import Void is_self_immutable = "immutable" in self.object_type._hints base = self while base.classdef is not None: @@ -268,30 +248,12 @@ "class %r has _immutable_=True, but parent class %r " "defines (at least) the mutable field %r" % ( self, base, fieldname)) - if (fieldname in self.immutable_field_set or - (fieldname + '?') in self.immutable_field_set): + if fieldname in self.immutable_field_list: raise ImmutableConflictError( "field %r is defined mutable in class %r, but " "listed in _immutable_fields_ in subclass %r" % ( fieldname, base, self)) - def hook_access_field(self, vinst, cname, llops, flags): - pass # for virtualizables; see rvirtualizable2.py - - def hook_setfield(self, vinst, fieldname, llops): - if self.is_quasi_immutable(fieldname): - c_fieldname = inputconst(Void, 'mutate_' + fieldname) - llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname]) - - def is_quasi_immutable(self, fieldname): - search = fieldname + '?' 
- rbase = self - while rbase.classdef is not None: - if search in rbase.immutable_field_set: - return True - rbase = rbase.rbase - return False - def new_instance(self, llops, classcallhop=None): raise NotImplementedError diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -1,5 +1,5 @@ from pypy.translator.simplify import get_funcobj -from pypy.jit.metainterp import history, quasiimmut +from pypy.jit.metainterp import history from pypy.rpython.lltypesystem import lltype, rclass from pypy.tool.udir import udir @@ -85,20 +85,12 @@ getkind(v.concretetype, supports_floats, supports_longlong) v = op.result getkind(v.concretetype, supports_floats, supports_longlong) - check_skip_operation(op) except NotImplementedError, e: log.WARNING('%s, ignoring graph' % (e,)) log.WARNING(' %s' % (graph,)) return True return False -def check_skip_operation(op): - if op.opname == 'setfield': - if quasiimmut.is_quasi_immutable(op.args[0].concretetype.TO, - op.args[1].value): - raise NotImplementedError("write to quasi-immutable field %r" - % (op.args[1].value,)) - # ____________________________________________________________ class StopAtXPolicy(JitPolicy): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -167,7 +167,6 @@ class CompiledLoop(object): has_been_freed = False - invalid = False def __init__(self): self.inputargs = [] @@ -934,9 +933,6 @@ if forced: raise GuardFailed - def op_guard_not_invalidated(self, descr): - if self.loop.invalid: - raise GuardFailed class OOFrame(Frame): diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py --- a/pypy/jit/codewriter/test/test_jtransform.py +++ b/pypy/jit/codewriter/test/test_jtransform.py @@ -947,43 +947,3 @@ assert op1.args[1] == 'calldescr-%d' % 
effectinfo.EffectInfo.OS_ARRAYCOPY assert op1.args[2] == ListOfKind('int', [v3, v4, v5]) assert op1.args[3] == ListOfKind('ref', [v1, v2]) - -def test_quasi_immutable(): - from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - v2 = varoftype(lltype.Signed) - STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], - v2) - tr = Transformer(FakeCPU()) - [_, op1, op2] = tr.rewrite_operation(op) - assert op1.opname == 'record_quasiimmut_field' - assert len(op1.args) == 3 - assert op1.args[0] == v_x - assert op1.args[1] == ('fielddescr', STRUCT, 'inst_x') - assert op1.args[2] == ('fielddescr', STRUCT, 'mutate_x') - assert op1.result is None - assert op2.opname == 'getfield_gc_i' - assert len(op2.args) == 2 - assert op2.args[0] == v_x - assert op2.args[1] == ('fielddescr', STRUCT, 'inst_x') - assert op2.result is op.result - -def test_quasi_immutable_setfield(): - from pypy.rpython.rclass import FieldListAccessor, IR_QUASI_IMMUTABLE - accessor = FieldListAccessor() - accessor.initialize(None, {'inst_x': IR_QUASI_IMMUTABLE}) - v1 = varoftype(lltype.Signed) - STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), - ('mutate_x', rclass.OBJECTPTR), - hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('setfield', - [v_x, Constant('inst_x', lltype.Void), v1], - varoftype(lltype.Void)) - tr = Transformer(FakeCPU()) - raises(NotImplementedError, tr.rewrite_operation, op) diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py --- a/pypy/jit/codewriter/effectinfo.py +++ b/pypy/jit/codewriter/effectinfo.py @@ -171,8 +171,7 @@ class 
VirtualizableAnalyzer(BoolGraphAnalyzer): def analyze_simple_operation(self, op, graphinfo): return op.opname in ('jit_force_virtualizable', - 'jit_force_virtual', - 'jit_force_quasi_immutable') + 'jit_force_virtual') # ____________________________________________________________ diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -555,16 +555,6 @@ opimpl_setfield_raw_r = _opimpl_setfield_raw_any opimpl_setfield_raw_f = _opimpl_setfield_raw_any - @arguments("box", "descr", "descr", "orgpc") - def opimpl_record_quasiimmut_field(self, box, fielddescr, - mutatefielddescr, orgpc): - from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr - cpu = self.metainterp.cpu - descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) - self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], - None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] @@ -1086,8 +1076,6 @@ if opnum == rop.GUARD_NOT_FORCED: resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() guard_op = metainterp.history.record(opnum, moreargs, None, @@ -1860,9 +1848,6 @@ self.handle_possible_exception() except ChangeFrame: pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass # XXX we want to do something special in resume descr, - # but not now elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected self.execute_raised(OverflowError(), constant=True) try: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -76,11 +76,6 @@ 
op.setdescr(None) # clear reference, mostly for tests if not we_are_translated(): op._jumptarget_number = descr.number - # record this looptoken on the QuasiImmut used in the code - if loop.quasi_immutable_deps is not None: - for qmut in loop.quasi_immutable_deps: - qmut.register_loop_token(wref) - # XXX maybe we should clear the dictionary here # mostly for tests: make sure we don't keep a reference to the LoopToken loop.token = None if not we_are_translated(): @@ -401,12 +396,6 @@ self.copy_all_attributes_into(res) return res -class ResumeGuardNotInvalidated(ResumeGuardDescr): - def _clone_if_mutable(self): - res = ResumeGuardNotInvalidated() - self.copy_all_attributes_into(res) - return res - class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py --- a/pypy/rpython/lltypesystem/test/test_lloperation.py +++ b/pypy/rpython/lltypesystem/test/test_lloperation.py @@ -54,7 +54,6 @@ def test_is_pure(): from pypy.objspace.flow.model import Variable, Constant - from pypy.rpython import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) @@ -86,50 +85,38 @@ assert llop.getarrayitem.is_pure([v_a2, Variable()]) assert llop.getarraysize.is_pure([v_a2]) # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - accessor = rclass.FieldListAccessor() - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - v_s3 = Variable() - v_s3.concretetype = lltype.Ptr(S3) - assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) - assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) - assert llop.getfield.is_pure([v_s3, Constant('x')]) is kind 
- assert not llop.getfield.is_pure([v_s3, Constant('y')]) + accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) + v_s3 = Variable() + v_s3.concretetype = lltype.Ptr(S3) + assert not llop.setfield.is_pure([v_s3, Constant('x'), Variable()]) + assert not llop.setfield.is_pure([v_s3, Constant('y'), Variable()]) + assert llop.getfield.is_pure([v_s3, Constant('x')]) + assert not llop.getfield.is_pure([v_s3, Constant('y')]) def test_getfield_pure(): S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) S2 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), hints={'immutable': True}) accessor = rclass.FieldListAccessor() + S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), + hints={'immutable_fields': accessor}) + accessor.initialize(S3, {'x': ''}) # s1 = lltype.malloc(S1); s1.x = 45 py.test.raises(TypeError, llop.getfield, lltype.Signed, s1, 'x') s2 = lltype.malloc(S2); s2.x = 45 assert llop.getfield(lltype.Signed, s2, 'x') == 45 + s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 + assert llop.getfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') # py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s1, 'x') assert llop.getinteriorfield(lltype.Signed, s2, 'x') == 45 - # - for kind in [rclass.IR_MUTABLE, rclass.IR_IMMUTABLE, - rclass.IR_ARRAY_IMMUTABLE, rclass.IR_QUASI_IMMUTABLE]: - # - S3 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed), - hints={'immutable_fields': accessor}) - accessor.initialize(S3, {'x': kind}) - s3 = lltype.malloc(S3); s3.x = 46; s3.y = 47 - if kind in [rclass.IR_IMMUTABLE, rclass.IR_ARRAY_IMMUTABLE]: - assert llop.getfield(lltype.Signed, s3, 'x') == 46 - assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 - else: - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'x') - 
py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'x') - py.test.raises(TypeError, llop.getfield, lltype.Signed, s3, 'y') - py.test.raises(TypeError, llop.getinteriorfield, - lltype.Signed, s3, 'y') + assert llop.getinteriorfield(lltype.Signed, s3, 'x') == 46 + py.test.raises(TypeError, llop.getinteriorfield, lltype.Signed, s3, 'y') # ___________________________________________________________________________ # This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync. diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -145,14 +145,6 @@ def redirect_call_assembler(self, oldlooptoken, newlooptoken): self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken) - def invalidate_loop(self, looptoken): - from pypy.jit.backend.x86 import codebuf - - for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: - mc = codebuf.MachineCodeBlockWrapper() - mc.JMP_l(tgt) - mc.copy_to_raw_memory(addr - 1) - class CPU386(AbstractX86CPU): WORD = 4 NUM_REGS = 8 diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -525,9 +525,6 @@ def op_jit_force_virtual(x): return x -def op_jit_force_quasi_immutable(*args): - pass - def op_get_group_member(TYPE, grpptr, memberoffset): from pypy.rpython.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) diff --git a/pypy/rpython/test/test_rvirtualizable2.py b/pypy/rpython/test/test_rvirtualizable2.py --- a/pypy/rpython/test/test_rvirtualizable2.py +++ b/pypy/rpython/test/test_rvirtualizable2.py @@ -5,7 +5,6 @@ from pypy.rlib.jit import hint from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy import conftest @@ -117,8 +116,8 @@ 
TYPE = self.gettype(v_inst) accessor = TYPE._hints['virtualizable2_accessor'] assert accessor.TYPE == TYPE - assert accessor.fields == {self.prefix + 'v1': IR_IMMUTABLE, - self.prefix + 'v2': IR_ARRAY_IMMUTABLE} + assert accessor.fields == {self.prefix + 'v1' : "", + self.prefix + 'v2': "[*]"} # def fn2(n): Base().base1 = 42 diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1166,11 +1166,6 @@ def bhimpl_setfield_raw_f(cpu, struct, fielddescr, newvalue): cpu.bh_setfield_raw_f(struct, fielddescr, newvalue) - @arguments("cpu", "r", "d", "d") - def bhimpl_record_quasiimmut_field(self, struct, fielddescr, - mutatefielddescr): - pass - @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) @@ -1292,8 +1287,6 @@ # We get here because it used to overflow, but now it no longer # does. pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass else: from pypy.jit.metainterp.resoperation import opname raise NotImplementedError(opname[opnum]) diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -433,7 +433,6 @@ 'jit_marker': LLOp(), 'jit_force_virtualizable':LLOp(canrun=True), 'jit_force_virtual': LLOp(canrun=True), - 'jit_force_quasi_immutable': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True), diff --git a/pypy/jit/backend/x86/test/test_quasiimmut.py b/pypy/jit/backend/x86/test/test_quasiimmut.py deleted file mode 100644 --- a/pypy/jit/backend/x86/test/test_quasiimmut.py +++ /dev/null @@ -1,9 +0,0 @@ - -import py -from pypy.jit.backend.x86.test.test_basic import Jit386Mixin -from pypy.jit.metainterp.test import test_quasiimmut - -class TestLoopSpec(Jit386Mixin, 
test_quasiimmut.QuasiImmutTests): - # for the individual tests see - # ====> ../../../metainterp/test/test_loop.py - pass diff --git a/pypy/rpython/rvirtualizable2.py b/pypy/rpython/rvirtualizable2.py --- a/pypy/rpython/rvirtualizable2.py +++ b/pypy/rpython/rvirtualizable2.py @@ -50,7 +50,7 @@ def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): - if self.my_redirected_fields.get(cname.value): + if cname.value in self.my_redirected_fields: cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -380,7 +380,6 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', - 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- @@ -476,7 +475,6 @@ 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', - 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -794,8 +794,15 @@ def __init__(self, fields): self.fields = fields S = GcStruct('S', ('x', lltype.Signed), - hints={'immutable_fields': FieldListAccessor({'x': 1234})}) - assert S._immutable_field('x') == 1234 + hints={'immutable_fields': FieldListAccessor({'x':''})}) + assert S._immutable_field('x') == True + # + class FieldListAccessor(object): + def __init__(self, fields): + self.fields = fields + S = GcStruct('S', ('x', lltype.Signed), + hints={'immutable_fields': 
FieldListAccessor({'x':'[*]'})}) + assert S._immutable_field('x') == '[*]' def test_typedef(): T = Typedef(Signed, 'T') diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -267,8 +267,6 @@ virtual_state = modifier.get_virtual_state(jump_args) loop.preamble.operations = self.optimizer.newoperations - loop.preamble.quasi_immutable_deps = ( - self.optimizer.quasi_immutable_deps) self.optimizer = self.optimizer.reconstruct_for_next_iteration() inputargs = self.inline(self.cloned_operations, loop.inputargs, jump_args) @@ -278,7 +276,6 @@ loop.preamble.operations.append(jmp) loop.operations = self.optimizer.newoperations - loop.quasi_immutable_deps = self.optimizer.quasi_immutable_deps start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable() assert isinstance(start_resumedescr, ResumeGuardDescr) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -85,7 +85,6 @@ 'nslots', 'instancetypedef', 'terminator', - '_version_tag?', ] # for config.objspace.std.getattributeshortcut diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py --- a/pypy/jit/metainterp/warmspot.py +++ b/pypy/jit/metainterp/warmspot.py @@ -131,16 +131,6 @@ def find_set_param(graphs): return _find_jit_marker(graphs, 'set_param') -def find_force_quasi_immutable(graphs): - results = [] - for graph in graphs: - for block in graph.iterblocks(): - for i in range(len(block.operations)): - op = block.operations[i] - if op.opname == 'jit_force_quasi_immutable': - results.append((graph, block, i)) - return results - def get_stats(): return pyjitpl._warmrunnerdesc.stats @@ -197,7 +187,6 @@ self.rewrite_can_enter_jits() self.rewrite_set_param() self.rewrite_force_virtual(vrefinfo) - self.rewrite_force_quasi_immutable() 
self.add_finish() self.metainterp_sd.finish_setup(self.codewriter) @@ -853,28 +842,6 @@ all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) - def replace_force_quasiimmut_with_direct_call(self, op): - ARG = op.args[0].concretetype - mutatefieldname = op.args[1].value - key = (ARG, mutatefieldname) - if key in self._cache_force_quasiimmed_funcs: - cptr = self._cache_force_quasiimmed_funcs[key] - else: - from pypy.jit.metainterp import quasiimmut - func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) - FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) - llptr = self.helper_func(FUNC, func) - cptr = Constant(llptr, FUNC) - self._cache_force_quasiimmed_funcs[key] = cptr - op.opname = 'direct_call' - op.args = [cptr, op.args[0]] - - def rewrite_force_quasi_immutable(self): - self._cache_force_quasiimmed_funcs = {} - graphs = self.translator.graphs - for graph, block, i in find_force_quasi_immutable(graphs): - self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - # ____________________________________________________________ def execute_token(self, loop_token): diff --git a/pypy/rpython/lltypesystem/rclass.py b/pypy/rpython/lltypesystem/rclass.py --- a/pypy/rpython/lltypesystem/rclass.py +++ b/pypy/rpython/lltypesystem/rclass.py @@ -322,7 +322,6 @@ # before they are fully built, to avoid strange bugs in case # of recursion where other code would uses these # partially-initialized dicts. 
- AbstractInstanceRepr._setup_repr(self) self.rclass = getclassrepr(self.rtyper, self.classdef) fields = {} allinstancefields = {} @@ -371,11 +370,6 @@ kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True - - for name, attrdef in attrs: - if not attrdef.readonly and self.is_quasi_immutable(name): - llfields.append(('mutate_' + name, OBJECTPTR)) - object_type = MkStruct(self.classdef.name, ('super', self.rbase.object_type), hints=hints, @@ -494,7 +488,6 @@ if force_cast: vinst = llops.genop('cast_pointer', [vinst], resulttype=self) self.hook_access_field(vinst, cname, llops, flags) - self.hook_setfield(vinst, attr, llops) llops.genop('setfield', [vinst, cname, vvalue]) else: if self.classdef is None: @@ -502,6 +495,9 @@ self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True, flags=flags) + def hook_access_field(self, vinst, cname, llops, flags): + pass # for virtualizables; see rvirtualizable2.py + def new_instance(self, llops, classcallhop=None): """Build a new instance, without calling __init__.""" flavor = self.gcflavor diff --git a/pypy/rpython/annlowlevel.py b/pypy/rpython/annlowlevel.py --- a/pypy/rpython/annlowlevel.py +++ b/pypy/rpython/annlowlevel.py @@ -480,26 +480,7 @@ # ____________________________________________________________ def cast_object_to_ptr(PTR, object): - """NOT_RPYTHON: hack. The object may be disguised as a PTR now. - Limited to casting a given object to a single type. 
- """ - if isinstance(PTR, lltype.Ptr): - TO = PTR.TO - else: - TO = PTR - if not hasattr(object, '_carry_around_for_tests'): - assert not hasattr(object, '_TYPE') - object._carry_around_for_tests = True - object._TYPE = TO - else: - assert object._TYPE == TO - # - if isinstance(PTR, lltype.Ptr): - return lltype._ptr(PTR, object, True) - elif isinstance(PTR, ootype.Instance): - return object - else: - raise NotImplementedError("cast_object_to_ptr(%r, ...)" % PTR) + raise NotImplementedError("cast_object_to_ptr") def cast_instance_to_base_ptr(instance): return cast_object_to_ptr(base_ptr_lltype(), instance) @@ -554,13 +535,7 @@ # ____________________________________________________________ def cast_base_ptr_to_instance(Class, ptr): - """NOT_RPYTHON: hack. Reverse the hacking done in cast_object_to_ptr().""" - if isinstance(lltype.typeOf(ptr), lltype.Ptr): - ptr = ptr._as_obj() - if not isinstance(ptr, Class): - raise NotImplementedError("cast_base_ptr_to_instance: casting %r to %r" - % (ptr, Class)) - return ptr + raise NotImplementedError("cast_base_ptr_to_instance") class CastBasePtrToInstanceEntry(extregistry.ExtRegistryEntry): _about_ = cast_base_ptr_to_instance diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py --- a/pypy/jit/metainterp/optimizeopt/simplify.py +++ b/pypy/jit/metainterp/optimizeopt/simplify.py @@ -20,9 +20,6 @@ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result) self.emit_operation(op) - def optimize_QUASIIMMUT_FIELD(self, op): - pass - def propagate_forward(self, op): opnum = op.getopnum() for value, func in optimize_ops: diff --git a/pypy/rpython/ootypesystem/ootype.py b/pypy/rpython/ootypesystem/ootype.py --- a/pypy/rpython/ootypesystem/ootype.py +++ b/pypy/rpython/ootypesystem/ootype.py @@ -268,14 +268,13 @@ return self._superclass._get_fields_with_default() + self._fields_with_default def _immutable_field(self, field): - if self._hints.get('immutable'): - return True if 
'immutable_fields' in self._hints: try: - return self._hints['immutable_fields'].fields[field] + s = self._hints['immutable_fields'].fields[field] + return s or True except KeyError: pass - return False + return self._hints.get('immutable', False) class SpecializableType(OOType): diff --git a/pypy/rpython/test/test_rclass.py b/pypy/rpython/test/test_rclass.py --- a/pypy/rpython/test/test_rclass.py +++ b/pypy/rpython/test/test_rclass.py @@ -5,8 +5,6 @@ from pypy.rpython.ootypesystem import ootype from pypy.rlib.rarithmetic import intmask, r_longlong from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin -from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE -from pypy.rpython.rclass import IR_QUASI_IMMUTABLE from pypy.objspace.flow.model import summary class EmptyBase(object): @@ -748,10 +746,8 @@ t, typer, graph = self.gengraph(f, []) A_TYPE = deref(graph.getreturnvar().concretetype) accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_ARRAY_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_ARRAY_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : "[*]"} or \ + accessor.fields == {"ox" : "", "oy" : "[*]"} # for ootype def test_immutable_fields_subclass_1(self): from pypy.jit.metainterp.typesystem import deref @@ -769,8 +765,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_x": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : ""} or \ + accessor.fields == {"ox" : ""} # for ootype def test_immutable_fields_subclass_2(self): from pypy.jit.metainterp.typesystem import deref @@ -789,10 +785,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert 
accessor.fields == {"inst_x": IR_IMMUTABLE, - "inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_x" : "", "inst_y" : ""} or \ + accessor.fields == {"ox" : "", "oy" : ""} # for ootype def test_immutable_fields_only_in_subclass(self): from pypy.jit.metainterp.typesystem import deref @@ -810,8 +804,8 @@ t, typer, graph = self.gengraph(f, []) B_TYPE = deref(graph.getreturnvar().concretetype) accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE} or \ - accessor.fields == {"oy": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_y" : ""} or \ + accessor.fields == {"oy" : ""} # for ootype def test_immutable_forbidden_inheritance_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -855,8 +849,8 @@ except AttributeError: A_TYPE = B_TYPE._superclass # for ootype accessor = A_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_v": IR_IMMUTABLE} or \ - accessor.fields == {"ov": IR_IMMUTABLE} # for ootype + assert accessor.fields == {"inst_v" : ""} or \ + accessor.fields == {"ov" : ""} # for ootype def test_immutable_subclass_1(self): from pypy.rpython.rclass import ImmutableConflictError @@ -901,37 +895,6 @@ B_TYPE = deref(graph.getreturnvar().concretetype) assert B_TYPE._hints["immutable"] - def test_quasi_immutable(self): - from pypy.jit.metainterp.typesystem import deref - class A(object): - _immutable_fields_ = ['x', 'y', 'a?', 'b?'] - class B(A): - pass - def f(): - a = A() - a.x = 42 - a.a = 142 - b = B() - b.x = 43 - b.y = 41 - b.a = 44 - b.b = 45 - return B() - t, typer, graph = self.gengraph(f, []) - B_TYPE = deref(graph.getreturnvar().concretetype) - accessor = B_TYPE._hints["immutable_fields"] - assert accessor.fields == {"inst_y": IR_IMMUTABLE, - "inst_b": IR_QUASI_IMMUTABLE} or \ - accessor.fields == {"ox": IR_IMMUTABLE, - "oy": IR_IMMUTABLE, - "oa": IR_QUASI_IMMUTABLE, - "ob": 
IR_QUASI_IMMUTABLE} # for ootype - found = [] - for op in graph.startblock.operations: - if op.opname == 'jit_force_quasi_immutable': - found.append(op.args[1].value) - assert found == ['mutate_a', 'mutate_a', 'mutate_b'] - class TestLLtype(BaseTestRclass, LLRtypeMixin): diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -48,12 +48,11 @@ class GuardToken(object): - def __init__(self, faildescr, failargs, fail_locs, exc, has_jump): + def __init__(self, faildescr, failargs, fail_locs, exc): self.faildescr = faildescr self.failargs = failargs self.fail_locs = fail_locs self.exc = exc - self.has_jump = has_jump DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', ('i', lltype.Signed)) @@ -134,7 +133,6 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" self.current_clt = looptoken.compiled_loop_token - self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -143,7 +141,6 @@ allblocks) def teardown(self): - self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -438,24 +435,15 @@ # tok.faildescr._x86_adr_jump_offset to contain the raw address of # the 4-byte target field in the JMP/Jcond instruction, and patch # the field in question to point (initially) to the recovery stub - inv_counter = 0 - clt = self.current_clt for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset tok.faildescr._x86_adr_jump_offset = addr relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - if tok.has_jump: - mc = codebuf.MachineCodeBlockWrapper() - mc.writeimm32(relative_target) - mc.copy_to_raw_memory(addr) - else: - # guard not invalidate, patch where it jumps - pos, _ = self.invalidate_positions[inv_counter] - 
clt.invalidate_positions.append((pos + rawstart, - relative_target)) - inv_counter += 1 + mc = codebuf.MachineCodeBlockWrapper() + mc.writeimm32(relative_target) + mc.copy_to_raw_memory(addr) def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1459,13 +1447,6 @@ self.mc.CMP(heap(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') - def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, - locs, ign_2): - pos = self.mc.get_relative_pos() + 1 # after potential jmp - guard_token.pos_jump_offset = pos - self.invalidate_positions.append((pos, 0)) - self.pending_guard_tokens.append(guard_token) - def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, locs, resloc): loc = locs[0] @@ -1564,8 +1545,7 @@ exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) - return GuardToken(faildescr, failargs, fail_locs, exc, has_jump= - guard_opnum != rop.GUARD_NOT_INVALIDATED) + return GuardToken(faildescr, failargs, fail_locs, exc) def generate_quick_failure(self, guardtok): """Generate the initial code for handling a failure. 
We try to From commits-noreply at bitbucket.org Mon Apr 18 08:13:59 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:13:59 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: update pypyjit_demo Message-ID: <20110418061359.9FE6F282BEB@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43435:d6614c7e5628 Date: 2011-04-15 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/d6614c7e5628/ Log: update pypyjit_demo diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,15 +1,16 @@ try: - def main(n): - def g(n): - return range(n) - s = 0 - for i in range(n): # ID: for - tmp = g(n) - s += tmp[i] # ID: getitem - a = 0 - return s - main(10) + class A(object): + def meth(self): + for k in range(4): + pass + + def f(): + a = A() + for i in range(20): + a.meth() + + f() except Exception, e: print "Exception: ", type(e) From commits-noreply at bitbucket.org Mon Apr 18 08:14:05 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:05 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: fix test Message-ID: <20110418061405.736BA282BEC@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43436:24025d0ba2f4 Date: 2011-04-15 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/24025d0ba2f4/ Log: fix test diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -6,7 +6,7 @@ from pypy.jit.metainterp import typesystem from pypy.jit.metainterp.quasiimmut import QuasiImmut from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from 
pypy.rlib.jit import JitDriver, dont_look_inside From commits-noreply at bitbucket.org Mon Apr 18 08:14:13 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:13 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: merge default Message-ID: <20110418061413.F0EAE282BEB@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43437:41a65e7a3055 Date: 2011-04-15 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/41a65e7a3055/ Log: merge default diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) @@ -314,7 +314,7 @@ # it matched! The '...' operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. 
it matches all the operations until it finds one that matches @@ -333,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -348,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -357,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms 
-Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. """ + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -222,251 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', 
'-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - - def test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 - i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) - - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from 
array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -763,6 +766,8 @@ """ if T is lltype.Void: return None + 
if isinstance(T, lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): ptrval = ctypes.cast(cobj, ctypes.c_void_p).value if not cobj or not ptrval: # NULL pointer diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. / x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. """ + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." 
+ at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): @@ -1014,7 +1021,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert 
lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. 
return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -100,11 +102,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ -234,6 +236,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) 
+ """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): @@ -253,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import _pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(_pytest.runner.Skipped, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -56,6 +56,7 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # 
____________________________________________________________ # @@ -94,7 +95,8 @@ return y != y def ll_math_isinf(y): - return y != 0 and y * .5 == y + # Use a bitwise OR so the JIT doesn't produce 2 different guards. + return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -797,6 +797,21 @@ hints={'immutable_fields': FieldListAccessor({'x': 1234})}) assert S._immutable_field('x') == 1234 +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = 
FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -838,7 +838,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -849,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -867,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -888,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -899,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -917,11 +917,13 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -933,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -946,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1071,3 +1073,460 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=200) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) + i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) 
+ i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) 
+ i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) + i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= len(self): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) 
+ f21 = getarrayitem_raw(i13, i18, descr=...) + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + f26 = getarrayitem_raw(i13, i6, descr=...) + f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... + """) + + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + assert len(self) == 256 + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... 
+ """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) + i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) + + def test_iter_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
+ """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stdout.write('SKIP: cannot import _ffi') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 From commits-noreply at bitbucket.org Mon Apr 18 08:14:14 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:14 +0200 (CEST) Subject: [pypy-svn] pypy default: two more operations to ignore Message-ID: <20110418061414.B01C8282BEB@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43438:c5cf2f7b9733 Date: 2011-04-18 08:07 +0200 http://bitbucket.org/pypy/pypy/changeset/c5cf2f7b9733/ Log: two more operations to ignore diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -457,6 +457,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', 
+ 'paddq', 'pinsr', # zero-extending moves should not produce GC pointers 'movz', ]) From commits-noreply at bitbucket.org Mon Apr 18 08:14:15 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:15 +0200 (CEST) Subject: [pypy-svn] pypy default: don't list some ops twice Message-ID: <20110418061415.E3850282BEB@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43439:2d96e93e4a47 Date: 2011-04-18 08:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2d96e93e4a47/ Log: don't list some ops twice diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -446,7 +446,6 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', - 'pslld', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', From commits-noreply at bitbucket.org Mon Apr 18 08:14:22 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:22 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: update pypyjit_demo Message-ID: <20110418061422.325B6282BEC@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43435:d6614c7e5628 Date: 2011-04-15 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/d6614c7e5628/ Log: update pypyjit_demo diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,15 +1,16 @@ try: - def main(n): - def g(n): - return range(n) - s = 0 - for i in range(n): # ID: for - tmp = g(n) - s += tmp[i] # ID: getitem - a = 0 - return s - main(10) + class A(object): + def meth(self): + for k in range(4): + pass + + def f(): + a = A() + for i in range(20): + a.meth() + + f() except Exception, e: print "Exception: ", 
type(e) From commits-noreply at bitbucket.org Mon Apr 18 08:14:23 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:23 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: fix test Message-ID: <20110418061423.012D1282BEC@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43436:24025d0ba2f4 Date: 2011-04-15 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/24025d0ba2f4/ Log: fix test diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -6,7 +6,7 @@ from pypy.jit.metainterp import typesystem from pypy.jit.metainterp.quasiimmut import QuasiImmut from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver, dont_look_inside From commits-noreply at bitbucket.org Mon Apr 18 08:14:31 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:31 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: merge default Message-ID: <20110418061431.070DE282BF2@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43437:41a65e7a3055 Date: 2011-04-15 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/41a65e7a3055/ Log: merge default diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return 
matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) @@ -314,7 +314,7 @@ # it matched! The '...' operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. it matches all the operations until it finds one that matches @@ -333,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -348,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -357,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db 
= LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. """ + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -222,251 +222,6 @@ total += f(i, 5) return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - - - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - 
idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - - def test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 - i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) - - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - 
self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py 
b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -763,6 +766,8 @@ """ if T is lltype.Void: return None + if isinstance(T, lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): ptrval = ctypes.cast(cobj, ctypes.c_void_p).value if not cobj or not ptrval: # NULL pointer diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -21,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. / x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -818,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -70,11 +70,35 @@ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." + at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir) diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert 
get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): @@ -1014,7 +1021,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. 
if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. 
return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -100,11 +102,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ -234,6 +236,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) 
+ """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): @@ -253,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import _pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(_pytest.runner.Skipped, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -56,6 +56,7 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) # 
____________________________________________________________ # @@ -94,7 +95,8 @@ return y != y def ll_math_isinf(y): - return y != 0 and y * .5 == y + # Use a bitwise OR so the JIT doesn't produce 2 different guards. + return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -797,6 +797,21 @@ hints={'immutable_fields': FieldListAccessor({'x': 1234})}) assert S._immutable_field('x') == 1234 +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = 
FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -838,7 +838,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -849,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -867,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -888,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -899,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -917,11 +917,13 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -933,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -946,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1071,3 +1073,460 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=200) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) + i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) 
+ i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) 
+ i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) + i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= len(self): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) 
+ f21 = getarrayitem_raw(i13, i18, descr=...) + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + f26 = getarrayitem_raw(i13, i6, descr=...) + f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... + """) + + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + assert len(self) == 256 + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... 
+ """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) + i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) + + def test_iter_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
+ """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stdout.write('SKIP: cannot import _ffi') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 From commits-noreply at bitbucket.org Mon Apr 18 08:14:31 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:31 +0200 (CEST) Subject: [pypy-svn] pypy default: two more operations to ignore Message-ID: <20110418061431.94924282BEB@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43438:c5cf2f7b9733 Date: 2011-04-18 08:07 +0200 http://bitbucket.org/pypy/pypy/changeset/c5cf2f7b9733/ Log: two more operations to ignore diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -457,6 +457,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', 
+ 'paddq', 'pinsr', # zero-extending moves should not produce GC pointers 'movz', ]) From commits-noreply at bitbucket.org Mon Apr 18 08:14:32 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 08:14:32 +0200 (CEST) Subject: [pypy-svn] pypy default: don't list some ops twice Message-ID: <20110418061432.2F1E3282BEB@codespeak.net> Author: Maciej Fijalkowski Branch: Changeset: r43439:2d96e93e4a47 Date: 2011-04-18 08:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2d96e93e4a47/ Log: don't list some ops twice diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -446,7 +446,6 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', - 'pslld', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', From commits-noreply at bitbucket.org Mon Apr 18 10:16:06 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 18 Apr 2011 10:16:06 +0200 (CEST) Subject: [pypy-svn] pypy default: issue686 (nekto0n) Fix dir() on objects with a broken __getattribute__ Message-ID: <20110418081606.97DC5282BEB@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43440:addc4bcf08a3 Date: 2011-04-18 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/addc4bcf08a3/ Log: issue686 (nekto0n) Fix dir() on objects with a broken __getattribute__ diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -110,6 +110,13 @@ __dict__ = 8 raises(TypeError, dir, Foo("foo")) + def test_dir_broken_object(self): + class Foo(object): + x = 3 + def __getattribute__(self, name): + return name + assert dir(Foo()) == [] 
+ def test_dir_custom(self): class Foo(object): def __dir__(self): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -76,8 +76,8 @@ result.sort() return result - elif hasattr(obj, '__dir__'): - result = obj.__dir__() + elif hasattr(type(obj), '__dir__'): + result = type(obj).__dir__(obj) if not isinstance(result, list): raise TypeError("__dir__() must return a list, not %r" % ( type(result),)) @@ -87,11 +87,14 @@ else: #(regular item) Dict = {} try: - Dict.update(obj.__dict__) - except AttributeError: pass + if isinstance(obj.__dict__, dict): + Dict.update(obj.__dict__) + except AttributeError: + pass try: Dict.update(_classdir(obj.__class__)) - except AttributeError: pass + except AttributeError: + pass ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). @@ -99,10 +102,14 @@ ## XXX needed to get at im_self etc of method objects. 
*/ for attr in ['__members__','__methods__']: try: - for item in getattr(obj, attr): + l = getattr(obj, attr) + if not isinstance(l, list): + continue + for item in l: if isinstance(item, types.StringTypes): Dict[item] = None - except (AttributeError, TypeError): pass + except (AttributeError, TypeError): + pass result = Dict.keys() result.sort() From commits-noreply at bitbucket.org Mon Apr 18 10:16:10 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 18 Apr 2011 10:16:10 +0200 (CEST) Subject: [pypy-svn] pypy default: issue686 (nekto0n) Fix dir() on objects with a broken __getattribute__ Message-ID: <20110418081610.B7FB1282BF7@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43440:addc4bcf08a3 Date: 2011-04-18 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/addc4bcf08a3/ Log: issue686 (nekto0n) Fix dir() on objects with a broken __getattribute__ diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -110,6 +110,13 @@ __dict__ = 8 raises(TypeError, dir, Foo("foo")) + def test_dir_broken_object(self): + class Foo(object): + x = 3 + def __getattribute__(self, name): + return name + assert dir(Foo()) == [] + def test_dir_custom(self): class Foo(object): def __dir__(self): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -76,8 +76,8 @@ result.sort() return result - elif hasattr(obj, '__dir__'): - result = obj.__dir__() + elif hasattr(type(obj), '__dir__'): + result = type(obj).__dir__(obj) if not isinstance(result, list): raise TypeError("__dir__() must return a list, not %r" % ( type(result),)) @@ -87,11 +87,14 @@ else: #(regular item) Dict = {} try: - Dict.update(obj.__dict__) - except AttributeError: pass + if isinstance(obj.__dict__, dict): + 
Dict.update(obj.__dict__) + except AttributeError: + pass try: Dict.update(_classdir(obj.__class__)) - except AttributeError: pass + except AttributeError: + pass ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). @@ -99,10 +102,14 @@ ## XXX needed to get at im_self etc of method objects. */ for attr in ['__members__','__methods__']: try: - for item in getattr(obj, attr): + l = getattr(obj, attr) + if not isinstance(l, list): + continue + for item in l: if isinstance(item, types.StringTypes): Dict[item] = None - except (AttributeError, TypeError): pass + except (AttributeError, TypeError): + pass result = Dict.keys() result.sort() From commits-noreply at bitbucket.org Mon Apr 18 10:53:42 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 18 Apr 2011 10:53:42 +0200 (CEST) Subject: [pypy-svn] buildbot default: ignore duplicate commits in case the same payload is sent twice by bitbucket Message-ID: <20110418085342.28A22282BEB@codespeak.net> Author: Antonio Cuni Branch: Changeset: r449:56a99650a46b Date: 2011-04-18 10:50 +0200 http://bitbucket.org/pypy/buildbot/changeset/56a99650a46b/ Log: ignore duplicate commits in case the same payload is sent twice by bitbucket diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -76,6 +76,22 @@ class BitbucketHookHandler(object): Popen, PIPE = Popen, PIPE + + def __init__(self): + self.seen_nodes = set() + + def get_commits(self, service, payload): + import operator + commits = sorted(self.payload['commits'], + key=operator.itemgetter('revision')) + for commit in commits: + node = commit['raw_node'] + key = service, node + if key in self.seen_nodes: + continue + self.seen_nodes.add(key) + yield commit + def _hgexe(self, argv): proc = self.Popen([hgexe] + list(argv), stdout=self.PIPE, stderr=self.PIPE) @@ -119,12 +135,15 @@ else: return self.call_subprocess([BOT, CHANNEL, message]) + def check_for_local_repo(self, 
local_repo): + return local_repo.check(dir=True) + def handle(self, payload, test=False): path = payload['repository']['absolute_url'] self.payload = payload self.local_repo = LOCAL_REPOS.join(path) self.remote_repo = REMOTE_BASE + path - if not self.local_repo.check(dir=True): + if not self.check_for_local_repo(self.local_repo): print >> sys.stderr, 'Ignoring unknown repo', path return self.hg('pull', '-R', self.local_repo) @@ -134,9 +153,7 @@ USE_COLOR_CODES = True LISTFILES = False def handle_irc_message(self, test=False): - import operator - commits = sorted(self.payload['commits'], - key=operator.itemgetter('revision')) + commits = self.get_commits('irc', self.payload) if test: print "#" * 20 print "IRC messages:" @@ -171,9 +188,7 @@ self.send_irc_message(irc_msg, test) def handle_diff_email(self, test=False): - import operator - commits = sorted(self.payload['commits'], - key=operator.itemgetter('revision')) + commits = self.get_commits('email', self.payload) for commit in commits: self.send_diff_for_commit(commit, test) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -4,12 +4,19 @@ class BaseHandler(BitbucketHookHandler): + USE_COLOR_CODES = False + def __init__(self): + BitbucketHookHandler.__init__(self) self.mails = [] + self.messages = [] def send(self, from_, to, subject, body, test=False): self.mails.append((from_, to, subject, body)) + def send_irc_message(self, message, test=False): + self.messages.append(message) + def test_non_ascii_encoding_guess_utf8(): class MyHandler(BaseHandler): @@ -34,14 +41,15 @@ def test_sort_commits(): class MyHandler(BaseHandler): def __init__(self): + BaseHandler.__init__(self) self.sent_commits = [] def send_diff_for_commit(self, commit, test=False): self.sent_commits.append(commit['node']) # handler = MyHandler() handler.payload = { - 'commits': [{'revision': 43, 'node': 'second'}, - {'revision': 42, 
'node': 'first'}] + 'commits': [{'revision': 43, 'node': 'second', 'raw_node': 'first'}, + {'revision': 42, 'node': 'first', 'raw_node': 'second'}] } handler.handle_diff_email() assert handler.sent_commits == ['first', 'second'] @@ -182,26 +190,21 @@ def test_irc_message(): - class MyHandler(BaseHandler): - USE_COLOR_CODES = False - def __init__(self): - self.messages = [] - def send_irc_message(self, message, test=False): - self.messages.append(message) - - handler = MyHandler() + handler = BaseHandler() handler.payload = { 'commits': [{'revision': 42, 'branch': u'default', 'author': u'antocuni', 'message': u'this is a test', - 'node': 'abcdef' + 'node': 'abcdef', + 'raw_node': 'abcdef', }, {'revision': 43, 'author': u'antocuni', 'branch': u'mybranch', 'message': LONG_MESSAGE, - 'node': 'xxxyyy' + 'node': 'xxxyyy', + 'raw_node': 'xxxyyy', } ]} @@ -246,3 +249,26 @@ handler.handle(test_payload) handler.handle(test_payload, test=True) + +def test_ignore_duplicate_commits(): + class MyHandler(BaseHandler): + def hg(self, *args): + return '' % ' '.join(map(str, args)) + def check_for_local_repo(self, local_repo): + return True + + handler = MyHandler() + commits, _ = irc_cases() + payload = {u'repository': {u'absolute_url': '', + u'name': u'test', + u'owner': u'antocuni', + u'slug': u'test', + u'website': u''}, + u'user': u'antocuni', + 'commits': commits['commits']} + handler.handle(payload) + handler.handle(payload) + # + num_commits = len(commits['commits']) + assert len(handler.mails) == num_commits + assert len(handler.messages) == num_commits From commits-noreply at bitbucket.org Mon Apr 18 10:53:45 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 18 Apr 2011 10:53:45 +0200 (CEST) Subject: [pypy-svn] buildbot default: ignore duplicate commits in case the same payload is sent twice by bitbucket Message-ID: <20110418085345.18725282BF7@codespeak.net> Author: Antonio Cuni Branch: Changeset: r449:56a99650a46b Date: 2011-04-18 10:50 +0200 
http://bitbucket.org/pypy/buildbot/changeset/56a99650a46b/ Log: ignore duplicate commits in case the same payload is sent twice by bitbucket diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -76,6 +76,22 @@ class BitbucketHookHandler(object): Popen, PIPE = Popen, PIPE + + def __init__(self): + self.seen_nodes = set() + + def get_commits(self, service, payload): + import operator + commits = sorted(self.payload['commits'], + key=operator.itemgetter('revision')) + for commit in commits: + node = commit['raw_node'] + key = service, node + if key in self.seen_nodes: + continue + self.seen_nodes.add(key) + yield commit + def _hgexe(self, argv): proc = self.Popen([hgexe] + list(argv), stdout=self.PIPE, stderr=self.PIPE) @@ -119,12 +135,15 @@ else: return self.call_subprocess([BOT, CHANNEL, message]) + def check_for_local_repo(self, local_repo): + return local_repo.check(dir=True) + def handle(self, payload, test=False): path = payload['repository']['absolute_url'] self.payload = payload self.local_repo = LOCAL_REPOS.join(path) self.remote_repo = REMOTE_BASE + path - if not self.local_repo.check(dir=True): + if not self.check_for_local_repo(self.local_repo): print >> sys.stderr, 'Ignoring unknown repo', path return self.hg('pull', '-R', self.local_repo) @@ -134,9 +153,7 @@ USE_COLOR_CODES = True LISTFILES = False def handle_irc_message(self, test=False): - import operator - commits = sorted(self.payload['commits'], - key=operator.itemgetter('revision')) + commits = self.get_commits('irc', self.payload) if test: print "#" * 20 print "IRC messages:" @@ -171,9 +188,7 @@ self.send_irc_message(irc_msg, test) def handle_diff_email(self, test=False): - import operator - commits = sorted(self.payload['commits'], - key=operator.itemgetter('revision')) + commits = self.get_commits('email', self.payload) for commit in commits: self.send_diff_for_commit(commit, test) diff --git a/bitbucket_hook/test/test_hook.py 
b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -4,12 +4,19 @@ class BaseHandler(BitbucketHookHandler): + USE_COLOR_CODES = False + def __init__(self): + BitbucketHookHandler.__init__(self) self.mails = [] + self.messages = [] def send(self, from_, to, subject, body, test=False): self.mails.append((from_, to, subject, body)) + def send_irc_message(self, message, test=False): + self.messages.append(message) + def test_non_ascii_encoding_guess_utf8(): class MyHandler(BaseHandler): @@ -34,14 +41,15 @@ def test_sort_commits(): class MyHandler(BaseHandler): def __init__(self): + BaseHandler.__init__(self) self.sent_commits = [] def send_diff_for_commit(self, commit, test=False): self.sent_commits.append(commit['node']) # handler = MyHandler() handler.payload = { - 'commits': [{'revision': 43, 'node': 'second'}, - {'revision': 42, 'node': 'first'}] + 'commits': [{'revision': 43, 'node': 'second', 'raw_node': 'first'}, + {'revision': 42, 'node': 'first', 'raw_node': 'second'}] } handler.handle_diff_email() assert handler.sent_commits == ['first', 'second'] @@ -182,26 +190,21 @@ def test_irc_message(): - class MyHandler(BaseHandler): - USE_COLOR_CODES = False - def __init__(self): - self.messages = [] - def send_irc_message(self, message, test=False): - self.messages.append(message) - - handler = MyHandler() + handler = BaseHandler() handler.payload = { 'commits': [{'revision': 42, 'branch': u'default', 'author': u'antocuni', 'message': u'this is a test', - 'node': 'abcdef' + 'node': 'abcdef', + 'raw_node': 'abcdef', }, {'revision': 43, 'author': u'antocuni', 'branch': u'mybranch', 'message': LONG_MESSAGE, - 'node': 'xxxyyy' + 'node': 'xxxyyy', + 'raw_node': 'xxxyyy', } ]} @@ -246,3 +249,26 @@ handler.handle(test_payload) handler.handle(test_payload, test=True) + +def test_ignore_duplicate_commits(): + class MyHandler(BaseHandler): + def hg(self, *args): + return '' % ' '.join(map(str, args)) + def 
check_for_local_repo(self, local_repo): + return True + + handler = MyHandler() + commits, _ = irc_cases() + payload = {u'repository': {u'absolute_url': '', + u'name': u'test', + u'owner': u'antocuni', + u'slug': u'test', + u'website': u''}, + u'user': u'antocuni', + 'commits': commits['commits']} + handler.handle(payload) + handler.handle(payload) + # + num_commits = len(commits['commits']) + assert len(handler.mails) == num_commits + assert len(handler.messages) == num_commits From commits-noreply at bitbucket.org Mon Apr 18 10:59:09 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 18 Apr 2011 10:59:09 +0200 (CEST) Subject: [pypy-svn] buildbot default: bah, a new handler is created at each request, so we have to store this value in a "more global" place; hopefully a class variable is enough Message-ID: <20110418085909.2D5D3282BEB@codespeak.net> Author: Antonio Cuni Branch: Changeset: r450:f093943ce80d Date: 2011-04-18 10:58 +0200 http://bitbucket.org/pypy/buildbot/changeset/f093943ce80d/ Log: bah, a new handler is created at each request, so we have to store this value in a "more global" place; hopefully a class variable is enough diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -77,8 +77,7 @@ class BitbucketHookHandler(object): Popen, PIPE = Popen, PIPE - def __init__(self): - self.seen_nodes = set() + seen_nodes = set() def get_commits(self, service, payload): import operator diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -252,6 +252,9 @@ def test_ignore_duplicate_commits(): class MyHandler(BaseHandler): + seen_nodes = set() # make sure we do not depend on what the other + # tests did + def hg(self, *args): return '' % ' '.join(map(str, args)) def check_for_local_repo(self, local_repo): From commits-noreply at bitbucket.org Mon Apr 18 13:55:34 2011 From: 
commits-noreply at bitbucket.org (arigo) Date: Mon, 18 Apr 2011 13:55:34 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Fix test. Message-ID: <20110418115534.981CC282BEC@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43441:bd266e9648e6 Date: 2011-04-17 19:00 +0200 http://bitbucket.org/pypy/pypy/changeset/bd266e9648e6/ Log: Fix test. diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -56,6 +56,7 @@ self.single_gcref_descr = GcPtrFieldDescr('', 0) rewrite_assembler = GcLLDescr_framework.rewrite_assembler.im_func + supports_compressed_ptrs = False class TestRegallocDirectGcIntegration(object): From commits-noreply at bitbucket.org Mon Apr 18 13:55:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 18 Apr 2011 13:55:35 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Must round the size up, otherwise we are getting unaligned nursery pointers. Message-ID: <20110418115535.3038D282BF2@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43442:6e2a62ce61aa Date: 2011-04-17 23:16 +0200 http://bitbucket.org/pypy/pypy/changeset/6e2a62ce61aa/ Log: Must round the size up, otherwise we are getting unaligned nursery pointers. 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2096,6 +2096,7 @@ def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) + size = (size + WORD-1) & ~(WORD-1) # round up self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) From commits-noreply at bitbucket.org Mon Apr 18 13:55:36 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 18 Apr 2011 13:55:36 +0200 (CEST) Subject: [pypy-svn] pypy default: Test and fix: fastpath_malloc_varsize() used to not align the request Message-ID: <20110418115536.920EB282C1B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43443:407d02bfd2a7 Date: 2011-04-18 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/407d02bfd2a7/ Log: Test and fix: fastpath_malloc_varsize() used to not align the request to a multiple of WORD. 
This gives unaligned nursery pointers :-( diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -2090,6 +2090,7 @@ def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) + size = (size + WORD-1) & ~(WORD-1) # round up self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) self.mc.CMP(edx, heap(nursery_top_adr)) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -326,6 +326,8 @@ ARRAY = lltype.GcArray(lltype.Signed) arraydescr = cpu.arraydescrof(ARRAY) self.arraydescr = arraydescr + ARRAYCHAR = lltype.GcArray(lltype.Char) + arraychardescr = cpu.arraydescrof(ARRAYCHAR) self.namespace = locals().copy() @@ -388,3 +390,24 @@ finish(p0) ''' py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_array_of_char(self): + # check that fastpath_malloc_varsize() respects the alignment + # of the pointer in the nursery + ops = ''' + [] + p1 = new_array(1, descr=arraychardescr) + p2 = new_array(2, descr=arraychardescr) + p3 = new_array(3, descr=arraychardescr) + p4 = new_array(4, descr=arraychardescr) + finish(p1, p2, p3, p4) + ''' + self.interpret(ops, []) + p1 = self.getptr(0, llmemory.GCREF) + p2 = self.getptr(1, llmemory.GCREF) + p3 = self.getptr(2, llmemory.GCREF) + p4 = self.getptr(3, llmemory.GCREF) + assert p1._obj.intval & (WORD-1) == 0 # aligned + assert p2._obj.intval & (WORD-1) == 0 # aligned + assert p3._obj.intval & (WORD-1) == 0 # aligned + assert p4._obj.intval & (WORD-1) == 0 # aligned From commits-noreply at bitbucket.org Mon Apr 18 14:09:29 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 18 Apr 2011 14:09:29 +0200 (CEST) Subject: [pypy-svn] pypy 
new-dict-proxy: pypy's type dicts *can* be safely modified Message-ID: <20110418120929.018BD282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: new-dict-proxy Changeset: r43444:bea50689532e Date: 2011-04-17 09:38 +0200 http://bitbucket.org/pypy/pypy/changeset/bea50689532e/ Log: pypy's type dicts *can* be safely modified diff --git a/lib-python/modified-2.7.0/test/test_descr.py b/lib-python/modified-2.7.0/test/test_descr.py --- a/lib-python/modified-2.7.0/test/test_descr.py +++ b/lib-python/modified-2.7.0/test/test_descr.py @@ -3189,7 +3189,8 @@ except TypeError: pass else: - self.fail("%r's __dict__ can be modified" % cls) + if test_support.check_impl_detail(pypy=False): + self.fail("%r's __dict__ can be modified" % cls) # Modules also disallow __dict__ assignment class Module1(types.ModuleType, Base): From commits-noreply at bitbucket.org Mon Apr 18 14:09:30 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 18 Apr 2011 14:09:30 +0200 (CEST) Subject: [pypy-svn] pypy new-dict-proxy: add some test about changing type objects Message-ID: <20110418120930.93D5D282BF2@codespeak.net> Author: Carl Friedrich Bolz Branch: new-dict-proxy Changeset: r43445:7e817ab8c51c Date: 2011-04-17 09:39 +0200 http://bitbucket.org/pypy/pypy/changeset/7e817ab8c51c/ Log: add some test about changing type objects diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1016,6 +1016,25 @@ __weakref__ = 42 assert B().__weakref__ == 42 + def test_change_dict(self): + class A(object): + pass + + a = A() + A.x = 1 + assert A.__dict__["x"] == 1 + raises(AttributeError, "del A.__dict__") + raises((AttributeError, TypeError), "A.__dict__ = {}") + + def test_mutate_dict(self): + class A(object): + pass + + a = A() + A.x = 1 + assert A.__dict__["x"] == 1 + A.__dict__['x'] = 5 + assert A.x == 5 class AppTestMutableBuiltintypes: From 
commits-noreply at bitbucket.org Mon Apr 18 14:09:33 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 18 Apr 2011 14:09:33 +0200 (CEST) Subject: [pypy-svn] pypy new-dict-proxy: kill from_strdict_shared Message-ID: <20110418120933.E80FC282C1B@codespeak.net> Author: Carl Friedrich Bolz Branch: new-dict-proxy Changeset: r43446:6ee045ccb063 Date: 2011-04-17 09:40 +0200 http://bitbucket.org/pypy/pypy/changeset/6ee045ccb063/ Log: kill from_strdict_shared diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -615,7 +615,7 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) - w_dict = space.newdict(from_strdict_shared=w_obj.dict_w) + w_dict = w_obj.getdict(space) pto.c_tp_dict = make_ref(space, w_dict) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -34,13 +34,7 @@ @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, classofinstance=None, - from_strdict_shared=None, strdict=False): - if from_strdict_shared is not None: - assert w_type is None - assert not module and not instance and classofinstance is None - w_self = StrDictImplementation(space) - w_self.content = from_strdict_shared - return w_self + strdict=False): if space.config.objspace.std.withcelldict and module: from pypy.objspace.std.celldict import ModuleDictImplementation assert w_type is None diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -131,25 +131,6 @@ assert self.space.eq_w(space.call_function(get, w("33")), w(None)) assert 
self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) - def test_initialize_from_strdict_shared(self): - space = self.space - w = space.wrap - d = {"a": w(1), "b": w(2)} - w_d = space.newdict(from_strdict_shared=d) - assert self.space.eq_w(space.getitem(w_d, w("a")), w(1)) - assert self.space.eq_w(space.getitem(w_d, w("b")), w(2)) - - def test_initialize_from_strdict_really_shared(self): - space = self.space - w = space.wrap - d = {"a": w(1), "b": w(2)} - w_d = space.newdict(from_strdict_shared=d) - assert self.space.eq_w(space.getitem(w_d, w("a")), w(1)) - assert self.space.eq_w(space.getitem(w_d, w("b")), w(2)) - d["c"] = w(41) - assert self.space.eq_w(space.getitem(w_d, w("c")), w(41)) - - class AppTest_DictObject: @@ -766,12 +747,10 @@ def newtuple(self, l): return tuple(l) - def newdict(self, module=False, instance=False, classofinstance=None, - from_strdict_shared=None): + def newdict(self, module=False, instance=False, classofinstance=None): return W_DictMultiObject.allocate_and_init_instance( self, module=module, instance=instance, - classofinstance=classofinstance, - from_strdict_shared=from_strdict_shared) + classofinstance=classofinstance) def finditem_str(self, w_dict, s): return w_dict.getitem_str(s) # assume it's a multidict diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -303,11 +303,10 @@ return W_ListObject(list_w) def newdict(self, module=False, instance=False, classofinstance=None, - from_strdict_shared=None, strdict=False): + strdict=False): return W_DictMultiObject.allocate_and_init_instance( self, module=module, instance=instance, classofinstance=classofinstance, - from_strdict_shared=from_strdict_shared, strdict=strdict) def newslice(self, w_start, w_end, w_step): From commits-noreply at bitbucket.org Mon Apr 18 14:09:34 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 18 Apr 2011 14:09:34 +0200 (CEST) Subject: 
[pypy-svn] pypy new-dict-proxy: loop with class mutation Message-ID: <20110418120934.E5E1E282BF2@codespeak.net> Author: Carl Friedrich Bolz Branch: new-dict-proxy Changeset: r43447:254868b6286f Date: 2011-04-17 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/254868b6286f/ Log: loop with class mutation diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,20 +1,24 @@ try: - import pypyjit - pypyjit.set_param(threshold=3, inlining=True) + try: + import pypyjit + pypyjit.set_param(threshold=3, inlining=True) + except ImportError: + pass + class A(object): + x = 1 + y = 2 + def sqrt(y): + a = A() + for i in range(y): + assert a.y == 2 + assert A.__dict__['x'] == i + 1 + A.x += 1 + return a.x - def sqrt(y, n=10000): - x = y / 2 - while n > 0: - #assert y > 0 and x > 0 - if y > 0 and x > 0: pass - n -= 1 - x = (x + y/x) / 2 - return x + print sqrt(1000000) - print sqrt(1234, 4) - except Exception, e: print "Exception: ", type(e) print e - + From commits-noreply at bitbucket.org Mon Apr 18 14:11:12 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 18 Apr 2011 14:11:12 +0200 (CEST) Subject: [pypy-svn] pypy new-dict-proxy: merge default Message-ID: <20110418121112.4003B282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: new-dict-proxy Changeset: r43448:f0418b95a2b0 Date: 2011-04-18 14:09 +0200 http://bitbucket.org/pypy/pypy/changeset/f0418b95a2b0/ Log: merge default diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -854,6 +854,9 @@ def op_gc_adr_of_nursery_free(self): raise NotImplementedError + def op_gc_adr_of_root_stack_top(self): + raise NotImplementedError + def op_gc_call_rtti_destructor(self, rtti, addr): if hasattr(rtti._obj, 'destructor_funcptr'): d = rtti._obj.destructor_funcptr diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py --- 
a/pypy/rlib/test/test_runicode.py +++ b/pypy/rlib/test/test_runicode.py @@ -31,22 +31,28 @@ def checkdecode(self, s, encoding): decoder = self.getdecoder(encoding) - if isinstance(s, str): - trueresult = s.decode(encoding) - else: - trueresult = s - s = s.encode(encoding) + try: + if isinstance(s, str): + trueresult = s.decode(encoding) + else: + trueresult = s + s = s.encode(encoding) + except LookupError, e: + py.test.skip(e) result, consumed = decoder(s, len(s), True) assert consumed == len(s) self.typeequals(trueresult, result) def checkencode(self, s, encoding): encoder = self.getencoder(encoding) - if isinstance(s, unicode): - trueresult = s.encode(encoding) - else: - trueresult = s - s = s.decode(encoding) + try: + if isinstance(s, unicode): + trueresult = s.encode(encoding) + else: + trueresult = s + s = s.decode(encoding) + except LookupError, e: + py.test.skip(e) result = encoder(s, len(s), True) self.typeequals(trueresult, result) @@ -66,9 +72,10 @@ assert called[0] assert "42424242" in result - def checkdecodeerror(self, s, encoding, start, stop, addstuff=True): + def checkdecodeerror(self, s, encoding, start, stop, + addstuff=True, msg=None): called = [0] - def errorhandler(errors, enc, msg, t, startingpos, + def errorhandler(errors, enc, errmsg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: @@ -77,6 +84,8 @@ assert t is s assert start == startingpos assert stop == endingpos + if msg is not None: + assert errmsg == msg return u"42424242", stop return u"", endingpos decoder = self.getdecoder(encoding) @@ -90,7 +99,7 @@ class TestDecoding(UnicodeTests): - + # XXX test bom recognition in utf-16 # XXX test proper error handling @@ -131,6 +140,96 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + def test_ascii_error(self): + self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) + + def test_utf16_errors(self): + # trunkated BOM + for s in ["\xff", "\xfe"]: + self.checkdecodeerror(s, "utf-16", 0, len(s), 
addstuff=False) + + for s in [ + # unexpected end of data ascii + "\xff\xfeF", + # unexpected end of data + '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', + ]: + self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) + for s in [ + # illegal surrogate + "\xff\xfe\xff\xdb\xff\xff", + ]: + self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) + + def test_utf16_bugs(self): + s = '\x80-\xe9\xdeL\xa3\x9b' + py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, + s, len(s), True) + + def test_utf7_bugs(self): + u = u'A\u2262\u0391.' + assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' + + def test_utf7_tofrom_utf8_bug(self): + def _assert_decu7(input, expected): + assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) + + _assert_decu7('+-', u'+') + _assert_decu7('+-+-', u'++') + _assert_decu7('+-+AOQ-', u'+\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ-', u'\xe4') + _assert_decu7('+AOQ- ', u'\xe4 ') + _assert_decu7(' +AOQ-', u' \xe4') + _assert_decu7(' +AOQ- ', u' \xe4 ') + _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') + + s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' + s_utf8 = u'Die Männer ärgen sich!' + s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' 
+ + _assert_decu7(s_utf7, s_utf8_esc) + _assert_decu7(s_utf7, s_utf8) + + assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 + assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 + + def test_utf7_partial(self): + s = u"a+-b".encode('utf-7') + assert s == "a+--b" + decode = self.getdecoder('utf-7') + assert decode(s, 1, None) == (u'a', 1) + assert decode(s, 2, None) == (u'a', 1) + assert decode(s, 3, None) == (u'a+', 3) + assert decode(s, 4, None) == (u'a+-', 4) + assert decode(s, 5, None) == (u'a+-b', 5) + + def test_utf7_surrogates(self): + encode = self.getencoder('utf-7') + u = u'\U000abcde' + assert encode(u, len(u), None) == '+2m/c3g-' + decode = self.getdecoder('utf-7') + s = '+3ADYAA-' + raises(UnicodeError, decode, s, len(s), None) + def replace_handler(errors, codec, message, input, start, end): + return u'?', end + assert decode(s, len(s), None, final=True, + errorhandler = replace_handler) == (u'??', len(s)) + + +class TestUTF8Decoding(UnicodeTests): + def __init__(self): + self.decoder = self.getdecoder('utf-8') + + def replace_handler(self, errors, codec, message, input, start, end): + return u'\ufffd', end + + def ignore_handler(self, errors, codec, message, input, start, end): + return u'', end + + def to_bytestring(self, bytes): + return ''.join(chr(int(c, 16)) for c in bytes.split()) + def test_single_chars_utf8(self): for s in ["\xd7\x90", "\xd6\x96", "\xeb\x96\x95", "\xf0\x90\x91\x93"]: self.checkdecode(s, "utf-8") @@ -140,30 +239,297 @@ # This test will raise an error with python 3.x self.checkdecode(u"\ud800", "utf-8") + def test_invalid_start_byte(self): + """ + Test that an 'invalid start byte' error is raised when the first byte + is not in the ASCII range or is not a valid start byte of a 2-, 3-, or + 4-bytes sequence. The invalid start byte is replaced with a single + U+FFFD when errors='replace'. + E.g. <80> is a continuation byte and can appear only after a start byte. 
+ """ + FFFD = u'\ufffd' + for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': + raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) + self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, + msg='invalid start byte') + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.replace_handler) == (FFFD, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', 9)) + assert self.decoder(byte, 1, None, final=True, + errorhandler=self.ignore_handler) == (u'', 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', 9)) + + def test_unexpected_end_of_data(self): + """ + Test that an 'unexpected end of data' error is raised when the string + ends after a start byte of a 2-, 3-, or 4-bytes sequence without having + enough continuation bytes. The incomplete sequence is replaced with a + single U+FFFD when errors='replace'. + E.g. in the sequence , F3 is the start byte of a 4-bytes + sequence, but it's followed by only 2 valid continuation bytes and the + last continuation bytes is missing. + Note: the continuation bytes must be all valid, if one of them is + invalid another error will be raised. 
+ """ + sequences = [ + 'C2', 'DF', + 'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF', + 'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF', + 'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF', + 'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF', + 'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF', + 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' + ] + FFFD = u'\ufffd' + for seq in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq), addstuff=False, + msg='unexpected end of data') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (FFFD, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa'+ FFFD + u'bbbb', len(seq) + 8)) + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (u'', len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaabbbb', len(seq) + 8)) + + def test_invalid_cb_for_2bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte of a 2-bytes sequence is invalid. The start byte + is replaced by a single U+FFFD and the second byte is handled + separately when errors='replace'. + E.g. in the sequence , C2 is the start byte of a 2-bytes + sequence, but 41 is not a valid continuation byte because it's the + ASCII letter 'A'. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('C2 00', FFFD+u'\x00'), ('C2 7F', FFFD+u'\x7f'), + ('C2 C0', FFFDx2), ('C2 FF', FFFDx2), + ('DF 00', FFFD+u'\x00'), ('DF 7F', FFFD+u'\x7f'), + ('DF C0', FFFDx2), ('DF FF', FFFDx2), + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, 1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_3bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 3-bytes sequence are invalid. When + errors='replace', if the first continuation byte is valid, the first + two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the + third byte is handled separately, otherwise only the start byte is + replaced with a U+FFFD and the other continuation bytes are handled + separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('E0 00', FFFD+u'\x00'), ('E0 7F', FFFD+u'\x7f'), ('E0 80', FFFDx2), + ('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2), + ('E0 A0 00', FFFD+u'\x00'), ('E0 A0 7F', FFFD+u'\x7f'), + ('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2), + ('E0 BF 00', FFFD+u'\x00'), ('E0 BF 7F', FFFD+u'\x7f'), + ('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+u'\x00'), + ('E1 7F', FFFD+u'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2), + ('E1 80 00', FFFD+u'\x00'), ('E1 80 7F', FFFD+u'\x7f'), + ('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2), + ('E1 BF 00', FFFD+u'\x00'), ('E1 BF 7F', FFFD+u'\x7f'), + ('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+u'\x00'), + ('EC 7F', FFFD+u'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2), + ('EC 80 00', FFFD+u'\x00'), ('EC 80 7F', FFFD+u'\x7f'), + ('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2), + ('EC BF 00', FFFD+u'\x00'), ('EC BF 7F', FFFD+u'\x7f'), + ('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+u'\x00'), + ('ED 7F', FFFD+u'\x7f'), + # ('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^ + ('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+u'\x00'), + ('ED 80 7F', FFFD+u'\x7f'), ('ED 80 C0', FFFDx2), + ('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+u'\x00'), + ('ED 9F 7F', FFFD+u'\x7f'), ('ED 9F C0', FFFDx2), + ('ED 9F FF', FFFDx2), ('EE 00', FFFD+u'\x00'), + ('EE 7F', FFFD+u'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2), + ('EE 80 00', FFFD+u'\x00'), ('EE 80 7F', FFFD+u'\x7f'), + ('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2), + ('EE BF 00', FFFD+u'\x00'), ('EE BF 7F', FFFD+u'\x7f'), + ('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+u'\x00'), + ('EF 7F', FFFD+u'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2), + ('EF 80 00', FFFD+u'\x00'), ('EF 80 7F', FFFD+u'\x7f'), + ('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2), + ('EF BF 00', FFFD+u'\x00'), ('EF BF 7F', FFFD+u'\x7f'), + ('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2), + ] + for seq, res in sequences: + seq = 
self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + + def test_invalid_cb_for_4bytes_seq(self): + """ + Test that an 'invalid continuation byte' error is raised when the + continuation byte(s) of a 4-bytes sequence are invalid. When + errors='replace',the start byte and all the following valid + continuation bytes are replaced with a single U+FFFD, and all the bytes + starting from the first invalid continuation bytes (included) are + handled separately. + E.g. in the sequence , E1 is the start byte of a 3-bytes + sequence, 80 is a valid continuation byte, but 41 is not a valid cb + because it's the ASCII letter 'A'. + Note: when the start byte is E0 or ED, the valid ranges for the first + continuation byte are limited to A0..BF and 80..9F respectively. + However, when the start byte is ED, Python 2 considers all the bytes + in range 80..BF valid. This is fixed in Python 3. 
+ """ + FFFD = u'\ufffd' + FFFDx2 = FFFD * 2 + sequences = [ + ('F0 00', FFFD+u'\x00'), ('F0 7F', FFFD+u'\x7f'), ('F0 80', FFFDx2), + ('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2), + ('F0 90 00', FFFD+u'\x00'), ('F0 90 7F', FFFD+u'\x7f'), + ('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2), + ('F0 BF 00', FFFD+u'\x00'), ('F0 BF 7F', FFFD+u'\x7f'), + ('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2), + ('F0 90 80 00', FFFD+u'\x00'), ('F0 90 80 7F', FFFD+u'\x7f'), + ('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2), + ('F0 90 BF 00', FFFD+u'\x00'), ('F0 90 BF 7F', FFFD+u'\x7f'), + ('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2), + ('F0 BF 80 00', FFFD+u'\x00'), ('F0 BF 80 7F', FFFD+u'\x7f'), + ('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2), + ('F0 BF BF 00', FFFD+u'\x00'), ('F0 BF BF 7F', FFFD+u'\x7f'), + ('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2), + ('F1 00', FFFD+u'\x00'), ('F1 7F', FFFD+u'\x7f'), ('F1 C0', FFFDx2), + ('F1 FF', FFFDx2), ('F1 80 00', FFFD+u'\x00'), + ('F1 80 7F', FFFD+u'\x7f'), ('F1 80 C0', FFFDx2), + ('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+u'\x00'), + ('F1 BF 7F', FFFD+u'\x7f'), ('F1 BF C0', FFFDx2), + ('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+u'\x00'), + ('F1 80 80 7F', FFFD+u'\x7f'), ('F1 80 80 C0', FFFDx2), + ('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+u'\x00'), + ('F1 80 BF 7F', FFFD+u'\x7f'), ('F1 80 BF C0', FFFDx2), + ('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+u'\x00'), + ('F1 BF 80 7F', FFFD+u'\x7f'), ('F1 BF 80 C0', FFFDx2), + ('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+u'\x00'), + ('F1 BF BF 7F', FFFD+u'\x7f'), ('F1 BF BF C0', FFFDx2), + ('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+u'\x00'), + ('F3 7F', FFFD+u'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2), + ('F3 80 00', FFFD+u'\x00'), ('F3 80 7F', FFFD+u'\x7f'), + ('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2), + ('F3 BF 00', FFFD+u'\x00'), ('F3 BF 7F', FFFD+u'\x7f'), + ('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2), + ('F3 80 80 00', FFFD+u'\x00'), ('F3 80 80 7F', FFFD+u'\x7f'), + ('F3 80 
80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2), + ('F3 80 BF 00', FFFD+u'\x00'), ('F3 80 BF 7F', FFFD+u'\x7f'), + ('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2), + ('F3 BF 80 00', FFFD+u'\x00'), ('F3 BF 80 7F', FFFD+u'\x7f'), + ('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2), + ('F3 BF BF 00', FFFD+u'\x00'), ('F3 BF BF 7F', FFFD+u'\x7f'), + ('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2), + ('F4 00', FFFD+u'\x00'), ('F4 7F', FFFD+u'\x7f'), ('F4 90', FFFDx2), + ('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2), + ('F4 80 00', FFFD+u'\x00'), ('F4 80 7F', FFFD+u'\x7f'), + ('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2), + ('F4 8F 00', FFFD+u'\x00'), ('F4 8F 7F', FFFD+u'\x7f'), + ('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2), + ('F4 80 80 00', FFFD+u'\x00'), ('F4 80 80 7F', FFFD+u'\x7f'), + ('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2), + ('F4 80 BF 00', FFFD+u'\x00'), ('F4 80 BF 7F', FFFD+u'\x7f'), + ('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2), + ('F4 8F 80 00', FFFD+u'\x00'), ('F4 8F 80 7F', FFFD+u'\x7f'), + ('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2), + ('F4 8F BF 00', FFFD+u'\x00'), ('F4 8F BF 7F', FFFD+u'\x7f'), + ('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2) + ] + for seq, res in sequences: + seq = self.to_bytestring(seq) + raises(UnicodeDecodeError, self.decoder, seq, len(seq), + None, final=True) + self.checkdecodeerror(seq, 'utf-8', 0, len(seq)-1, addstuff=False, + msg='invalid continuation byte') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.replace_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.replace_handler) == + (u'aaaa' + res + u'bbbb', len(seq) + 8)) + res = res.replace(FFFD, u'') + assert self.decoder(seq, len(seq), None, final=True, + errorhandler=self.ignore_handler) == (res, len(seq)) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, None, + final=True, errorhandler=self.ignore_handler) == + (u'aaaa' + res + u'bbbb', 
len(seq) + 8)) + def test_utf8_errors(self): - for s in [# unexpected end of data - "\xd7", "\xd6", "\xeb\x96", "\xf0\x90\x91"]: - self.checkdecodeerror(s, "utf-8", 0, len(s), addstuff=False) - - # unexpected code byte - for s in ["\x81", "\xbf"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + # unexpected end of data + for s in ['\xd7', '\xd6', '\xeb\x96', '\xf0\x90\x91', '\xc2', '\xdf']: + self.checkdecodeerror(s, 'utf-8', 0, len(s), addstuff=False, + msg='unexpected end of data') # invalid data 2 byte for s in ["\xd7\x50", "\xd6\x06", "\xd6\xD6"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') # invalid data 3 byte for s in ["\xeb\x56\x95", "\xeb\x06\x95", "\xeb\xD6\x95"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xeb\x96\x55", "\xeb\x96\x05", "\xeb\x96\xD5"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') # invalid data 4 byte for s in ["\xf0\x50\x91\x93", "\xf0\x00\x91\x93", "\xf0\xd0\x91\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 1, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x51\x93", "\xf0\x90\x01\x93", "\xf0\x90\xd1\x93"]: - self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 2, addstuff=True, + msg='invalid continuation byte') for s in ["\xf0\x90\x91\x53", "\xf0\x90\x91\x03", "\xf0\x90\x91\xd3"]: - self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True) + self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True, + msg='invalid continuation byte') def test_issue8271(self): @@ -249,97 +615,18 @@ ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64', 
u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'), ] - def replace_handler(errors, codec, message, input, start, end): - return FFFD, end - def ignore_handler(errors, codec, message, input, start, end): - return u'', end + for n, (seq, res) in enumerate(sequences): decoder = self.getdecoder('utf-8') raises(UnicodeDecodeError, decoder, seq, len(seq), None, final=True) assert decoder(seq, len(seq), None, final=True, - errorhandler=replace_handler) == (res, len(seq)) + errorhandler=self.replace_handler) == (res, len(seq)) assert decoder(seq + 'b', len(seq) + 1, None, final=True, - errorhandler=replace_handler) == (res + u'b', - len(seq) + 1) + errorhandler=self.replace_handler) == (res + u'b', + len(seq) + 1) res = res.replace(FFFD, u'') assert decoder(seq, len(seq), None, final=True, - errorhandler=ignore_handler) == (res, len(seq)) - - def test_ascii_error(self): - self.checkdecodeerror("abc\xFF\xFF\xFFcde", "ascii", 3, 4) - - def test_utf16_errors(self): - # trunkated BOM - for s in ["\xff", "\xfe"]: - self.checkdecodeerror(s, "utf-16", 0, len(s), addstuff=False) - - for s in [ - # unexpected end of data ascii - "\xff\xfeF", - # unexpected end of data - '\xff\xfe\xc0\xdb\x00', '\xff\xfe\xc0\xdb', '\xff\xfe\xc0', - ]: - self.checkdecodeerror(s, "utf-16", 2, len(s), addstuff=False) - for s in [ - # illegal surrogate - "\xff\xfe\xff\xdb\xff\xff", - ]: - self.checkdecodeerror(s, "utf-16", 2, 4, addstuff=False) - - def test_utf16_bugs(self): - s = '\x80-\xe9\xdeL\xa3\x9b' - py.test.raises(UnicodeDecodeError, runicode.str_decode_utf_16_le, - s, len(s), True) - - def test_utf7_bugs(self): - u = u'A\u2262\u0391.' - assert runicode.unicode_encode_utf_7(u, len(u), None) == 'A+ImIDkQ.' 
- - def test_utf7_tofrom_utf8_bug(self): - def _assert_decu7(input, expected): - assert runicode.str_decode_utf_7(input, len(input), None) == (expected, len(input)) - - _assert_decu7('+-', u'+') - _assert_decu7('+-+-', u'++') - _assert_decu7('+-+AOQ-', u'+\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ-', u'\xe4') - _assert_decu7('+AOQ- ', u'\xe4 ') - _assert_decu7(' +AOQ-', u' \xe4') - _assert_decu7(' +AOQ- ', u' \xe4 ') - _assert_decu7('+AOQ-+AOQ-', u'\xe4\xe4') - - s_utf7 = 'Die M+AOQ-nner +AOQ-rgen sich!' - s_utf8 = u'Die Männer ärgen sich!' - s_utf8_esc = u'Die M\xe4nner \xe4rgen sich!' - - _assert_decu7(s_utf7, s_utf8_esc) - _assert_decu7(s_utf7, s_utf8) - - assert runicode.unicode_encode_utf_7(s_utf8_esc, len(s_utf8_esc), None) == s_utf7 - assert runicode.unicode_encode_utf_7(s_utf8, len(s_utf8_esc), None) == s_utf7 - - def test_utf7_partial(self): - s = u"a+-b".encode('utf-7') - assert s == "a+--b" - decode = self.getdecoder('utf-7') - assert decode(s, 1, None) == (u'a', 1) - assert decode(s, 2, None) == (u'a', 1) - assert decode(s, 3, None) == (u'a+', 3) - assert decode(s, 4, None) == (u'a+-', 4) - assert decode(s, 5, None) == (u'a+-b', 5) - - def test_utf7_surrogates(self): - encode = self.getencoder('utf-7') - u = u'\U000abcde' - assert encode(u, len(u), None) == '+2m/c3g-' - decode = self.getdecoder('utf-7') - s = '+3ADYAA-' - raises(UnicodeError, decode, s, len(s), None) - def replace_handler(errors, codec, message, input, start, end): - return u'?', end - assert decode(s, len(s), None, final=True, - errorhandler = replace_handler) == (u'??', len(s)) + errorhandler=self.ignore_handler) == (res, len(seq)) class TestEncoding(UnicodeTests): @@ -376,7 +663,7 @@ self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_maxunicode(self): uni = unichr(sys.maxunicode) @@ -384,7 +671,7 @@ 
self.checkencode(uni, "utf-7") for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " "utf-32 utf-32-be utf-32-le").split(): - self.checkencode(uni, encoding) + self.checkencode(uni, encoding) def test_single_chars_utf8(self): # check every number of bytes per char @@ -394,7 +681,7 @@ def test_utf8_surrogates(self): # check replacing of two surrogates by single char while encoding # make sure that the string itself is not marshalled - u = u"\ud800" + u = u"\ud800" for i in range(4): u += u"\udc00" self.checkencode(u, "utf-8") @@ -422,7 +709,7 @@ def test_utf8(self): from pypy.rpython.test.test_llinterp import interpret def f(x): - + s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) s2 = runicode.unicode_encode_utf_8(u, len(u), True) @@ -438,6 +725,6 @@ u = runicode.UNICHR(x) t = runicode.ORD(u) return t - + res = interpret(f, [0x10140]) assert res == 0x10140 diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py --- a/pypy/interpreter/test/test_eval.py +++ b/pypy/interpreter/test/test_eval.py @@ -13,7 +13,8 @@ def __init__(self, space, code, numlocals): self.code = code - Frame.__init__(self, space, numlocals=numlocals) + Frame.__init__(self, space) + self.numlocals = numlocals self.fastlocals_w = [None] * self.numlocals def getcode(self): @@ -24,7 +25,10 @@ def getfastscope(self): return self.fastlocals_w - + + def getfastscopelength(self): + return self.numlocals + self.f = ConcreteFastscopeFrame(self.space, code, numlocals=5) diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -179,6 +179,9 @@ """ raise NotImplementedError + def count_fields_if_immutable(self): + return -1 + def _clone_if_mutable(self): return self def clone_if_mutable(self): diff --git a/pypy/translator/backendopt/constfold.py b/pypy/translator/backendopt/constfold.py --- 
a/pypy/translator/backendopt/constfold.py +++ b/pypy/translator/backendopt/constfold.py @@ -1,19 +1,16 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import c_last_exception from pypy.objspace.flow.model import mkentrymap -from pypy.translator.backendopt.support import split_block_with_keepalive from pypy.translator.backendopt.support import log from pypy.translator.simplify import eliminate_empty_blocks -from pypy.translator.unsimplify import insert_empty_block +from pypy.translator.unsimplify import insert_empty_block, split_block from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.lltypesystem import lltype def fold_op_list(operations, constants, exit_early=False, exc_catch=False): newops = [] - keepalives = [] folded_count = 0 - first_sideeffect_index = None for spaceop in operations: vargsmodif = False vargs = [] @@ -29,10 +26,9 @@ try: op = getattr(llop, spaceop.opname) except AttributeError: - sideeffects = True + pass else: - sideeffects = op.sideeffects - if not sideeffects and len(args) == len(vargs): + if not op.sideeffects and len(args) == len(vargs): RESTYPE = spaceop.result.concretetype try: result = op(RESTYPE, *args) @@ -53,10 +49,6 @@ # failed to fold an operation, exit early if requested if exit_early: return folded_count - if spaceop.opname == 'keepalive' and first_sideeffect_index is None: - if vargsmodif: - continue # keepalive(constant) is not useful - keepalives.append(spaceop) else: if vargsmodif: if (spaceop.opname == 'indirect_call' @@ -66,20 +58,11 @@ else: spaceop = SpaceOperation(spaceop.opname, vargs, spaceop.result) - if sideeffects and first_sideeffect_index is None: - first_sideeffect_index = len(newops) newops.append(spaceop) # end if exit_early: return folded_count else: - # move the keepalives to the end of the block, which makes the life - # of prepare_constant_fold_link() easier. Don't put them past the - # exception-raising operation, though. 
There is also no point in - # moving them past the first sideeffect-ing operation. - if first_sideeffect_index is None: - first_sideeffect_index = len(newops) - exc_catch - newops[first_sideeffect_index:first_sideeffect_index] = keepalives return newops def constant_fold_block(block): @@ -177,33 +160,23 @@ if block.exitswitch == c_last_exception: n -= 1 # is the next, non-folded operation an indirect_call? - m = folded_count - while m < n and block.operations[m].opname == 'keepalive': - m += 1 - if m < n: - nextop = block.operations[m] + if folded_count < n: + nextop = block.operations[folded_count] if nextop.opname == 'indirect_call' and nextop.args[0] in constants: # indirect_call -> direct_call callargs = [constants[nextop.args[0]]] constants1 = constants.copy() complete_constants(link, constants1) - newkeepalives = [] - for i in range(folded_count, m): - [v] = block.operations[i].args - v = constants1.get(v, v) - v_void = Variable() - v_void.concretetype = lltype.Void - newkeepalives.append(SpaceOperation('keepalive', [v], v_void)) for v in nextop.args[1:-1]: callargs.append(constants1.get(v, v)) v_result = Variable(nextop.result) v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, newkeepalives + [callop]) + newblock = insert_empty_block(None, link, [callop]) [link] = newblock.exits assert link.target is block - folded_count = m+1 + folded_count += 1 if folded_count > 0: splits = splitblocks.setdefault(block, []) @@ -226,7 +199,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block_with_keepalive(block, position) + splitlink = split_block(None, block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -119,13 +119,16 @@ Impara, Germany Change Maker, 
Sweden +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. -License for 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' +License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' ============================================================== Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files -in the 'lib-python/2.5.2' and 'lib-python/2.5.2-modified' directories +in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories are all copyrighted by the Python Software Foundation and licensed under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html @@ -158,21 +161,12 @@ ====================================== The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. - CompositionExclusions-3.2.0.txt - CompositionExclusions-4.1.0.txt - CompositionExclusions-5.0.0.txt - EastAsianWidth-3.2.0.txt - EastAsianWidth-4.1.0.txt - EastAsianWidth-5.0.0.txt - UnicodeData-3.2.0.txt - UnicodeData-4.1.0.txt - UnicodeData-5.0.0.txt - -The following files are derived from files from the above website. The same -terms of use apply. 
- UnihanNumeric-3.2.0.txt - UnihanNumeric-4.1.0.txt - UnihanNumeric-5.0.0.txt + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt diff --git a/pypy/translator/c/test/test_lltyped.py b/pypy/translator/c/test/test_lltyped.py --- a/pypy/translator/c/test/test_lltyped.py +++ b/pypy/translator/c/test/test_lltyped.py @@ -895,3 +895,10 @@ fn = self.getcompiled(llf) assert fn() == 45 + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def llf(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + fn = self.getcompiled(llf, [int]) + assert fn(0) == 42.3 diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py --- a/pypy/jit/backend/llsupport/test/test_gc.py +++ b/pypy/jit/backend/llsupport/test/test_gc.py @@ -9,7 +9,6 @@ from pypy.jit.tool.oparser import parse from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from pypy.jit.metainterp.test.test_optimizeopt import equaloplists -from pypy.rpython.memory.gctransform import asmgcroot def test_boehm(): gc_ll_descr = GcLLDescr_boehm(None, None, None) @@ -75,8 +74,8 @@ num2a = ((-num2|3) >> 7) | 128 num2b = (-num2|3) & 127 shape = gcrootmap.get_basic_shape() - gcrootmap.add_ebp_offset(shape, num1) - gcrootmap.add_ebp_offset(shape, num2) + gcrootmap.add_frame_offset(shape, num1) + gcrootmap.add_frame_offset(shape, num2) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a]) gcrootmap.add_callee_save_reg(shape, 1) assert shape == map(chr, [6, 7, 11, 15, 2, 0, num1a, num2b, num2a, @@ -228,6 +227,33 @@ gc.asmgcroot = saved +class TestGcRootMapShadowStack: + class FakeGcDescr: + force_index_ofs = 92 + + def test_make_shapes(self): + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = gcrootmap.get_basic_shape() + gcrootmap.add_frame_offset(shape, 16) + gcrootmap.add_frame_offset(shape, -24) + assert shape == [16, -24] + + def 
test_compress_callshape(self): + class FakeDataBlockWrapper: + def malloc_aligned(self, size, alignment): + assert alignment == 4 # even on 64-bits + assert size == 12 # 4*3, even on 64-bits + return rffi.cast(lltype.Signed, p) + datablockwrapper = FakeDataBlockWrapper() + p = lltype.malloc(rffi.CArray(rffi.INT), 3, immortal=True) + gcrootmap = GcRootMap_shadowstack(self.FakeGcDescr()) + shape = [16, -24] + gcrootmap.compress_callshape(shape, datablockwrapper) + assert rffi.cast(lltype.Signed, p[0]) == 16 + assert rffi.cast(lltype.Signed, p[1]) == -24 + assert rffi.cast(lltype.Signed, p[2]) == 0 + + class FakeLLOp(object): def __init__(self): self.record = [] diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -37,6 +37,10 @@ base_encoding = None def _getfilesystemencoding(space): + if (space.config.translation.type_system == 'ootype'): + # XXX: fix this for ootype + return base_encoding + # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) diff --git a/lib-python/TODO b/lib-python/TODO --- a/lib-python/TODO +++ b/lib-python/TODO @@ -2,7 +2,7 @@ =================== You can find the results of the most recent buildbot run at: -http://buildbot.pypy.org/summary?branch=fast-forward +http://buildbot.pypy.org/ Probably easy tasks @@ -39,18 +39,8 @@ Medium tasks ------------ -- Ast objects should be picklable, see in pypy/module/_ast/test/test_ast.py: - test_pickle() - - socket module has a couple of changes (including AF_TIPC packet range) -- (test_lib2to3) When a "for" loop runs a generator function, if the loop is - exited before the end, the "finally" clause of the generator is not called - until the next gc collection. In our case, in lib2to3/pytree.py, - WildcardPattern.match_seq() does not exhaust the generate_matches() generator, - and stderr is not restored. 
- - Longer tasks ------------ diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver, hint, purefunction from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class SendTests(object): diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py --- a/pypy/rlib/jit.py +++ b/pypy/rlib/jit.py @@ -457,6 +457,12 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) + def get_getfield_op(self, rtyper): + if rtyper.type_system.name == 'ootypesystem': + return 'oogetfield' + else: + return 'getfield' + def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -471,8 +477,8 @@ r_green = hop.args_r[i] v_green = hop.inputarg(r_green, arg=i) else: - if hop.rtyper.type_system.name == 'ootypesystem': - py.test.skip("lltype only") + #if hop.rtyper.type_system.name == 'ootypesystem': + #py.test.skip("lltype only") objname, fieldname = name.split('.') # see test_green_field assert objname in driver.reds i = kwds_i['i_' + objname] @@ -488,7 +494,10 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - GTYPE = r_red.lowleveltype.TO + if hop.rtyper.type_system.name == 'ootypesystem': + GTYPE = r_red.lowleveltype + else: + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -497,7 +506,8 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - v_green = hop.genop('getfield', [v_red, c_llname], + getfield_op = self.get_getfield_op(hop.rtyper) + v_green = 
hop.genop(getfield_op, [v_red, c_llname], resulttype = r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py --- a/pypy/jit/metainterp/test/test_dict.py +++ b/pypy/jit/metainterp/test/test_dict.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver from pypy.rlib import objectmodel diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py --- a/pypy/rpython/memory/test/test_transformed_gc.py +++ b/pypy/rpython/memory/test/test_transformed_gc.py @@ -13,7 +13,6 @@ from pypy.rlib import rgc from pypy import conftest from pypy.rlib.rstring import StringBuilder -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rlib.rarithmetic import LONG_BIT WORD = LONG_BIT // 8 diff --git a/pypy/translator/backendopt/test/test_support.py b/pypy/translator/backendopt/test/test_support.py --- a/pypy/translator/backendopt/test/test_support.py +++ b/pypy/translator/backendopt/test/test_support.py @@ -1,94 +1,7 @@ -from pypy.translator.unsimplify import varoftype from pypy.translator.translator import TranslationContext, graphof from pypy.translator.backendopt.support import \ - needs_conservative_livevar_calculation, split_block_with_keepalive, \ find_loop_blocks, find_backedges, compute_reachability -from pypy.rpython.rtyper import LowLevelOpList -from pypy.rpython.lltypesystem import lltype -from pypy.objspace.flow import model - -NonGcB = lltype.Struct("B", ('x', lltype.Signed)) -GcA = lltype.GcStruct("A", ('b', NonGcB), ('c', lltype.Ptr(lltype.FuncType([], lltype.Void)))) - -def test_nclc_should_be_true(): - # this is testing a block like: - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: v0 (i.e. 
pointer to non-gc) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert needs_conservative_livevar_calculation(block) - -def test_nclc_nongc_not_passed_on(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getsubstruct pointer_to_gc 'b' - # +--- exitargs: pointer_to_gc (i.e. the pointer to non-gc doesn't leave the block) - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getsubstruct", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([ptr_a], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_nclc_ignore_functype(): - # +--- inputargs: pointer_to_gc - # | v0 <- op_getfield pointer_to_gc 'c' - # +--- exitargs: v0 (i.e. a pointer to function) - # pointers to functions are 'not gc' but functions are also - # immortal so you don't need to muck around inserting keepalives - # so *they* don't die! 
- llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('c', lltype.Void)], - resulttype=GcA.c) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([v_res], None)) - assert not needs_conservative_livevar_calculation(block) - -def test_sbwk_should_insert_keepalives(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_using_v0 <- split here - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - -def test_sbwk_should_insert_keepalives_2(): - # this is testing something like: - # v0 <- op_producing_non_gc - # v1 <- op_not_using_v0 <- split here - # v2 <- op_using_v0 - llops = LowLevelOpList() - ptr_a = varoftype(lltype.Ptr(GcA)) - v_res = llops.genop("getfield", [ptr_a, model.Constant('b', lltype.Void)], - resulttype=lltype.Ptr(NonGcB)) - llops.genop("direct_call", [model.Constant(None, lltype.Void)], - resulttype=lltype.Void) - llops.genop("direct_call", [model.Constant(None, lltype.Void), v_res], - resulttype=lltype.Void) - block = model.Block([ptr_a]) - block.operations.extend(llops) - block.closeblock(model.Link([], None)) - link = split_block_with_keepalive(block, 1) - assert 'keepalive' in [op.opname for op in link.target.operations] - #__________________________________________________________ # test compute_reachability diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/test/test_optimizeutil.py --- a/pypy/jit/metainterp/test/test_optimizeutil.py +++ 
b/pypy/jit/metainterp/test/test_optimizeutil.py @@ -68,6 +68,16 @@ nodeobjvalue = lltype.cast_opaque_ptr(llmemory.GCREF, nodeobj) refdescr = cpu.fielddescrof(NODEOBJ, 'ref') + INTOBJ_NOIMMUT = lltype.GcStruct('INTOBJ_NOIMMUT', ('parent', OBJECT), + ('intval', lltype.Signed)) + INTOBJ_IMMUT = lltype.GcStruct('INTOBJ_IMMUT', ('parent', OBJECT), + ('intval', lltype.Signed), + hints={'immutable': True}) + intobj_noimmut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + intobj_immut_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) + noimmut_intval = cpu.fielddescrof(INTOBJ_NOIMMUT, 'intval') + immut_intval = cpu.fielddescrof(INTOBJ_IMMUT, 'intval') + arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) @@ -155,6 +165,8 @@ register_known_gctype(cpu, node_vtable2, NODE2) register_known_gctype(cpu, u_vtable, U) register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF) + register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT) + register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) namespace = locals() diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -1,6 +1,8 @@ +import os from pypy.rlib import rgc from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror +from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr from pypy.rpython.lltypesystem import llgroup from pypy.rpython.lltypesystem.lloperation import llop @@ -21,6 +23,8 @@ class GcLLDescription(GcCache): minimal_size_in_nursery = 0 + get_malloc_slowpath_addr = None + def __init__(self, gcdescr, translator=None, rtyper=None): GcCache.__init__(self, translator is not None, rtyper) self.gcdescr = gcdescr @@ -34,6 +38,8 @@ pass def can_inline_malloc(self, descr): return False + def 
can_inline_malloc_varsize(self, descr, num_elem): + return False def has_write_barrier_class(self): return None def freeing_block(self, start, stop): @@ -212,10 +218,12 @@ return addr_ref -class GcRootMap_asmgcc: +class GcRootMap_asmgcc(object): """Handles locating the stack roots in the assembler. This is the class supporting --gcrootfinder=asmgcc. """ + is_shadow_stack = False + LOC_REG = 0 LOC_ESP_PLUS = 1 LOC_EBP_PLUS = 2 @@ -224,7 +232,7 @@ GCMAP_ARRAY = rffi.CArray(lltype.Signed) CALLSHAPE_ARRAY_PTR = rffi.CArrayPtr(rffi.UCHAR) - def __init__(self): + def __init__(self, gcdescr=None): # '_gcmap' is an array of length '_gcmap_maxlength' of addresses. # '_gcmap_curlength' tells how full the array really is. # The addresses are actually grouped in pairs: @@ -237,6 +245,13 @@ self._gcmap_deadentries = 0 self._gcmap_sorted = True + def add_jit2gc_hooks(self, jit2gc): + jit2gc.update({ + 'gcmapstart': lambda: self.gcmapstart(), + 'gcmapend': lambda: self.gcmapend(), + 'gcmarksorted': lambda: self.gcmarksorted(), + }) + def initialize(self): # hack hack hack. Remove these lines and see MissingRTypeAttribute # when the rtyper tries to annotate these methods only when GC-ing... @@ -365,7 +380,7 @@ number >>= 7 shape.append(chr(number | flag)) - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): assert (offset & 3) == 0 if offset >= 0: num = self.LOC_EBP_PLUS | offset @@ -388,6 +403,126 @@ return rawaddr +class GcRootMap_shadowstack(object): + """Handles locating the stack roots in the assembler. + This is the class supporting --gcrootfinder=shadowstack. + """ + is_shadow_stack = True + MARKER = 8 + + # The "shadowstack" is a portable way in which the GC finds the + # roots that live in the stack. Normally it is just a list of + # pointers to GC objects. The pointers may be moved around by a GC + # collection. But with the JIT, an entry can also be MARKER, in + # which case the next entry points to an assembler stack frame. 
+ # During a residual CALL from the assembler (which may indirectly + # call the GC), we use the force_index stored in the assembler + # stack frame to identify the call: we can go from the force_index + # to a list of where the GC pointers are in the frame (this is the + # purpose of the present class). + # + # Note that across CALL_MAY_FORCE or CALL_ASSEMBLER, we can also go + # from the force_index to a ResumeGuardForcedDescr instance, which + # is used if the virtualizable or the virtualrefs need to be forced + # (see pypy.jit.backend.model). The force_index number in the stack + # frame is initially set to a non-negative value x, but it is + # occasionally turned into (~x) in case of forcing. + + INTARRAYPTR = rffi.CArrayPtr(rffi.INT) + CALLSHAPES_ARRAY = rffi.CArray(INTARRAYPTR) + + def __init__(self, gcdescr): + self._callshapes = lltype.nullptr(self.CALLSHAPES_ARRAY) + self._callshapes_maxlength = 0 + self.force_index_ofs = gcdescr.force_index_ofs + + def add_jit2gc_hooks(self, jit2gc): + # + def collect_jit_stack_root(callback, gc, addr): + if addr.signed[0] != GcRootMap_shadowstack.MARKER: + # common case + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return WORD + else: + # case of a MARKER followed by an assembler stack frame + follow_stack_frame_of_assembler(callback, gc, addr) + return 2 * WORD + # + def follow_stack_frame_of_assembler(callback, gc, addr): + frame_addr = addr.signed[1] + addr = llmemory.cast_int_to_adr(frame_addr + self.force_index_ofs) + force_index = addr.signed[0] + if force_index < 0: + force_index = ~force_index + callshape = self._callshapes[force_index] + n = 0 + while True: + offset = rffi.cast(lltype.Signed, callshape[n]) + if offset == 0: + break + addr = llmemory.cast_int_to_adr(frame_addr + offset) + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + n += 1 + # + jit2gc.update({ + 'rootstackhook': collect_jit_stack_root, + }) + + def initialize(self): + pass + + def get_basic_shape(self, 
is_64_bit=False): + return [] + + def add_frame_offset(self, shape, offset): + assert offset != 0 + shape.append(offset) + + def add_callee_save_reg(self, shape, register): + msg = "GC pointer in %s was not spilled" % register + os.write(2, '[llsupport/gc] %s\n' % msg) + raise AssertionError(msg) + + def compress_callshape(self, shape, datablockwrapper): + length = len(shape) + SZINT = rffi.sizeof(rffi.INT) + rawaddr = datablockwrapper.malloc_aligned((length + 1) * SZINT, SZINT) + p = rffi.cast(self.INTARRAYPTR, rawaddr) + for i in range(length): + p[i] = rffi.cast(rffi.INT, shape[i]) + p[length] = rffi.cast(rffi.INT, 0) + return p + + def write_callshape(self, p, force_index): + if force_index >= self._callshapes_maxlength: + self._enlarge_callshape_list(force_index + 1) + self._callshapes[force_index] = p + + def _enlarge_callshape_list(self, minsize): + newlength = 250 + (self._callshapes_maxlength // 3) * 4 + if newlength < minsize: + newlength = minsize + newarray = lltype.malloc(self.CALLSHAPES_ARRAY, newlength, + flavor='raw', track_allocation=False) + if self._callshapes: + i = self._callshapes_maxlength - 1 + while i >= 0: + newarray[i] = self._callshapes[i] + i -= 1 + lltype.free(self._callshapes, flavor='raw', track_allocation=False) + self._callshapes = newarray + self._callshapes_maxlength = newlength + + def freeing_block(self, start, stop): + pass # nothing needed here + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 @@ -437,7 +572,7 @@ except KeyError: raise NotImplementedError("--gcrootfinder=%s not implemented" " with the JIT" % (name,)) - gcrootmap = cls() + gcrootmap = cls(gcdescr) self.gcrootmap = gcrootmap self.gcrefs = GcRefList() self.single_gcref_descr = GcPtrFieldDescr('', 0) @@ -446,12 +581,9 @@ # where it can be fished and reused by the 
FrameworkGCTransformer self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - self.translator._jit2gc = { - 'layoutbuilder': self.layoutbuilder, - 'gcmapstart': lambda: gcrootmap.gcmapstart(), - 'gcmapend': lambda: gcrootmap.gcmapend(), - 'gcmarksorted': lambda: gcrootmap.gcmarksorted(), - } + self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + gcrootmap.add_jit2gc_hooks(self.translator._jit2gc) + self.GCClass = self.layoutbuilder.GCClass self.moving_gc = self.GCClass.moving_gc self.HDRPTR = lltype.Ptr(self.GCClass.HDR) @@ -461,6 +593,10 @@ self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj() self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery() + # for the fast path of mallocs, the following must be true, at least + assert self.GCClass.inline_simple_malloc + assert self.GCClass.inline_simple_malloc_varsize + # make a malloc function, with three arguments def malloc_basic(size, tid): type_id = llop.extract_ushort(llgroup.HALFWORD, tid) @@ -539,20 +675,23 @@ x3 = x0 * 0.3 for_test_only.x = x0 + x1 + x2 + x3 # - def malloc_fixedsize_slowpath(size): + def malloc_slowpath(size): if self.DEBUG: random_usage_of_xmm_registers() assert size >= self.minimal_size_in_nursery try: + # NB. although we call do_malloc_fixedsize_clear() here, + # it's a bit of a hack because we set tid to 0 and may + # also use it to allocate varsized objects. The tid + # and possibly the length are both set afterward. 
gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF, 0, size, True, False, False) except MemoryError: fatalerror("out of memory (from JITted code)") return 0 return rffi.cast(lltype.Signed, gcref) - self.malloc_fixedsize_slowpath = malloc_fixedsize_slowpath - self.MALLOC_FIXEDSIZE_SLOWPATH = lltype.FuncType([lltype.Signed], - lltype.Signed) + self.malloc_slowpath = malloc_slowpath + self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) def get_nursery_free_addr(self): nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) @@ -562,9 +701,8 @@ nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) return rffi.cast(lltype.Signed, nurs_top_addr) - def get_malloc_fixedsize_slowpath_addr(self): - fptr = llhelper(lltype.Ptr(self.MALLOC_FIXEDSIZE_SLOWPATH), - self.malloc_fixedsize_slowpath) + def get_malloc_slowpath_addr(self): + fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) def initialize(self): @@ -710,6 +848,16 @@ return True return False + def can_inline_malloc_varsize(self, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + try: + size = ovfcheck(basesize + ovfcheck(itemsize * num_elem)) + return size < self.max_size_of_young_obj + except OverflowError: + return False + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/pypy/translator/backendopt/malloc.py b/pypy/translator/backendopt/malloc.py --- a/pypy/translator/backendopt/malloc.py +++ b/pypy/translator/backendopt/malloc.py @@ -1,5 +1,5 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link -from pypy.objspace.flow.model import SpaceOperation, traverse +from pypy.objspace.flow.model import SpaceOperation from pypy.tool.algo.unionfind import UnionFind from pypy.rpython.lltypesystem import lltype from pypy.rpython.ootypesystem 
import ootype @@ -67,7 +67,6 @@ # in this 'block', follow where the 'var' goes to and replace # it by a flattened-out family of variables. This family is given # by newvarsmap, whose keys are the 'flatnames'. - self.last_removed_access = None def list_newvars(): return [newvarsmap[key] for key in self.flatnames] @@ -115,7 +114,6 @@ newargs.append(arg) link.args[:] = newargs - self.insert_keepalives(list_newvars()) block.operations[:] = self.newops def compute_lifetimes(self, graph): @@ -149,8 +147,7 @@ set_use_point(graph.exceptblock, graph.exceptblock.inputargs[0], "except") set_use_point(graph.exceptblock, graph.exceptblock.inputargs[1], "except") - def visit(node): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname in self.IDENTITY_OPS: # special-case these operations to identify their input @@ -167,7 +164,7 @@ if isinstance(node.exitswitch, Variable): set_use_point(node, node.exitswitch, "exitswitch", node) - if isinstance(node, Link): + for node in graph.iterlinks(): if isinstance(node.last_exception, Variable): set_creation_point(node.prevblock, node.last_exception, "last_exception") @@ -187,7 +184,6 @@ else: d[arg] = True - traverse(visit, graph) return lifetimes.infos() def _try_inline_malloc(self, info): @@ -213,7 +209,7 @@ STRUCT = self.get_STRUCT(lltypes.keys()[0]) # must be only ever accessed via getfield/setfield/getsubstruct/ - # direct_fieldptr, or touched by keepalive or ptr_iszero/ptr_nonzero. + # direct_fieldptr, or touched by ptr_iszero/ptr_nonzero. # Note that same_as and cast_pointer are not recorded in usepoints. 
self.accessed_substructs = {} @@ -333,7 +329,6 @@ MALLOC_OP = "malloc" FIELD_ACCESS = dict.fromkeys(["getfield", "setfield", - "keepalive", "ptr_iszero", "ptr_nonzero", "getarrayitem", @@ -484,7 +479,6 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - self.last_removed_access = len(self.newops) elif op.opname in ("setfield", "setarrayitem"): S = op.args[0].concretetype.TO fldname = op.args[1].value @@ -500,15 +494,12 @@ self.newops.append(newop) else: newvarsmap[key] = op.args[2] - self.last_removed_access = len(self.newops) elif op.opname in ("same_as", "cast_pointer"): vars[op.result] = True # Consider the two pointers (input and result) as # equivalent. We can, and indeed must, use the same # flattened list of variables for both, as a "setfield" # via one pointer must be reflected in the other. - elif op.opname == 'keepalive': - self.last_removed_access = len(self.newops) elif op.opname in ("getsubstruct", "getarraysubstruct", "direct_fieldptr"): S = op.args[0].concretetype.TO @@ -546,18 +537,6 @@ else: raise AssertionError, op.opname - - def insert_keepalives(self, newvars): - if self.last_removed_access is not None: - keepalives = [] - for v in newvars: - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = Variable() - v0.concretetype = lltype.Void - newop = SpaceOperation('keepalive', [v], v0) - keepalives.append(newop) - self.newops[self.last_removed_access:self.last_removed_access] = keepalives class OOTypeMallocRemover(BaseMallocRemover): @@ -616,14 +595,12 @@ [newvarsmap[key]], op.result) self.newops.append(newop) - last_removed_access = len(self.newops) elif op.opname == "oosetfield": S = op.args[0].concretetype fldname = op.args[1].value key = self.key_for_field_access(S, fldname) assert key in newvarsmap newvarsmap[key] = op.args[2] - last_removed_access = len(self.newops) elif op.opname in ("same_as", "oodowncast", "ooupcast"): vars[op.result] = True # Consider the two pointers (input and result) as @@ -639,8 +616,6 
@@ else: raise AssertionError, op.opname - def insert_keepalives(self, newvars): - pass def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -79,7 +79,8 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel **32-bit** environment needs ``4GB``. + compiler. As of March 2011, Intel 32-bit environment needs **at + least** 2GB, and 64-bit needs 4GB. .. _`optimization level`: config/opt.html diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -880,6 +880,11 @@ except AttributeError: return False + def warn_missing_attribute(self, attr): + # only warn for missing attribute names whose name doesn't start + # with '$', to silence the warnings about '$memofield_xxx'. 
+ return not self.has_attribute(attr) and not attr.startswith('$') + def read_attribute(self, attr): try: return self.attrcache[attr] diff --git a/pypy/jit/metainterp/test/test_memmgr.py b/pypy/jit/metainterp/test/test_memmgr.py --- a/pypy/jit/metainterp/test/test_memmgr.py +++ b/pypy/jit/metainterp/test/test_memmgr.py @@ -12,7 +12,7 @@ import py from pypy.jit.metainterp.memmgr import MemoryManager -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside diff --git a/pypy/translator/backendopt/test/test_constfold.py b/pypy/translator/backendopt/test/test_constfold.py --- a/pypy/translator/backendopt/test/test_constfold.py +++ b/pypy/translator/backendopt/test/test_constfold.py @@ -185,27 +185,6 @@ check_graph(graph, [0], 61, t) -def test_keepalive_const_substruct(): - py.test.skip("do we want partial folding of getinteriorfield?") - S2 = lltype.Struct('S2', ('x', lltype.Signed)) - S1 = lltype.GcStruct('S1', ('sub', S2)) - s1 = lltype.malloc(S1) - s1.sub.x = 1234 - def fn(): - return s1.sub.x - graph, t = get_graph(fn, []) - assert summary(graph) == {'getinteriorfield': 1} - constant_fold_graph(graph) - - # kill all references to 's1' - s1 = fn = None - del graph.func - import gc; gc.collect() - - assert summary(graph) == {'getfield': 1} - check_graph(graph, [], 1234, t) - - def test_keepalive_const_fieldptr(): S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) s1 = lltype.malloc(S1) diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py --- a/pypy/jit/metainterp/test/test_list.py +++ b/pypy/jit/metainterp/test/test_list.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ListTests: diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- 
a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr[2]) + longrepr = str(report.longrepr) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py --- a/pypy/jit/backend/test/test_random.py +++ b/pypy/jit/backend/test/test_random.py @@ -717,6 +717,7 @@ def test_random_function(BuilderClass=OperationBuilder): r = Random() cpu = get_cpu() + cpu.setup_once() if pytest.config.option.repeat == -1: while 1: check_random_function(cpu, BuilderClass, r) diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py --- a/pypy/jit/backend/x86/test/test_zrpy_gc.py +++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py @@ -5,8 +5,8 @@ soon as possible (at least in a simple case). """ -import weakref, random -import py +import weakref +import py, os from pypy.annotation import policy as annpolicy from pypy.rlib import rgc from pypy.rpython.lltypesystem import lltype, llmemory, rffi @@ -72,6 +72,20 @@ return entrypoint +def get_functions_to_patch(): + from pypy.jit.backend.llsupport import gc + # + can_inline_malloc1 = gc.GcLLDescr_framework.can_inline_malloc + def can_inline_malloc2(*args): + try: + if os.environ['PYPY_NO_INLINE_MALLOC']: + return False + except KeyError: + pass + return can_inline_malloc1(*args) + # + return {(gc.GcLLDescr_framework, 'can_inline_malloc'): can_inline_malloc2} + def compile(f, gc, **kwds): from pypy.annotation.listdef import s_list_of_strings from pypy.translator.translator import TranslationContext @@ -87,8 +101,21 @@ ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy()) ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() + if kwds['jit']: - apply_jit(t, enable_opts='') + patch = get_functions_to_patch() + old_value = {} + try: + for 
(obj, attr), value in patch.items(): + old_value[obj, attr] = getattr(obj, attr) + setattr(obj, attr, value) + # + apply_jit(t, enable_opts='') + # + finally: + for (obj, attr), oldvalue in old_value.items(): + setattr(obj, attr, oldvalue) + cbuilder = genc.CStandaloneBuilder(t, f, t.config) cbuilder.generate_source() cbuilder.compile() @@ -127,7 +154,7 @@ # ______________________________________________________________________ -class TestCompileFramework(object): +class CompileFrameworkTests(object): # Test suite using (so far) the minimark GC. def setup_class(cls): funcs = [] @@ -178,15 +205,21 @@ try: GcLLDescr_framework.DEBUG = True cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC, - gcrootfinder="asmgcc", jit=True) + gcrootfinder=cls.gcrootfinder, jit=True) finally: GcLLDescr_framework.DEBUG = OLD_DEBUG + def _run(self, name, n, env): + res = self.cbuilder.cmdexec("%s %d" %(name, n), env=env) + assert int(res) == 20 + def run(self, name, n=2000): pypylog = udir.join('TestCompileFramework.log') - res = self.cbuilder.cmdexec("%s %d" %(name, n), - env={'PYPYLOG': ':%s' % pypylog}) - assert int(res) == 20 + env = {'PYPYLOG': ':%s' % pypylog, + 'PYPY_NO_INLINE_MALLOC': '1'} + self._run(name, n, env) + env['PYPY_NO_INLINE_MALLOC'] = '' + self._run(name, n, env) def run_orig(self, name, n, x): self.main_allfuncs(name, n, x) @@ -576,3 +609,10 @@ def test_compile_framework_minimal_size_in_nursery(self): self.run('compile_framework_minimal_size_in_nursery') + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + +class TestAsmGcc(CompileFrameworkTests): + gcrootfinder = "asmgcc" diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,13 +105,6 @@ return parser def handle_config(self, config, translateconfig): - if config.translation.type_system == 'ootype': - print - print 
'Translation to cli and jvm is known to be broken at the moment' - print 'Please try the "cli-jit" branch at:' - print 'http://codespeak.net/svn/pypy/branch/cli-jit/' - sys.exit(1) - self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level @@ -159,8 +152,8 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - if config.translation.type_system == 'ootype': - config.objspace.usemodules.suggest(rbench=True) + ## if config.translation.type_system == 'ootype': + ## config.objspace.usemodules.suggest(rbench=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/jit/metainterp/test/test_del.py b/pypy/jit/metainterp/test/test_del.py --- a/pypy/jit/metainterp/test/test_del.py +++ b/pypy/jit/metainterp/test/test_del.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class DelTests: diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -17,8 +17,8 @@ '_count': 'os_thread._count', 'allocate_lock': 'os_lock.allocate_lock', 'allocate': 'os_lock.allocate_lock', # obsolete synonym - 'LockType': 'os_lock.getlocktype(space)', - '_local': 'os_local.getlocaltype(space)', + 'LockType': 'os_lock.Lock', + '_local': 'os_local.Local', 'error': 'space.fromcache(error.Cache).w_error', } diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -2,34 +2,50 @@ from ctypes.util import find_library from resource import _struct_rusage, struct_rusage +__all__ = ["wait3", "wait4"] + libc = CDLL(find_library("c")) c_wait3 = libc.wait3 c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] 
+c_wait4 = libc.wait4 + +c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] + +def create_struct_rusage(c_struct): + return struct_rusage(( + float(c_struct.ru_utime), + float(c_struct.ru_stime), + c_struct.ru_maxrss, + c_struct.ru_ixrss, + c_struct.ru_idrss, + c_struct.ru_isrss, + c_struct.ru_minflt, + c_struct.ru_majflt, + c_struct.ru_nswap, + c_struct.ru_inblock, + c_struct.ru_oublock, + c_struct.ru_msgsnd, + c_struct.ru_msgrcv, + c_struct.ru_nsignals, + c_struct.ru_nvcsw, + c_struct.ru_nivcsw)) + def wait3(options): status = c_int() _rusage = _struct_rusage() pid = c_wait3(byref(status), c_int(options), byref(_rusage)) - rusage = struct_rusage(( - float(_rusage.ru_utime), - float(_rusage.ru_stime), - _rusage.ru_maxrss, - _rusage.ru_ixrss, - _rusage.ru_idrss, - _rusage.ru_isrss, - _rusage.ru_minflt, - _rusage.ru_majflt, - _rusage.ru_nswap, - _rusage.ru_inblock, - _rusage.ru_oublock, - _rusage.ru_msgsnd, - _rusage.ru_msgrcv, - _rusage.ru_nsignals, - _rusage.ru_nvcsw, - _rusage.ru_nivcsw)) + rusage = create_struct_rusage(_rusage) return pid, status.value, rusage -__all__ = ["wait3"] +def wait4(pid, options): + status = c_int() + _rusage = _struct_rusage() + pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + + rusage = create_struct_rusage(_rusage) + + return pid, status.value, rusage diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -174,6 +174,17 @@ assert api.PyObject_Compare(space.wrap(72), space.wrap(42)) == 1 assert api.PyObject_Compare(space.wrap("a"), space.wrap("a")) == 0 + def test_cmp(self, space, api): + w = space.wrap + with lltype.scoped_alloc(rffi.INTP.TO, 1) as ptr: + assert api.PyObject_Cmp(w(42), w(72), ptr) == 0 + assert ptr[0] == -1 + assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 + assert ptr[0] == 0 + assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + 
assert api.PyErr_Occurred() + api.PyErr_Clear() + def test_unicode(self, space, api): assert space.unwrap(api.PyObject_Unicode(space.wrap([]))) == u"[]" assert space.unwrap(api.PyObject_Unicode(space.wrap("e"))) == u"e" diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -3,7 +3,8 @@ from pypy.rlib.rarithmetic import most_neg_value_of_same_type from pypy.rlib.rfloat import isinf, isnan from pypy.rlib.debug import make_sure_not_resized, check_regular_int -from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.rlib import jit from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython import extregistry @@ -122,7 +123,11 @@ def numdigits(self): return len(self._digits) + @staticmethod + @jit.purefunction def fromint(intval): + # This function is marked as pure, so you must not call it and + # then modify the result. check_regular_int(intval) if intval < 0: sign = -1 @@ -149,20 +154,25 @@ t >>= SHIFT p += 1 return v - fromint = staticmethod(fromint) + @staticmethod + @jit.purefunction def frombool(b): + # This function is marked as pure, so you must not call it and + # then modify the result. if b: return rbigint([ONEDIGIT], 1) return rbigint() - frombool = staticmethod(frombool) + @staticmethod def fromlong(l): + "NOT_RPYTHON" return rbigint(*args_from_long(l)) - fromlong = staticmethod(fromlong) + @staticmethod def fromfloat(dval): """ Create a new bigint object from a float """ + # This function is not marked as pure because it can raise sign = 1 if isinf(dval) or isnan(dval): raise OverflowError @@ -183,16 +193,21 @@ frac -= float(bits) frac = math.ldexp(frac, SHIFT) return v - fromfloat = staticmethod(fromfloat) + @staticmethod + @jit.purefunction + @specialize.argtype(0) def fromrarith_int(i): + # This function is marked as pure, so you must not call it and + # then modify the result. 
return rbigint(*args_from_rarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) + @staticmethod + @jit.purefunction def fromdecimalstr(s): + # This function is marked as pure, so you must not call it and + # then modify the result. return _decimalstr_to_bigint(s) - fromdecimalstr = staticmethod(fromdecimalstr) def toint(self): """ @@ -1841,7 +1856,7 @@ elif s[p] == '+': p += 1 - a = rbigint.fromint(0) + a = rbigint() tens = 1 dig = 0 ord0 = ord('0') @@ -1859,7 +1874,7 @@ def parse_digit_string(parser): # helper for objspace.std.strutil - a = rbigint.fromint(0) + a = rbigint() base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 diff --git a/pypy/translator/jvm/database.py b/pypy/translator/jvm/database.py --- a/pypy/translator/jvm/database.py +++ b/pypy/translator/jvm/database.py @@ -4,7 +4,7 @@ """ from cStringIO import StringIO -from pypy.rpython.lltypesystem import lltype +from pypy.rpython.lltypesystem import lltype, rffi from pypy.rpython.ootypesystem import ootype, rclass from pypy.rpython.ootypesystem.module import ll_os from pypy.translator.jvm import node, methods @@ -229,9 +229,15 @@ if not ootype.isSubclass(OOTYPE, SELF): continue mobj = self._function_for_graph( clsobj, mname, False, mimpl.graph) - graphs = OOTYPE._lookup_graphs(mname) - if len(graphs) == 1: - mobj.is_final = True + # XXX: this logic is broken: it might happen that there are + # ootype.Instance which contains a meth whose graph is exactly + # the same as the meth in the superclass: in this case, + # len(graphs) == 1 but we cannot just mark the method as final + # (or we can, but we should avoid to emit the method in the + # subclass, then) + ## graphs = OOTYPE._lookup_graphs(mname) + ## if len(graphs) == 1: + ## mobj.is_final = True clsobj.add_method(mobj) # currently, we always include a special "dump" method for debugging @@ -359,6 +365,7 @@ ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, 
ootype.String:jvm.PYPYESCAPEDSTRING, ootype.Unicode:jvm.PYPYESCAPEDUNICODE, + rffi.SHORT:jvm.SHORTTOSTRINGS, } def toString_method_for_ootype(self, OOTYPE): @@ -406,6 +413,7 @@ ootype.UniChar: jvm.jChar, ootype.Class: jvm.jClass, ootype.ROOT: jvm.jObject, # treat like a scalar + rffi.SHORT: jvm.jShort, } # Dictionary for non-scalar types; in this case, if we see the key, we diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -278,6 +278,22 @@ rex_mem_reg_plus_scaled_reg_plus_const) # ____________________________________________________________ +# Emit a mod/rm referencing an immediate address that fits in 32-bit +# (the immediate address itself must be explicitely encoded as well, +# with immediate(argnum)). + +def encode_abs(mc, _1, _2, orbyte): + # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit + if mc.WORD == 8: + mc.writechar(chr(0x04 | orbyte)) + mc.writechar(chr(0x25)) + else: + mc.writechar(chr(0x05 | orbyte)) + return 0 + +abs_ = encode_abs, 0, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -348,7 +364,9 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) - INSN_rj = insn(rex_w, chr(base+3), register(1,8), '\x05', immediate(2)) + INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2)) + INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1), + immediate(2,'b')) INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b')) INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2)) @@ -366,7 +384,8 @@ INSN_bi32(mc, offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, 
INSN_rm, INSN_rj + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, + INSN_ji8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -444,23 +463,25 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj = common_modes(0) - OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj = common_modes(1) - AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj = common_modes(4) - SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj = common_modes(5) - SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj = common_modes(3) - XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj = common_modes(6) - CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj = common_modes(7) + ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _, _, OR_rm, OR_rj, _ = common_modes(1) + AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4) + SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5) + SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3) + XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6) + CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1)) - CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b')) - CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2)) + CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_, + immediate(1), immediate(2, 'b')) + CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_, + immediate(1), immediate(2)) CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32) - CMP_jr = insn(rex_w, '\x39', register(2, 8), '\x05', immediate(1)) + CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1)) 
CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) @@ -508,7 +529,7 @@ LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2)) - LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2)) + LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2)) CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) @@ -534,12 +555,15 @@ CDQ = insn(rex_nw, '\x99') TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) - TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), '\x05', immediate(1), immediate(2, 'b')) + TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b')) TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # reserved as an illegal instruction + UD2 = insn('\x0F\x0B') + # ------------------------------ SSE2 ------------------------------ # Conversion @@ -639,7 +663,7 @@ add_insn('s', stack_sp(modrm_argnum)) add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) - add_insn('j', '\x05', immediate(modrm_argnum)) + add_insn('j', abs_, immediate(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register @@ -680,7 +704,7 @@ # assert insnname_template.count('*') == 1 add_insn('x', register(2), '\xC0') - add_insn('j', '\x05', immediate(2)) + add_insn('j', abs_, immediate(2)) define_pxmm_insn('PADDQ_x*', '\xD4') define_pxmm_insn('PSUBQ_x*', '\xFB') diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -519,7 +519,7 @@ return code = frame.pycode if 
frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr <= frame.instr_prev: + if frame.last_instr < frame.instr_prev_plus_one: # We jumped backwards in the same line. executioncontext._trace(frame, 'line', self.space.w_None) else: @@ -557,5 +557,5 @@ frame.f_lineno = line executioncontext._trace(frame, 'line', self.space.w_None) - frame.instr_prev = frame.last_instr + frame.instr_prev_plus_one = frame.last_instr + 1 self.space.frame_trace_action.fire() # continue tracing diff --git a/pypy/translator/backendopt/test/test_merge_if_blocks.py b/pypy/translator/backendopt/test/test_merge_if_blocks.py --- a/pypy/translator/backendopt/test/test_merge_if_blocks.py +++ b/pypy/translator/backendopt/test/test_merge_if_blocks.py @@ -2,7 +2,7 @@ from pypy.translator.backendopt.merge_if_blocks import merge_if_blocks from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof as tgraphof -from pypy.objspace.flow.model import flatten, Block +from pypy.objspace.flow.model import Block from pypy.translator.backendopt.removenoops import remove_same_as from pypy.rpython.llinterp import LLInterpreter from pypy.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py --- a/pypy/jit/metainterp/test/test_immutable.py +++ b/pypy/jit/metainterp/test/test_immutable.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class ImmutableFieldsTests: diff --git a/pypy/translator/test/test_simplify.py b/pypy/translator/test/test_simplify.py --- a/pypy/translator/test/test_simplify.py +++ b/pypy/translator/test/test_simplify.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.simplify import (get_graph, transform_dead_op_vars, desugar_isinstance) 
-from pypy.objspace.flow.model import traverse, Block, Constant, summary +from pypy.objspace.flow.model import Block, Constant, summary from pypy import conftest def translate(func, argtypes, backend_optimize=True): @@ -156,36 +156,6 @@ assert graph.startblock.operations[-1].opname == 'direct_call' -def test_remove_pointless_keepalive(): - from pypy.rlib import objectmodel - class C: - y = None - z1 = None - z2 = None - - def g(): - return C() - - def f(i): - c = g() - c.y - if i: - n = c.z1 - else: - n = c.z2 - objectmodel.keepalive_until_here(c, n) - - graph, t = translate(f, [bool]) - - #t.view() - - for block in graph.iterblocks(): - for op in block.operations: - assert op.opname != 'getfield' - if op.opname == 'keepalive': - assert op.args[0] in graph.getargs() - - def test_remove_identical_variables(): def g(code): pc = 0 diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -191,6 +191,10 @@ f = _io.BufferedReader(raw) assert repr(f) == '<_io.BufferedReader name=%r>' % (self.tmpfile,) +class AppTestBufferedReaderWithThreads(AppTestBufferedReader): + spaceconfig = dict(usemodules=['_io', 'thread']) + + class AppTestBufferedWriter: def setup_class(cls): cls.space = gettestobjspace(usemodules=['_io']) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -107,7 +107,8 @@ self.bytecode_no = int(bytecode_no) self.operations = operations self.storage = storage - self.code = storage.disassemble_code(self.filename, self.startlineno) + self.code = storage.disassemble_code(self.filename, self.startlineno, + self.name) def repr(self): if self.filename is None: diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py --- a/pypy/annotation/model.py +++ b/pypy/annotation/model.py @@ -641,6 +641,8 @@ except TypeError: s = 
None # unhashable T, e.g. a Ptr(GcForwardReference()) if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) if isinstance(T, (ootype.Instance, ootype.BuiltinType)): diff --git a/lib_pypy/pypy_test/test_os_wait.py b/lib_pypy/pypy_test/test_os_wait.py new file mode 100644 --- /dev/null +++ b/lib_pypy/pypy_test/test_os_wait.py @@ -0,0 +1,44 @@ +# Generates the resource cache +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') + +import os + +from lib_pypy._pypy_wait import wait3, wait4 + +if hasattr(os, 'wait3'): + def test_os_wait3(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait3()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) + +if hasattr(os, 'wait4'): + def test_os_wait4(): + exit_status = 0x33 + + if not hasattr(os, "fork"): + skip("Need fork() to test wait4()") + + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py --- a/pypy/jit/metainterp/test/test_jitprof.py +++ b/pypy/jit/metainterp/test/test_jitprof.py @@ -1,7 +1,7 @@ from pypy.jit.metainterp.warmspot import ll_meta_interp from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import 
LLJitMixin from pypy.jit.metainterp import pyjitpl from pypy.jit.metainterp.jitprof import * diff --git a/pypy/jit/metainterp/test/test_float.py b/pypy/jit/metainterp/test/test_float.py --- a/pypy/jit/metainterp/test/test_float.py +++ b/pypy/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class FloatTests: diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -382,7 +382,7 @@ send_bridge_to_backend(metainterp.staticdata, self, inputargs, new_loop.operations, new_loop.token) - def copy_all_attrbutes_into(self, res): + def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list @@ -393,13 +393,13 @@ def _clone_if_mutable(self): res = ResumeGuardDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeAtPositionDescr(ResumeGuardDescr): def _clone_if_mutable(self): res = ResumeAtPositionDescr() - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res class ResumeGuardForcedDescr(ResumeGuardDescr): @@ -473,7 +473,7 @@ def _clone_if_mutable(self): res = ResumeGuardForcedDescr(self.metainterp_sd, self.jitdriver_sd) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -42,3 +42,13 @@ assert arr[1:].tolist() == [2,3,4] assert arr[:2].tolist() == [1,2] assert arr[1:3].tolist() == [2,3] + + def test_buffer(self): + module = self.import_module(name='array') + arr = module.array('i', [1,2,3,4]) + # XXX big-endian 
+ assert str(buffer(arr)) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') + diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -3,6 +3,7 @@ from pypy.rpython.lltypesystem.module import ll_math from pypy.module.math.test.test_direct import MathTests, get_tester +from pypy.translator.c.test.test_genc import compile class TestMath(MathTests): @@ -11,6 +12,7 @@ nan = inf / inf assert not ll_math.ll_math_isinf(0) assert ll_math.ll_math_isinf(inf) + assert ll_math.ll_math_isinf(-inf) assert not ll_math.ll_math_isinf(nan) def test_isnan(self): @@ -20,6 +22,13 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isinf(self): + def f(x): + return ll_math.ll_math_isinf(1. / x) + f = compile(f, [float], backendopt=False) + assert f(5.5e-309) + + def make_test_case((fnname, args, expected), dict): # def test_func(self): diff --git a/pypy/translator/c/src/ll_math.h b/pypy/translator/c/src/ll_math.h --- a/pypy/translator/c/src/ll_math.h +++ b/pypy/translator/c/src/ll_math.h @@ -1,9 +1,6 @@ /* Definitions of some C99 math library functions, for those platforms that don't implement these functions already. 
*/ -int _pypy_math_isinf(double x); -int _pypy_math_isnan(double x); - double _pypy_math_acosh(double x); double _pypy_math_asinh(double x); double _pypy_math_atanh(double x); diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py --- a/pypy/rpython/ootypesystem/rdict.py +++ b/pypy/rpython/ootypesystem/rdict.py @@ -153,6 +153,13 @@ hop.exception_cannot_occur() return self.send_message(hop, 'll_clear') + def rtype_method_popitem(self, hop): + v_dict, = hop.inputargs(self) + r_tuple = hop.r_result + cTUPLE = hop.inputconst(ootype.Void, r_tuple.lowleveltype) + hop.exception_is_here() + return hop.gendirectcall(ll_popitem, cTUPLE, v_dict) + def __get_func(self, interp, r_func, fn, TYPE): if isinstance(r_func, MethodOfFrozenPBCRepr): obj = r_func.r_im_self.convert_const(fn.im_self) @@ -353,6 +360,16 @@ ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') +def ll_popitem(ELEM, d): + it = d.ll_get_items_iterator() + if it.ll_go_next(): + res = ootype.new(ELEM) + key = res.item0 = it.ll_current_key() + res.item1 = it.ll_current_value() + d.ll_remove(key) + return res + raise KeyError + # ____________________________________________________________ # # Iteration. 
diff --git a/pypy/translator/cli/opcodes.py b/pypy/translator/cli/opcodes.py --- a/pypy/translator/cli/opcodes.py +++ b/pypy/translator/cli/opcodes.py @@ -71,6 +71,8 @@ 'hint': [PushArg(0), StoreResult], 'direct_call': [Call], 'indirect_call': [IndirectCall], + 'int_between': [PushAllArgs, 'call bool [pypylib]pypy.runtime.Utils::IntBetween(int32, int32, int32)'], + 'cast_ptr_to_weakadr': [PushAllArgs, 'newobj instance void class %s::.ctor(object)' % WEAKREF], 'gc__collect': 'call void class [mscorlib]System.GC::Collect()', @@ -147,7 +149,10 @@ 'cast_float_to_uint': 'conv.u4', 'cast_longlong_to_float': 'conv.r8', 'cast_float_to_longlong': 'conv.i8', + 'cast_ulonglong_to_float': 'conv.r8', + 'cast_float_to_ulonglong': 'conv.u8', 'cast_primitive': [PushAllArgs, CastPrimitive], + 'force_cast': [PushAllArgs, CastPrimitive], 'truncate_longlong_to_int': 'conv.i4', } @@ -266,6 +271,8 @@ 'ullong_ge': _not('clt.un'), 'ullong_lshift': [PushAllArgs, 'conv.u4', 'shl'], 'ullong_rshift': [PushAllArgs, 'conv.i4', 'shr'], + 'ullong_and': 'and', + 'ullong_or': 'or', 'oois': 'ceq', 'ooisnot': _not('ceq'), diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -201,6 +201,23 @@ assert cmpr == 3 assert cmpr != 42 + def test_richcompare(self): + module = self.import_module("comparisons") + cmpr = module.CmpType() + + # should not crash + cmpr < 4 + cmpr <= 4 + cmpr > 4 + cmpr >= 4 + + assert cmpr.__le__(4) is NotImplemented + + def test_tpcompare(self): + module = self.import_module("comparisons") + cmpr = module.OldCmpType() + assert cmpr < cmpr + def test_hash(self): module = self.import_module("comparisons") cmpr = module.CmpType() diff --git a/pypy/translator/jvm/metavm.py b/pypy/translator/jvm/metavm.py --- a/pypy/translator/jvm/metavm.py +++ b/pypy/translator/jvm/metavm.py @@ -1,4 +1,5 @@ from pypy.rpython.ootypesystem import ootype 
+from pypy.rpython.lltypesystem import rffi from pypy.translator.oosupport.metavm import MicroInstruction from pypy.translator.jvm.typesystem import JvmScalarType, JvmClassType import pypy.translator.jvm.typesystem as jvm @@ -94,14 +95,20 @@ (ootype.SignedLongLong, ootype.Signed): jvm.L2I, (ootype.UnsignedLongLong, ootype.Unsigned): jvm.L2I, (ootype.UnsignedLongLong, ootype.Signed): jvm.L2I, + (ootype.Signed, rffi.SHORT): jvm.I2S, + (ootype.Unsigned, ootype.SignedLongLong): jvm.PYPYUINTTOLONG, (ootype.UnsignedLongLong, ootype.SignedLongLong): None, (ootype.SignedLongLong, ootype.UnsignedLongLong): None, + (ootype.Signed, ootype.Unsigned): None, + (ootype.Unsigned, ootype.Signed): None, } class _CastPrimitive(MicroInstruction): def render(self, generator, op): FROM = op.args[0].concretetype TO = op.result.concretetype + if TO == FROM: + return opcode = CASTS[(FROM, TO)] if opcode: generator.emit(opcode) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -56,13 +56,10 @@ """A frame is an environment supporting the execution of a code object. Abstract base class.""" - def __init__(self, space, w_globals=None, numlocals=-1): + def __init__(self, space, w_globals=None): self.space = space self.w_globals = w_globals # wrapped dict of globals self.w_locals = None # wrapped dict of locals - if numlocals < 0: # compute the minimal size based on arguments - numlocals = len(self.getcode().getvarnames()) - self.numlocals = numlocals def run(self): "Abstract method to override. Runs the frame" @@ -96,6 +93,10 @@ where the order is according to self.getcode().signature().""" raise TypeError, "abstract" + def getfastscopelength(self): + "Abstract. Get the expected number of locals." 
+ raise TypeError, "abstract" + def fast2locals(self): # Copy values from self.fastlocals_w to self.w_locals if self.w_locals is None: @@ -113,10 +114,11 @@ # Copy values from self.w_locals to self.fastlocals_w assert self.w_locals is not None varnames = self.getcode().getvarnames() + numlocals = self.getfastscopelength() - new_fastlocals_w = [None]*self.numlocals - - for i in range(min(len(varnames), self.numlocals)): + new_fastlocals_w = [None] * numlocals + + for i in range(min(len(varnames), numlocals)): w_name = self.space.wrap(varnames[i]) try: w_value = self.space.getitem(self.w_locals, w_name) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -5,6 +5,8 @@ syntax: regexp ^testresult$ ^site-packages$ +^site-packages/.*$ +^site-packages/.*$ ^bin$ ^pypy/bin/pypy-c ^pypy/module/cpyext/src/.+\.o$ @@ -37,8 +39,6 @@ ^pypy/translator/goal/.+\.dll$ ^pypy/translator/goal/target.+-c$ ^pypy/_cache$ -^site-packages/.+\.egg$ -^site-packages/.+\.pth$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ ^pypy/doc/statistic/.+\.pdf$ diff --git a/pypy/rlib/_jit_vref.py b/pypy/rlib/_jit_vref.py --- a/pypy/rlib/_jit_vref.py +++ b/pypy/rlib/_jit_vref.py @@ -8,6 +8,8 @@ from pypy.rpython.lltypesystem import lltype from pypy.rpython.error import TyperError +from pypy.rpython.ootypesystem import ootype + class SomeVRef(annmodel.SomeObject): @@ -24,7 +26,10 @@ return self.s_instance def rtyper_makerepr(self, rtyper): - return vrefrepr + if rtyper.type_system.name == 'lltypesystem': + return vrefrepr + elif rtyper.type_system.name == 'ootypesystem': + return oovrefrepr def rtyper_makekey(self): return self.__class__, @@ -54,4 +59,20 @@ " prebuilt virtual_ref") return lltype.nullptr(OBJECTPTR.TO) +from pypy.rpython.ootypesystem.rclass import OBJECT + +class OOVRefRepr(VRefRepr): + lowleveltype = OBJECT + def rtype_simple_call(self, hop): + [v] = hop.inputargs(self) + v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) + return hop.genop('oodowncast', 
[v], resulttype = hop.r_result) + + def convert_const(self, value): + if value() is not None: + raise TypeError("only supports virtual_ref_None as a" + " prebuilt virtual_ref") + return ootype.ROOT._null + vrefrepr = VRefRepr() +oovrefrepr = OOVRefRepr() diff --git a/pypy/translator/cli/test/test_class.py b/pypy/translator/cli/test/test_class.py --- a/pypy/translator/cli/test/test_class.py +++ b/pypy/translator/cli/test/test_class.py @@ -1,11 +1,8 @@ import py from pypy.translator.cli.test.runtest import CliTest -from pypy.translator.oosupport.test_template.class_ import BaseTestClass, BaseTestSpecialcase +from pypy.translator.oosupport.test_template.class_ import BaseTestClass # ====> ../../oosupport/test_template/class_.py class TestCliClass(CliTest, BaseTestClass): pass - -class TestCliSpecialCase(CliTest, BaseTestSpecialcase): - pass diff --git a/pypy/jit/metainterp/test/test_exception.py b/pypy/jit/metainterp/test/test_exception.py --- a/pypy/jit/metainterp/test/test_exception.py +++ b/pypy/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 @@ import py, sys -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver, dont_look_inside from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from pypy.jit.codewriter.policy import StopAtXPolicy diff --git a/pypy/jit/backend/x86/arch.py b/pypy/jit/backend/x86/arch.py --- a/pypy/jit/backend/x86/arch.py +++ b/pypy/jit/backend/x86/arch.py @@ -1,17 +1,29 @@ # Constants that depend on whether we are on 32-bit or 64-bit +# The frame size gives the standard fixed part at the start of +# every assembler frame: the saved value of some registers, +# one word for the force_index, and some extra space used only +# during a malloc that needs to go via its slow path. 
+ import sys if sys.maxint == (2**31 - 1): WORD = 4 - # ebp + ebx + esi + edi + force_index = 5 words - FRAME_FIXED_SIZE = 5 + # ebp + ebx + esi + edi + 4 extra words + force_index = 9 words + FRAME_FIXED_SIZE = 9 + FORCE_INDEX_OFS = -8*WORD + MY_COPY_OF_REGS = -7*WORD IS_X86_32 = True IS_X86_64 = False else: WORD = 8 - # rbp + rbx + r12 + r13 + r14 + r15 + force_index = 7 words - FRAME_FIXED_SIZE = 7 + # rbp + rbx + r12 + r13 + r14 + r15 + 11 extra words + force_index = 18 + FRAME_FIXED_SIZE = 18 + FORCE_INDEX_OFS = -17*WORD + MY_COPY_OF_REGS = -16*WORD IS_X86_32 = False IS_X86_64 = True -FORCE_INDEX_OFS = -(FRAME_FIXED_SIZE-1)*WORD +# The extra space has room for almost all registers, apart from eax and edx +# which are used in the malloc itself. They are: +# ecx, ebx, esi, edi [32 and 64 bits] +# r8, r9, r10, r12, r13, r14, r15 [64 bits only] diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py --- a/pypy/config/translationoption.py +++ b/pypy/config/translationoption.py @@ -343,7 +343,11 @@ } def final_check_config(config): - pass + # XXX: this should be a real config option, but it is hard to refactor it; + # instead, we "just" patch it from here + from pypy.rlib import rfloat + if config.translation.type_system == 'ootype': + rfloat.USE_SHORT_FLOAT_REPR = False def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -486,6 +486,7 @@ class W_IMap(Wrappable): _error_name = "imap" + _immutable_fields_ = ["w_fun", "iterators_w"] def __init__(self, space, w_fun, args_w): self.space = space diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -4,6 +4,8 @@ from pypy.rpython.tool import rffi_platform from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rlib import objectmodel +from pypy.rpython.extfunc import register_external +from pypy.annotation.model import SomeString USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? @@ -24,16 +26,28 @@ globals().update(rffi_platform.configure(CConfig)) def rstring_to_float(s): + return rstring_to_float_impl(s) + +def rstring_to_float_impl(s): if USE_SHORT_FLOAT_REPR: from pypy.rlib.rdtoa import strtod return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: raise ValueError + return parts_to_float(sign, before_point, after_point, exponent) - return parts_to_float(sign, before_point, after_point, exponent) +def oo_rstring_to_float(s): + from pypy.rpython.annlowlevel import oostr + from pypy.rpython.ootypesystem import ootype + lls = oostr(s) + return ootype.ooparse_float(lls) + +register_external(rstring_to_float, [SomeString(can_be_None=False)], float, + llimpl=rstring_to_float_impl, + ooimpl=oo_rstring_to_float, + sandboxsafe=True) + # float as string -> sign, beforept, afterpt, exponent def break_up_float(s): @@ -153,128 +167,132 @@ result = formatd(value, tp, precision, flags) return result, special -if USE_SHORT_FLOAT_REPR: - def round_double(value, ndigits): - # The basic idea is very simple: convert and round the double to - # a decimal string using _Py_dg_dtoa, then convert that decimal - 
# string back to a double with _Py_dg_strtod. There's one minor - # difficulty: Python 2.x expects round to do - # round-half-away-from-zero, while _Py_dg_dtoa does - # round-half-to-even. So we need some way to detect and correct - # the halfway cases. +def round_double(value, ndigits): + if USE_SHORT_FLOAT_REPR: + return round_double_short_repr(value, ndigits) + else: + return round_double_fallback_repr(value, ndigits) - # a halfway value has the form k * 0.5 * 10**-ndigits for some - # odd integer k. Or in other words, a rational number x is - # exactly halfway between two multiples of 10**-ndigits if its - # 2-valuation is exactly -ndigits-1 and its 5-valuation is at - # least -ndigits. For ndigits >= 0 the latter condition is - # automatically satisfied for a binary float x, since any such - # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x - # needs to be an integral multiple of 5**-ndigits; we can check - # this using fmod. For -22 > ndigits, there are no halfway - # cases: 5**23 takes 54 bits to represent exactly, so any odd - # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of - # precision to represent exactly. +def round_double_short_repr(value, ndigits): + # The basic idea is very simple: convert and round the double to + # a decimal string using _Py_dg_dtoa, then convert that decimal + # string back to a double with _Py_dg_strtod. There's one minor + # difficulty: Python 2.x expects round to do + # round-half-away-from-zero, while _Py_dg_dtoa does + # round-half-to-even. So we need some way to detect and correct + # the halfway cases. - sign = copysign(1.0, value) - value = abs(value) + # a halfway value has the form k * 0.5 * 10**-ndigits for some + # odd integer k. Or in other words, a rational number x is + # exactly halfway between two multiples of 10**-ndigits if its + # 2-valuation is exactly -ndigits-1 and its 5-valuation is at + # least -ndigits. 
For ndigits >= 0 the latter condition is + # automatically satisfied for a binary float x, since any such + # float has nonnegative 5-valuation. For 0 > ndigits >= -22, x + # needs to be an integral multiple of 5**-ndigits; we can check + # this using fmod. For -22 > ndigits, there are no halfway + # cases: 5**23 takes 54 bits to represent exactly, so any odd + # multiple of 0.5 * 10**n for n >= 23 takes at least 54 bits of + # precision to represent exactly. - # find 2-valuation value - m, expo = math.frexp(value) - while m != math.floor(m): - m *= 2.0 - expo -= 1 + sign = copysign(1.0, value) + value = abs(value) - # determine whether this is a halfway case. - halfway_case = 0 - if expo == -ndigits - 1: - if ndigits >= 0: + # find 2-valuation value + m, expo = math.frexp(value) + while m != math.floor(m): + m *= 2.0 + expo -= 1 + + # determine whether this is a halfway case. + halfway_case = 0 + if expo == -ndigits - 1: + if ndigits >= 0: + halfway_case = 1 + elif ndigits >= -22: + # 22 is the largest k such that 5**k is exactly + # representable as a double + five_pow = 1.0 + for i in range(-ndigits): + five_pow *= 5.0 + if math.fmod(value, five_pow) == 0.0: halfway_case = 1 - elif ndigits >= -22: - # 22 is the largest k such that 5**k is exactly - # representable as a double - five_pow = 1.0 - for i in range(-ndigits): - five_pow *= 5.0 - if math.fmod(value, five_pow) == 0.0: - halfway_case = 1 - # round to a decimal string; use an extra place for halfway case - strvalue = formatd(value, 'f', ndigits + halfway_case) + # round to a decimal string; use an extra place for halfway case + strvalue = formatd(value, 'f', ndigits + halfway_case) - if halfway_case: - buf = [c for c in strvalue] - if ndigits >= 0: - endpos = len(buf) - 1 - else: - endpos = len(buf) + ndigits - # Sanity checks: there should be exactly ndigits+1 places - # following the decimal point, and the last digit in the - # buffer should be a '5' - if not objectmodel.we_are_translated(): - assert 
buf[endpos] == '5' - if '.' in buf: - assert endpos == len(buf) - 1 - assert buf.index('.') == len(buf) - ndigits - 2 + if halfway_case: + buf = [c for c in strvalue] + if ndigits >= 0: + endpos = len(buf) - 1 + else: + endpos = len(buf) + ndigits + # Sanity checks: there should be exactly ndigits+1 places + # following the decimal point, and the last digit in the + # buffer should be a '5' + if not objectmodel.we_are_translated(): + assert buf[endpos] == '5' + if '.' in buf: + assert endpos == len(buf) - 1 + assert buf.index('.') == len(buf) - ndigits - 2 - # increment and shift right at the same time - i = endpos - 1 - carry = 1 - while i >= 0: + # increment and shift right at the same time + i = endpos - 1 + carry = 1 + while i >= 0: + digit = ord(buf[i]) + if digit == ord('.'): + buf[i+1] = chr(digit) + i -= 1 digit = ord(buf[i]) - if digit == ord('.'): - buf[i+1] = chr(digit) - i -= 1 - digit = ord(buf[i]) - carry += digit - ord('0') - buf[i+1] = chr(carry % 10 + ord('0')) - carry /= 10 - i -= 1 - buf[0] = chr(carry + ord('0')) - if ndigits < 0: - buf.append('0') + carry += digit - ord('0') + buf[i+1] = chr(carry % 10 + ord('0')) + carry /= 10 + i -= 1 + buf[0] = chr(carry + ord('0')) + if ndigits < 0: + buf.append('0') - strvalue = ''.join(buf) + strvalue = ''.join(buf) - return sign * rstring_to_float(strvalue) + return sign * rstring_to_float(strvalue) -else: - # fallback version, to be used when correctly rounded - # binary<->decimal conversions aren't available - def round_double(value, ndigits): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 +# fallback version, to be used when correctly rounded +# binary<->decimal conversions aren't available +def round_double_fallback_repr(value, ndigits): + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each 
safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow + pow1 = math.pow(10.0, ndigits - 22) + pow2 = 1e22 + else: + pow1 = math.pow(10.0, ndigits) + pow2 = 1.0 - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value + y = (value * pow1) * pow2 + # if y overflows, then rounded value is exactly x + if isinf(y): + return value - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for translation - y = value / pow1 + else: + pow1 = math.pow(10.0, -ndigits); + pow2 = 1.0 # unused; for translation + y = value / pow1 - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y-z) == 1.0: # obscure case, see the test - z = y + if y >= 0.0: + z = math.floor(y + 0.5) + else: + z = math.ceil(y - 0.5) + if math.fabs(y-z) == 1.0: # obscure case, see the test + z = y - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z + if ndigits >= 0: + z = (z / pow2) / pow1 + else: + z *= pow1 + return z INFINITY = 1e200 * 1e200 NAN = INFINITY / INFINITY diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -133,6 +133,8 @@ class AppTest_DictObject: + def setup_class(cls): + cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): d = {1:2} @@ -252,10 +254,17 @@ k = Key() d = {} d.setdefault(k, []) - assert k.calls == 1 + if self.on_pypy: + assert k.calls == 1 d.setdefault(k, 1) - assert k.calls == 2 + if self.on_pypy: + assert k.calls == 2 + + k = Key() + d.setdefault(k, 42) + if self.on_pypy: + assert k.calls == 1 def test_update(self): d = {1:2, 3:4} diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_ajit.py copy from pypy/jit/metainterp/test/test_basic.py copy to pypy/jit/metainterp/test/test_ajit.py --- 
a/pypy/jit/metainterp/test/test_basic.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -4,269 +4,17 @@ from pypy.rlib.jit import loop_invariant from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp import pyjitpl, history from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.metainterp.warmspot import get_stats from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong from pypy import conftest from pypy.rlib.rarithmetic import ovfcheck from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = 
"???" - - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class BasicTests: diff --git a/pypy/translator/backendopt/test/test_mallocprediction.py b/pypy/translator/backendopt/test/test_mallocprediction.py --- a/pypy/translator/backendopt/test/test_mallocprediction.py +++ b/pypy/translator/backendopt/test/test_mallocprediction.py @@ -4,7 +4,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter -from pypy.objspace.flow.model import checkgraph, flatten, Block +from 
pypy.objspace.flow.model import checkgraph, Block from pypy.conftest import option import sys diff --git a/pypy/translator/backendopt/merge_if_blocks.py b/pypy/translator/backendopt/merge_if_blocks.py --- a/pypy/translator/backendopt/merge_if_blocks.py +++ b/pypy/translator/backendopt/merge_if_blocks.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import Block, Constant, Variable, flatten +from pypy.objspace.flow.model import Block, Constant, Variable from pypy.objspace.flow.model import checkgraph, mkentrymap from pypy.translator.backendopt.support import log @@ -75,14 +75,19 @@ # False link checkvar = [var for var in current.operations[-1].args if isinstance(var, Variable)][0] + resvar = current.operations[-1].result case = [var for var in current.operations[-1].args if isinstance(var, Constant)][0] - chain.append((current, case)) checkvars.append(checkvar) falseexit = current.exits[0] assert not falseexit.exitcase trueexit = current.exits[1] targetblock = falseexit.target + # if the result of the check is also passed through the link, we + # cannot construct the chain + if resvar in falseexit.args or resvar in trueexit.args: + break + chain.append((current, case)) if len(entrymap[targetblock]) != 1: break if checkvar not in falseexit.args: diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -6,7 +6,7 @@ from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside from pypy.rlib.rarithmetic import intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.rclass import FieldListAccessor from pypy.jit.metainterp.warmspot import get_stats, get_translator from pypy.jit.metainterp import history diff --git a/pypy/module/__builtin__/app_inspect.py 
b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -76,8 +76,8 @@ result.sort() return result - elif hasattr(obj, '__dir__'): - result = obj.__dir__() + elif hasattr(type(obj), '__dir__'): + result = type(obj).__dir__(obj) if not isinstance(result, list): raise TypeError("__dir__() must return a list, not %r" % ( type(result),)) @@ -87,11 +87,14 @@ else: #(regular item) Dict = {} try: - Dict.update(obj.__dict__) - except AttributeError: pass + if isinstance(obj.__dict__, dict): + Dict.update(obj.__dict__) + except AttributeError: + pass try: Dict.update(_classdir(obj.__class__)) - except AttributeError: pass + except AttributeError: + pass ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). @@ -99,10 +102,14 @@ ## XXX needed to get at im_self etc of method objects. */ for attr in ['__members__','__methods__']: try: - for item in getattr(obj, attr): + l = getattr(obj, attr) + if not isinstance(l, list): + continue + for item in l: if isinstance(item, types.StringTypes): Dict[item] = None - except (AttributeError, TypeError): pass + except (AttributeError, TypeError): + pass result = Dict.keys() result.sort() diff --git a/pypy/rpython/lltypesystem/test/test_rffi.py b/pypy/rpython/lltypesystem/test/test_rffi.py --- a/pypy/rpython/lltypesystem/test/test_rffi.py +++ b/pypy/rpython/lltypesystem/test/test_rffi.py @@ -728,6 +728,7 @@ for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) + assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) assert not size_and_sign(lltype.Signed)[1] assert not size_and_sign(lltype.Char)[1] assert not size_and_sign(lltype.UniChar)[1] diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -135,7 +135,7 @@ return type(self) is type(other) # xxx obscure def 
clone_if_mutable(self): res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attrbutes_into(res) + self.copy_all_attributes_into(res) return res def _sortboxes(boxes): @@ -2757,7 +2757,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops(self): + def test_fold_partially_constant_add_sub(self): ops = """ [i0] i1 = int_sub(i0, 0) @@ -2791,7 +2791,7 @@ """ self.optimize_loop(ops, expected) - def test_fold_partially_constant_ops_ovf(self): + def test_fold_partially_constant_add_sub_ovf(self): ops = """ [i0] i1 = int_sub_ovf(i0, 0) @@ -2828,6 +2828,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): + ops = """ + [i0] + i1 = int_lshift(i0, 0) + i2 = int_rshift(i1, 0) + i3 = int_eq(i2, i0) + guard_true(i3) [] + jump(i2) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + # ---------- class TestLLtype(OptimizeOptTest, LLtypeMixin): @@ -4960,6 +4975,58 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i1, descr=nextdescr) """ + py.test.skip("no test here") + + def test_immutable_not(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_noimmut_vtable)) + setfield_gc(p0, 42, descr=noimmut_intval) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_variable(self): + ops = """ + [i0] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, i0, descr=immut_intval) + escape(p0) + jump(i0) + """ + self.optimize_loop(ops, ops) + + def test_immutable_incomplete(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + escape(p0) + jump() + """ + self.optimize_loop(ops, ops) + + def test_immutable_constantfold(self): + ops = """ + [] + p0 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p0, 1242, descr=immut_intval) + escape(p0) + jump() + """ + from pypy.rpython.lltypesystem import lltype, llmemory + class IntObj1242(object): + _TYPE = 
llmemory.GCREF.TO + def __eq__(self, other): + return other.container.intval == 1242 + self.namespace['intobj1242'] = lltype._ptr(llmemory.GCREF, + IntObj1242()) + expected = """ + [] + escape(ConstPtr(intobj1242)) + jump() + """ + self.optimize_loop(ops, expected) # ---------- def optimize_strunicode_loop(self, ops, optops, preamble=None): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -156,6 +156,37 @@ return x self.interpret(fn, [1.0, 2.0, 3.0]) + def test_copysign(self): + import math + def fn(x, y): + return math.copysign(x, y) + assert self.interpret(fn, [42, -1]) == -42 + assert self.interpret(fn, [42, -0.0]) == -42 + assert self.interpret(fn, [42, 0.0]) == 42 + + def test_rstring_to_float(self): + from pypy.rlib.rfloat import rstring_to_float + def fn(i): + s = ['42.3', '123.4'][i] + return rstring_to_float(s) + assert self.interpret(fn, [0]) == 42.3 + + def test_isnan(self): + import math + def fn(x): + inf = x * x + nan = inf / inf + return math.isnan(nan) + assert self.interpret(fn, [1e200]) + + def test_isinf(self): + import math + def fn(x): + inf = x * x + return math.isinf(inf) + assert self.interpret(fn, [1e200]) + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -514,12 +514,10 @@ break else: # all constant arguments: constant-fold away - argboxes = [self.get_constant_box(op.getarg(i)) - for i in range(op.numargs())] - resbox = execute_nonspec(self.cpu, None, - op.getopnum(), argboxes, op.getdescr()) - # FIXME: Don't we need to check for an overflow here? 
- self.make_constant(op.result, resbox.constbox()) + resbox = self.constant_fold(op) + # note that INT_xxx_OVF is not done from here, and the + # overflows in the INT_xxx operations are ignored + self.make_constant(op.result, resbox) return # did we do the exact same operation already? @@ -538,6 +536,13 @@ if nextop: self.emit_operation(nextop) + def constant_fold(self, op): + argboxes = [self.get_constant_box(op.getarg(i)) + for i in range(op.numargs())] + resbox = execute_nonspec(self.cpu, None, + op.getopnum(), argboxes, op.getdescr()) + return resbox.constbox() + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py --- a/pypy/jit/metainterp/test/test_jitdriver.py +++ b/pypy/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from pypy.rlib.jit import JitDriver, unroll_safe -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_basic.py +++ /dev/null @@ -1,2411 +0,0 @@ -import py -import sys -from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside -from pypy.rlib.jit import loop_invariant -from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed -from pypy.rlib.jit import unroll_safe, current_trace_length -from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.backend.llgraph import runner -from pypy.jit.metainterp import pyjitpl, history -from pypy.jit.metainterp.warmstate import set_future_value -from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy -from pypy.jit.codewriter import longlong 
-from pypy import conftest -from pypy.rlib.rarithmetic import ovfcheck -from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper -from pypy.rpython.lltypesystem import lltype, llmemory -from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT - -def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, **kwds): - from pypy.jit.codewriter import support, codewriter - - class FakeJitCell: - __compiled_merge_points = [] - def get_compiled_merge_points(self): - return self.__compiled_merge_points[:] - def set_compiled_merge_points(self, lst): - self.__compiled_merge_points = lst - - class FakeWarmRunnerState: - def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): - pass - - def jit_cell_at_key(self, greenkey): - assert greenkey == [] - return self._cell - _cell = FakeJitCell() - - trace_limit = sys.maxint - enable_opts = ALL_OPTS_DICT - - func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system) - graphs = rtyper.annotator.translator.graphs - result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] - - class FakeJitDriverSD: - num_green_args = 0 - portal_graph = graphs[0] - virtualizable_info = None - greenfield_info = None - result_type = result_kind - portal_runner_ptr = "???" 
- - stats = history.Stats() - cpu = CPUClass(rtyper, stats, None, False) - cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) - testself.cw = cw - policy = JitPolicy() - policy.set_supports_longlong(supports_longlong) - cw.find_all_graphs(policy) - # - testself.warmrunnerstate = FakeWarmRunnerState() - testself.warmrunnerstate.cpu = cpu - FakeJitDriverSD.warmstate = testself.warmrunnerstate - if hasattr(testself, 'finish_setup_for_interp_operations'): - testself.finish_setup_for_interp_operations() - # - cw.make_jitcodes(verbose=True) - -def _run_with_blackhole(testself, args): - from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder - cw = testself.cw - blackholeinterpbuilder = BlackholeInterpBuilder(cw) - blackholeinterp = blackholeinterpbuilder.acquire_interp() - count_i = count_r = count_f = 0 - for value in args: - T = lltype.typeOf(value) - if T == lltype.Signed: - blackholeinterp.setarg_i(count_i, value) - count_i += 1 - elif T == llmemory.GCREF: - blackholeinterp.setarg_r(count_r, value) - count_r += 1 - elif T == lltype.Float: - value = longlong.getfloatstorage(value) - blackholeinterp.setarg_f(count_f, value) - count_f += 1 - else: - raise TypeError(T) - [jitdriver_sd] = cw.callcontrol.jitdrivers_sd - blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) - blackholeinterp.run() - return blackholeinterp._final_result_anytype() - -def _run_with_pyjitpl(testself, args): - - class DoneWithThisFrame(Exception): - pass - - class DoneWithThisFrameRef(DoneWithThisFrame): - def __init__(self, cpu, *args): - DoneWithThisFrame.__init__(self, *args) - - cw = testself.cw - opt = history.Options(listops=True) - metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) - metainterp_sd.finish_setup(cw) - [jitdriver_sd] = metainterp_sd.jitdrivers_sd - metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) - metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame - metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef - 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame - testself.metainterp = metainterp - try: - metainterp.compile_and_run_once(jitdriver_sd, *args) - except DoneWithThisFrame, e: - #if conftest.option.view: - # metainterp.stats.view() - return e.args[0] - else: - raise Exception("FAILED") - -def _run_with_machine_code(testself, args): - metainterp = testself.metainterp - num_green_args = metainterp.jitdriver_sd.num_green_args - loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) - if len(loop_tokens) != 1: - return NotImplemented - # a loop was successfully created by _run_with_pyjitpl(); call it - cpu = metainterp.cpu - for i in range(len(args) - num_green_args): - x = args[num_green_args + i] - typecode = history.getkind(lltype.typeOf(x)) - set_future_value(cpu, i, x, typecode) - faildescr = cpu.execute_token(loop_tokens[0]) - assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') - if metainterp.jitdriver_sd.result_type == history.INT: - return cpu.get_latest_value_int(0) - elif metainterp.jitdriver_sd.result_type == history.REF: - return cpu.get_latest_value_ref(0) - elif metainterp.jitdriver_sd.result_type == history.FLOAT: - return cpu.get_latest_value_float(0) - else: - return None - - -class JitMixin: - basic = True - def check_loops(self, expected=None, everywhere=False, **check): - get_stats().check_loops(expected=expected, everywhere=everywhere, - **check) - def check_loop_count(self, count): - """NB. This is a hack; use check_tree_loop_count() or - check_enter_count() for the real thing. 
- This counts as 1 every bridge in addition to every loop; and it does - not count at all the entry bridges from interpreter, although they - are TreeLoops as well.""" - assert get_stats().compiled_count == count - def check_tree_loop_count(self, count): - assert len(get_stats().loops) == count - def check_loop_count_at_most(self, count): - assert get_stats().compiled_count <= count - def check_enter_count(self, count): - assert get_stats().enter_count == count - def check_enter_count_at_most(self, count): - assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): - assert get_stats().aborted_count == count - def check_aborted_count_at_least(self, count): - assert get_stats().aborted_count >= count - - def meta_interp(self, *args, **kwds): - kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system - if "backendopt" not in kwds: - kwds["backendopt"] = False - return ll_meta_interp(*args, **kwds) - - def interp_operations(self, f, args, **kwds): - # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) - # try to run it with blackhole.py - result1 = _run_with_blackhole(self, args) - # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) - assert result1 == result2 - # try to run it by running the code compiled just before - result3 = _run_with_machine_code(self, args) - assert result1 == result3 or result3 == NotImplemented - # - if (longlong.supports_longlong and - isinstance(result1, longlong.r_float_storage)): - result1 = longlong.getrealfloat(result1) - return result1 - - def check_history(self, expected=None, **isns): - # this can be used after calling meta_interp - get_stats().check_history(expected, **isns) - - def check_operations_history(self, expected=None, **isns): - # this can be used after interp_operations - if expected is not None: - expected = dict(expected) - 
expected['jump'] = 1 - self.metainterp.staticdata.stats.check_history(expected, **isns) - - -class LLJitMixin(JitMixin): - type_system = 'lltype' - CPUClass = runner.LLtypeCPU - - @staticmethod - def Ptr(T): - return lltype.Ptr(T) - - @staticmethod - def GcStruct(name, *fields, **kwds): - S = lltype.GcStruct(name, *fields, **kwds) - return S - - malloc = staticmethod(lltype.malloc) - nullptr = staticmethod(lltype.nullptr) - - @staticmethod - def malloc_immortal(T): - return lltype.malloc(T, immortal=True) - - def _get_NODE(self): - NODE = lltype.GcForwardReference() - NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), - ('next', lltype.Ptr(NODE)))) - return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - - -class BasicTests: - - def test_basic(self): - def f(x, y): - return x + y - res = self.interp_operations(f, [40, 2]) - assert res == 42 - - def test_basic_inst(self): - class A: - pass - def f(n): - a = A() - a.x = n - return a.x - res = self.interp_operations(f, [42]) - assert res == 42 - - def test_uint_floordiv(self): - from pypy.rlib.rarithmetic import r_uint - - def f(a, b): - a = r_uint(a) - b = r_uint(b) - return a/b - - res = self.interp_operations(f, [-4, 3]) - assert res == long(r_uint(-4)) // 3 - - def test_direct_call(self): - def g(n): - return n + 2 - def f(a, b): - return g(a) + g(b) - 
res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_direct_call_with_guard(self): - def g(n): - if n < 0: - return 0 - return n + 2 - def f(a, b): - return g(a) + g(b) - res = self.interp_operations(f, [8, 98]) - assert res == 110 - - def test_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - if self.basic: - found = 0 - for op in get_stats().loops[0]._all_operations(): - if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) - found += 1 - assert found == 1 - - def test_loop_invariant_mul1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loop_invariant_mul_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - b = y * 2 - res += ovfcheck(x * x) + b - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 308 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 2, 'int_sub': 1, 'int_gt': 1, - 'int_lshift': 1, - 'jump': 1}) - - def test_loop_invariant_mul_bridge1(self): - myjitdriver = 
JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - x += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 3427 - self.check_loop_count(3) - - def test_loop_invariant_mul_bridge_maintaining1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x * x - if y<16: - res += 1 - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1167 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - - def test_loop_invariant_mul_bridge_maintaining2(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - z = x * x - res += z - if y<16: - res += z - y -= 1 - return res - res = self.meta_interp(f, [6, 32]) - assert res == 1692 - self.check_loop_count(3) - self.check_loops({'int_add': 3, 'int_lt': 2, - 'int_sub': 2, 'guard_false': 1, - 'jump': 2, - 'int_gt': 1, 'guard_true': 2}) - - def test_loop_invariant_mul_bridge_maintaining3(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm']) - def f(x, y, m): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m) - myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m) - z = x * x - res += z - if y 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x.intval * x.intval - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 252 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'int_add': 1, 
'int_sub': 1, 'int_gt': 1, - 'jump': 1}) - - def test_loops_are_transient(self): - import gc, weakref - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - if y%2: - res *= 2 - y -= 1 - return res - wr_loops = [] - old_init = history.TreeLoop.__init__.im_func - try: - def track_init(self, name): - old_init(self, name) - wr_loops.append(weakref.ref(self)) - history.TreeLoop.__init__ = track_init - res = self.meta_interp(f, [6, 15], no_stats=True) - finally: - history.TreeLoop.__init__ = old_init - - assert res == f(6, 15) - gc.collect() - - #assert not [wr for wr in wr_loops if wr()] - for loop in [wr for wr in wr_loops if wr()]: - assert loop().name == 'short preamble' - - def test_string(self): - def f(n): - bytecode = 'adlfkj' + chr(n) - if n < len(bytecode): - return bytecode[n] - else: - return "?" - res = self.interp_operations(f, [1]) - assert res == ord("d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord("?") - - def test_chr2str(self): - def f(n): - s = chr(n) - return s[0] - res = self.interp_operations(f, [3]) - assert res == 3 - - def test_unicode(self): - def f(n): - bytecode = u'adlfkj' + unichr(n) - if n < len(bytecode): - return bytecode[n] - else: - return u"?" 
- res = self.interp_operations(f, [1]) - assert res == ord(u"d") # XXX should be "d" - res = self.interp_operations(f, [6]) - assert res == 6 - res = self.interp_operations(f, [42]) - assert res == ord(u"?") - - def test_residual_call(self): - @dont_look_inside - def externfn(x, y): - return x * y - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) - - def test_residual_call_pure(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - n = hint(n, promote=True) - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is not recorded in the history if all-constant args - self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure=0) - - def test_residual_call_pure_1(self): - def externfn(x, y): - return x * y - externfn._pure_function_ = True - def f(n): - return externfn(n, n+1) - res = self.interp_operations(f, [6]) - assert res == 42 - # CALL_PURE is recorded in the history if not-all-constant args - self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure=1) - - def test_residual_call_pure_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def externfn(x): - return x - 1 - externfn._pure_function_ = True - def f(n): - while n > 0: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - n = externfn(n) - return n - res = self.meta_interp(f, [7]) - assert res == 0 - # CALL_PURE is recorded in the history, but turned into a CALL - # by optimizeopt.py - self.check_loops(int_sub=0, call=1, call_pure=0) - - def test_constfold_call_pure(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - n -= externfn(m) - return n - res = 
self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_constfold_call_pure_2(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - def externfn(x): - return x - 3 - externfn._pure_function_ = True - class V: - def __init__(self, value): - self.value = value - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - v = V(m) - n -= externfn(v.value) - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0) - - def test_pure_function_returning_object(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - class V: - def __init__(self, x): - self.x = x - v1 = V(1) - v2 = V(2) - def externfn(x): - if x: - return v1 - else: - return v2 - externfn._pure_function_ = True - def f(n, m): - while n > 0: - myjitdriver.can_enter_jit(n=n, m=m) - myjitdriver.jit_merge_point(n=n, m=m) - m = V(m).x - n -= externfn(m).x + externfn(m + m - m).x - return n - res = self.meta_interp(f, [21, 5]) - assert res == -1 - # the CALL_PURE is constant-folded away by optimizeopt.py - self.check_loops(int_sub=1, call=0, call_pure=0, getfield_gc=0) - - def test_constant_across_mp(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - class X(object): - pass - def f(n): - while n > -100: - myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - x = X() - x.arg = 5 - if n <= 0: break - n -= x.arg - x.arg = 6 # prevents 'x.arg' from being annotated as constant - return n - res = self.meta_interp(f, [31]) - assert res == -4 - - def test_stopatxpolicy(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def internfn(y): - return y * 3 - def externfn(y): - return y % 4 - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if y & 
7: - f = internfn - else: - f = externfn - f(y) - y -= 1 - return 42 - policy = StopAtXPolicy(externfn) - res = self.meta_interp(f, [31], policy=policy) - assert res == 42 - self.check_loops(int_mul=1, int_mod=0) - - def test_we_are_jitted(self): - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - while y >= 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - if we_are_jitted(): - x = 1 - else: - x = 10 - y -= x - return y - assert f(55) == -5 - res = self.meta_interp(f, [55]) - assert res == -1 - - def test_confirm_enter_jit(self): - def confirm_enter_jit(x, y): - return x <= 5 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - confirm_enter_jit = confirm_enter_jit) - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - y -= x - return y - # - res = self.meta_interp(f, [10, 84]) - assert res == -6 - self.check_loop_count(0) - # - res = self.meta_interp(f, [3, 19]) - assert res == -2 - self.check_loop_count(1) - - def test_can_never_inline(self): - def can_never_inline(x): - return x > 50 - myjitdriver = JitDriver(greens = ['x'], reds = ['y'], - can_never_inline = can_never_inline) - @dont_look_inside - def marker(): - pass - def f(x, y): - while y >= 0: - myjitdriver.can_enter_jit(x=x, y=y) - myjitdriver.jit_merge_point(x=x, y=y) - x += 1 - if x == 4 or x == 61: - marker() - y -= x - return y - # - res = self.meta_interp(f, [3, 6], repeat=7) - assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle - # - res = self.meta_interp(f, [60, 84], repeat=7) - assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately - - def test_format(self): - def f(n): - return len("<%d>" % n) - res = self.interp_operations(f, [421]) - assert res == 5 - - def test_switch(self): - def f(n): - if n == -5: return 12 - elif n == 2: return 51 - elif n == 7: return 1212 - else: return 42 - res = self.interp_operations(f, 
[7]) - assert res == 1212 - res = self.interp_operations(f, [12311]) - assert res == 42 - - def test_r_uint(self): - from pypy.rlib.rarithmetic import r_uint - myjitdriver = JitDriver(greens = [], reds = ['y']) - def f(y): - y = r_uint(y) - while y > 0: - myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) - y -= 1 - return y - res = self.meta_interp(f, [10]) - assert res == 0 - - def test_uint_operations(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - return ((r_uint(n) - 123) >> 1) <= r_uint(456) - res = self.interp_operations(f, [50]) - assert res == False - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_uint_condition(self): - from pypy.rlib.rarithmetic import r_uint - def f(n): - if ((r_uint(n) - 123) >> 1) <= r_uint(456): - return 24 - else: - return 12 - res = self.interp_operations(f, [50]) - assert res == 12 - self.check_operations_history(int_rshift=0, uint_rshift=1, - int_le=0, uint_le=1, - int_sub=1) - - def test_int_between(self): - # - def check(arg1, arg2, arg3, expect_result, **expect_operations): - from pypy.rpython.lltypesystem import lltype - from pypy.rpython.lltypesystem.lloperation import llop - loc = locals().copy() - exec py.code.Source(""" - def f(n, m, p): - arg1 = %(arg1)s - arg2 = %(arg2)s - arg3 = %(arg3)s - return llop.int_between(lltype.Bool, arg1, arg2, arg3) - """ % locals()).compile() in loc - res = self.interp_operations(loc['f'], [5, 6, 7]) - assert res == expect_result - self.check_operations_history(expect_operations) - # - check('n', 'm', 'p', True, int_sub=2, uint_lt=1) - check('n', 'p', 'm', False, int_sub=2, uint_lt=1) - # - check('n', 'm', 6, False, int_sub=2, uint_lt=1) - # - check('n', 4, 'p', False, int_sub=2, uint_lt=1) - check('n', 5, 'p', True, int_sub=2, uint_lt=1) - check('n', 8, 'p', False, int_sub=2, uint_lt=1) - # - check('n', 6, 7, True, int_sub=2, uint_lt=1) - # - check(-2, 'n', 'p', True, int_sub=2, uint_lt=1) - 
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1) - check(-2, 'p', 'm', False, int_sub=2, uint_lt=1) - #check(0, 'n', 'p', True, uint_lt=1) xxx implement me - #check(0, 'm', 'p', True, uint_lt=1) - #check(0, 'p', 'm', False, uint_lt=1) - # - check(2, 'n', 6, True, int_sub=1, uint_lt=1) - check(2, 'm', 6, False, int_sub=1, uint_lt=1) - check(2, 'p', 6, False, int_sub=1, uint_lt=1) - check(5, 'n', 6, True, int_eq=1) # 6 == 5+1 - check(5, 'm', 6, False, int_eq=1) # 6 == 5+1 - # - check(2, 6, 'm', False, int_sub=1, uint_lt=1) - check(2, 6, 'p', True, int_sub=1, uint_lt=1) - # - check(2, 40, 6, False) - check(2, 40, 60, True) - - def test_getfield(self): - class A: - pass - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=1) - - def test_getfield_immutable(self): - class A: - _immutable_ = True - a1 = A() - a1.foo = 5 - a2 = A() - a2.foo = 8 - def f(x): - if x > 5: - a = a1 - else: - a = a2 - return a.foo * x - res = self.interp_operations(f, [42]) - assert res == 210 - self.check_operations_history(getfield_gc=0) - - def test_setfield_bool(self): - class A: - def __init__(self): - self.flag = True - myjitdriver = JitDriver(greens = [], reds = ['n', 'obj']) - def f(n): - obj = A() - res = False - while n > 0: - myjitdriver.can_enter_jit(n=n, obj=obj) - myjitdriver.jit_merge_point(n=n, obj=obj) - obj.flag = False - n -= 1 - return res - res = self.meta_interp(f, [7]) - assert type(res) == bool - assert not res - - def test_switch_dict(self): - def f(x): - if x == 1: return 61 - elif x == 2: return 511 - elif x == 3: return -22 - elif x == 4: return 81 - elif x == 5: return 17 - elif x == 6: return 54 - elif x == 7: return 987 - elif x == 8: return -12 - elif x == 9: return 321 - return -1 - res = self.interp_operations(f, [5]) - assert res == 17 - res = self.interp_operations(f, [15]) - assert res == -1 - 
- def test_int_add_ovf(self): - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -98 - res = self.interp_operations(f, [1, sys.maxint]) - assert res == -42 - - def test_int_sub_ovf(self): - def f(x, y): - try: - return ovfcheck(x - y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -102 - res = self.interp_operations(f, [1, -sys.maxint]) - assert res == -42 - - def test_int_mul_ovf(self): - def f(x, y): - try: - return ovfcheck(x * y) - except OverflowError: - return -42 - res = self.interp_operations(f, [-100, 2]) - assert res == -200 - res = self.interp_operations(f, [-3, sys.maxint//2]) - assert res == -42 - - def test_mod_ovf(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y']) - def f(n, x, y): - while n > 0: - myjitdriver.can_enter_jit(x=x, y=y, n=n) - myjitdriver.jit_merge_point(x=x, y=y, n=n) - n -= ovfcheck(x % y) - return n - res = self.meta_interp(f, [20, 1, 2]) - assert res == 0 - self.check_loops(call=0) - - def test_abs(self): - myjitdriver = JitDriver(greens = [], reds = ['i', 't']) - def f(i): - t = 0 - while i < 10: - myjitdriver.can_enter_jit(i=i, t=t) - myjitdriver.jit_merge_point(i=i, t=t) - t += abs(i) - i += 1 - return t - res = self.meta_interp(f, [-5]) - assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9 - - def test_float(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - x = float(x) - y = float(y) - res = 0.0 - while y > 0.0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += x - y -= 1.0 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42.0 - self.check_loop_count(1) - self.check_loops({'guard_true': 1, - 'float_add': 1, 'float_sub': 1, 'float_gt': 1, - 'jump': 1}) - - def test_print(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) - def f(n): - while n > 0: - 
myjitdriver.can_enter_jit(n=n) - myjitdriver.jit_merge_point(n=n) - print n - n -= 1 - return n - res = self.meta_interp(f, [7]) - assert res == 0 - - def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n'], greens = []) - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - n -= 1 - - self.meta_interp(f, [20], repeat=7) - self.check_tree_loop_count(2) # the loop and the entry path - # we get: - # ENTER - compile the new loop and the entry bridge - # ENTER - compile the leaving path - self.check_enter_count(2) - - def test_bridge_from_interpreter_2(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n'], greens = []) - glob = [1] - - def f(n): - while n > 0: - mydriver.can_enter_jit(n=n) - mydriver.jit_merge_point(n=n) - if n == 17 and glob[0]: - glob[0] = 0 - x = n + 1 - y = n + 2 - z = n + 3 - k = n + 4 - n -= 1 - n += x + y + z + k - n -= x + y + z + k - n -= 1 - - self.meta_interp(f, [20], repeat=7) - - def test_bridge_from_interpreter_3(self): - # one case for backend - computing of framesize on guard failure - mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - glob.x = 1 - x = 0 - y = 0 - z = 0 - k = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k) - mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k) - x += 10 - y += 3 - z -= 15 - k += 4 - if n == 17 and glob.x: - glob.x = 0 - x += n + 1 - y += n + 2 - z += n + 3 - k += n + 4 - n -= 1 - n -= 1 - return x + 2*y + 3*z + 5*k + 13*n - - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_bridge_from_interpreter_4(self): - jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - - def f(n, k): - while n > 0: - jitdriver.can_enter_jit(n=n, k=k) - jitdriver.jit_merge_point(n=n, k=k) - if k: - n -= 2 - else: - n -= 1 - return n + k - - from pypy.rpython.test.test_llinterp import 
get_interpreter, clear_tcache - from pypy.jit.metainterp.warmspot import WarmRunnerDesc - - interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) - clear_tcache() - translator = interp.typer.annotator.translator - translator.config.translation.gc = "boehm" - warmrunnerdesc = WarmRunnerDesc(translator, - CPUClass=self.CPUClass) - state = warmrunnerdesc.jitdrivers_sd[0].warmstate - state.set_param_threshold(3) # for tests - state.set_param_trace_eagerness(0) # for tests - warmrunnerdesc.finish() - for n, k in [(20, 0), (20, 1)]: - interp.eval_graph(graph, [n, k]) - - def test_bridge_leaving_interpreter_5(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Global: - pass - glob = Global() - - def f(n): - x = 0 - glob.x = 1 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - glob.x += 1 - x += 3 - n -= 1 - glob.x += 100 - return glob.x + x - res = self.meta_interp(f, [20], repeat=7) - assert res == f(20) - - def test_instantiate_classes(self): - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - def f(n): - if n > 5: - cls = A - else: - cls = B - return cls().foo - res = self.interp_operations(f, [3]) - assert res == 8 - res = self.interp_operations(f, [13]) - assert res == 72 - - def test_instantiate_does_not_call(self): - mydriver = JitDriver(reds = ['n', 'x'], greens = []) - class Base: pass - class A(Base): foo = 72 - class B(Base): foo = 8 - - def f(n): - x = 0 - while n > 0: - mydriver.can_enter_jit(n=n, x=x) - mydriver.jit_merge_point(n=n, x=x) - if n % 2 == 0: - cls = A - else: - cls = B - inst = cls() - x += inst.foo - n -= 1 - return x - res = self.meta_interp(f, [20], enable_opts='') - assert res == f(20) - self.check_loops(call=0) - - def test_zerodivisionerror(self): - # test the case of exception-raising operation that is not delegated - # to the backend at all: ZeroDivisionError - # - def f(n): - assert n >= 0 - try: - 
return ovfcheck(5 % n) - except ZeroDivisionError: - return -666 - except OverflowError: - return -777 - res = self.interp_operations(f, [0]) - assert res == -666 - # - def f(n): - assert n >= 0 - try: - return ovfcheck(6 // n) - except ZeroDivisionError: - return -667 - except OverflowError: - return -778 - res = self.interp_operations(f, [0]) - assert res == -667 - - def test_div_overflow(self): - import sys - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): - res = 0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - try: - res += llop.int_floordiv_ovf(lltype.Signed, - -sys.maxint-1, x) - x += 5 - except OverflowError: - res += 100 - y -= 1 - return res - res = self.meta_interp(f, [-41, 16]) - assert res == ((-sys.maxint-1) // (-41) + - (-sys.maxint-1) // (-36) + - (-sys.maxint-1) // (-31) + - (-sys.maxint-1) // (-26) + - (-sys.maxint-1) // (-21) + - (-sys.maxint-1) // (-16) + - (-sys.maxint-1) // (-11) + - (-sys.maxint-1) // (-6) + - 100 * 8) - - def test_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - if n: - obj = A() - else: - obj = B() - return isinstance(obj, B) - res = self.interp_operations(fn, [0]) - assert res - self.check_operations_history(guard_class=1) - res = self.interp_operations(fn, [1]) - assert not res - - def test_isinstance_2(self): - driver = JitDriver(greens = [], reds = ['n', 'sum', 'x']) - class A: - pass - class B(A): - pass - class C(B): - pass - - def main(): - return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100 - - def f(n, x): - sum = 0 - while n > 0: - driver.can_enter_jit(x=x, n=n, sum=sum) - driver.jit_merge_point(x=x, n=n, sum=sum) - if isinstance(x, B): - sum += 1 - n -= 1 - return sum - - res = self.meta_interp(main, []) - assert res == 55 - - def test_assert_isinstance(self): - class A: - pass - class B(A): - pass - def fn(n): - # this should only be 
called with n != 0 - if n: - obj = B() - obj.a = n - else: - obj = A() - obj.a = 17 - assert isinstance(obj, B) - return obj.a - res = self.interp_operations(fn, [1]) - assert res == 1 - self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) - - def test_r_dict(self): - from pypy.rlib.objectmodel import r_dict - class FooError(Exception): - pass - def myeq(n, m): - return n == m - def myhash(n): - if n < 0: - raise FooError - return -n - def f(n): - d = r_dict(myeq, myhash) - for i in range(10): - d[i] = i*i - try: - return d[n] - except FooError: - return 99 - res = self.interp_operations(f, [5]) - assert res == f(5) - - def test_free_object(self): - import weakref - from pypy.rlib import rgc - from pypy.rpython.lltypesystem.lloperation import llop - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - class X(object): - pass - def main(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= x.foo - def g(n): - x = X() - x.foo = 2 - main(n, x) - x.foo = 5 - return weakref.ref(x) - def f(n): - r = g(n) - rgc.collect(); rgc.collect(); rgc.collect() - return r() is None - # - assert f(30) == 1 - res = self.meta_interp(f, [30], no_stats=True) - assert res == 1 - - def test_pass_around(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - - def call(): - pass - - def f(n, x): - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - if n % 2: - call() - if n == 8: - return x - x = 3 - else: - x = 5 - n -= 1 - return 0 - - self.meta_interp(f, [40, 0]) - - def test_const_inputargs(self): - myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x']) - def f(n, x): - m = 0x7FFFFFFF - while n > 0: - myjitdriver.can_enter_jit(m=m, n=n, x=x) - myjitdriver.jit_merge_point(m=m, n=n, x=x) - x = 42 - n -= 1 - m = m >> 1 - return x - - res = self.meta_interp(f, [50, 1], enable_opts='') - assert res == 42 - - 
def test_set_param(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) - def g(n): - x = 0 - while n > 0: - myjitdriver.can_enter_jit(n=n, x=x) - myjitdriver.jit_merge_point(n=n, x=x) - n -= 1 - x += n - return x - def f(n, threshold): - myjitdriver.set_param('threshold', threshold) - return g(n) - - res = self.meta_interp(f, [10, 3]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(2) - - res = self.meta_interp(f, [10, 13]) - assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 - self.check_tree_loop_count(0) - - def test_dont_look_inside(self): - @dont_look_inside - def g(a, b): - return a + b - def f(a, b): - return g(a, b) - res = self.interp_operations(f, [3, 5]) - assert res == 8 - self.check_operations_history(int_add=0, call=1) - - def test_listcomp(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) - def f(x, y): - lst = [0, 0, 0] - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, lst=lst) - myjitdriver.jit_merge_point(x=x, y=y, lst=lst) - lst = [i+x for i in lst if i >=0] - y -= 1 - return lst[0] - res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True) - # XXX: the loop looks inefficient - assert res == 42 - - def test_tuple_immutable(self): - def new(a, b): - return a, b - def f(a, b): - tup = new(a, b) - return tup[1] - res = self.interp_operations(f, [3, 5]) - assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=1) - - def test_oosend_look_inside_only_one(self): - class A: - pass - class B(A): - def g(self): - return 123 - class C(A): - @dont_look_inside - def g(self): - return 456 - def f(n): - if n > 3: - x = B() - else: - x = C() - return x.g() + x.g() - res = self.interp_operations(f, [10]) - assert res == 123 * 2 - res = self.interp_operations(f, [-10]) - assert res == 456 * 2 - - def test_residual_external_call(self): - import math - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - def f(x, y): - x = float(x) - 
res = 0.0 - while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) - # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) - res += ipart - y -= 1 - return res - res = self.meta_interp(f, [6, 7]) - assert res == 42 - self.check_loop_count(1) - self.check_loops(call=1) - - def test_merge_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 5 - class B(A): - def g(self, y): - return y - 3 - - a1 = A() - a2 = A() - b = B() - def f(x): - l = [a1] * 100 + [a2] * 100 + [b] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - x = a.g(x) - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) - - def test_merge_guardnonnull_guardclass(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class 
B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [b1] * 100 + [None] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, - guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, - guard_nonnull_class=0, guard_isnull=2, - everywhere=True) - - def test_merge_guardnonnull_guardvalue_2(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - pass - class B(A): - pass - - a1 = A() - b1 = B() - def f(x): - l = [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x -= 5 - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [299], listops=True) - assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_merge_guardnonnull_guardclass_guardvalue(self): - from pypy.rlib.objectmodel import instantiate - myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) - - class A(object): - def g(self, x): - return x - 3 - class B(A): - def g(self, y): - return y - 5 - - a1 = A() - a2 = A() - b1 = B() - def f(x): - l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100 - while x > 0: - myjitdriver.can_enter_jit(x=x, l=l) - myjitdriver.jit_merge_point(x=x, l=l) - a = l[x] - if a: - x = a.g(x) - else: - x -= 7 - hint(a, promote=True) - return x - res = self.meta_interp(f, [399], listops=True) - assert res == f(399) - self.check_loops(guard_class=0, 
guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, - everywhere=True) - - def test_residual_call_doesnt_lose_info(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l']) - - class A(object): - pass - - globall = [""] - @dont_look_inside - def g(x): - globall[0] = str(x) - return x - - def f(x): - y = A() - y.v = x - l = [0] - while y.v > 0: - myjitdriver.can_enter_jit(x=x, y=y, l=l) - myjitdriver.jit_merge_point(x=x, y=y, l=l) - l[0] = y.v - lc = l[0] - y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 - return y.v - res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) - - def test_guard_isnull_nonnull(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - - @dont_look_inside - def create(x): - if x >= -40: - return A() - return None - - def f(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - obj = create(x-1) - if obj is not None: - res += 1 - obj2 = create(x-1000) - if obj2 is None: - res += 1 - x -= 1 - return res - res = self.meta_interp(f, [21]) - assert res == 42 - self.check_loops(guard_nonnull=1, guard_isnull=1) - - def test_loop_invariant1(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) - class A(object): - pass - a = A() - a.current_a = A() - a.current_a.x = 1 - @loop_invariant - def f(): - return a.current_a - - def g(x): - res = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res) - myjitdriver.jit_merge_point(x=x, res=res) - res += f().x - res += f().x - res += f().x - x -= 1 - a.current_a = A() - a.current_a.x = 2 - return res - res = self.meta_interp(g, [21]) - assert res == 3 * 21 - self.check_loops(call=0) - self.check_loops(call=1, everywhere=True) - - def 
test_bug_optimizeopt_mutates_ops(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a']) - class A(object): - pass - class B(A): - pass - - glob = A() - glob.a = None - def f(x): - res = 0 - a = A() - a.x = 0 - glob.a = A() - const = 2 - while x > 0: - myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const) - myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const) - if type(glob.a) is B: - res += 1 - if a is None: - a = A() - a.x = x - glob.a = B() - const = 2 - else: - const = hint(const, promote=True) - x -= const - res += a.x - a = None - glob.a = A() - const = 1 - return res - res = self.meta_interp(f, [21]) - assert res == f(21) - - def test_getitem_indexerror(self): - lst = [10, 4, 9, 16] - def f(n): - try: - return lst[n] - except IndexError: - return -2 - res = self.interp_operations(f, [2]) - assert res == 9 - res = self.interp_operations(f, [4]) - assert res == -2 - res = self.interp_operations(f, [-4]) - assert res == 10 - res = self.interp_operations(f, [-5]) - assert res == -2 - - def test_guard_always_changing_value(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - a = A() - hint(a, promote=True) - x -= 1 - self.meta_interp(f, [50]) - self.check_loop_count(1) - # this checks that the logic triggered by make_a_counter_per_value() - # works and prevents generating tons of bridges - - def test_swap_values(self): - def f(x, y): - if x > 5: - x, y = y, x - return x - y - res = self.interp_operations(f, [10, 2]) - assert res == -8 - res = self.interp_operations(f, [3, 2]) - assert res == 1 - - def test_raw_malloc_and_access(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Signed) - - def f(n): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = n - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10]) - assert res == 10 - - def 
test_raw_malloc_and_access_float(self): - from pypy.rpython.lltypesystem import rffi - - TP = rffi.CArray(lltype.Float) - - def f(n, f): - a = lltype.malloc(TP, n, flavor='raw') - a[0] = f - res = a[0] - lltype.free(a, flavor='raw') - return res - - res = self.interp_operations(f, [10, 3.5]) - assert res == 3.5 - - def test_jit_debug(self): - myjitdriver = JitDriver(greens = [], reds = ['x']) - class A: - pass - def f(x): - while x > 0: - myjitdriver.can_enter_jit(x=x) - myjitdriver.jit_merge_point(x=x) - jit_debug("hi there:", x) - jit_debug("foobar") - x -= 1 - return x - res = self.meta_interp(f, [8]) - assert res == 0 - self.check_loops(jit_debug=2) - - def test_assert_green(self): - def f(x, promote): - if promote: - x = hint(x, promote=True) - assert_green(x) - return x - res = self.interp_operations(f, [8, 1]) - assert res == 8 - py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0]) - - def test_multiple_specialied_versions1(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 7]) - assert res == 6*8 + 6**8 - self.check_loop_count(5) - self.check_loops({'guard_true': 2, - 'int_add': 1, 'int_mul': 1, 'int_sub': 2, - 'int_gt': 2, 'jump': 2}) - - def test_multiple_specialied_versions_array(self): - myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res', - 'array']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - 
def binop(self, other): - return A(self.val + other.val) - class B(Base): - def binop(self, other): - return B(self.val - other.val) - def f(x, y): - res = x - array = [1, 2, 3] - array[1] = 7 - idx = 0 - while y > 0: - myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res, - array=array) - myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res, - array=array) - res = res.binop(x) - res.val += array[idx] + array[1] - if y < 7: - idx = 2 - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) - - def test_multiple_specialied_versions_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - b1 = f(B(x), y, B(x)) - b2 = f(B(x), y, B(x)) - assert b1.val == b2.val - c1 = f(B(x), y, A(x)) - c2 = f(B(x), y, A(x)) - assert c1.val == c2.val - d1 = f(A(x), y, B(x)) - d2 = f(A(x), y, B(x)) - assert d1.val == d2.val - return a1.val + b1.val + c1.val + d1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_failing_inlined_guard(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class Base: - def __init__(self, val): - self.val = val - def getval(self): - return self.val 
- class A(Base): - def binop(self, other): - return A(self.getval() + other.getval()) - class B(Base): - def binop(self, other): - return B(self.getval() * other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 8: - x = z - return res - def g(x, y): - c1 = f(A(x), y, B(x)) - c2 = f(A(x), y, B(x)) - assert c1.val == c2.val - return c1.val - res = self.meta_interp(g, [3, 16]) - assert res == g(3, 16) - - def test_inlined_guard_in_short_preamble(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) - class A: - def __init__(self, val): - self.val = val - def getval(self): - return self.val - def binop(self, other): - return A(self.getval() + other.getval()) - def f(x, y, z): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res) - myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res) - res = res.binop(x) - y -= 1 - if y < 7: - x = z - return res - def g(x, y): - a1 = f(A(x), y, A(x)) - a2 = f(A(x), y, A(x)) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [3, 14]) - assert res == g(3, 14) - - def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return A(self.val + other.val) - def f(x, y): - res = A(0) - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - res = res.binop(A(y)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_specialied_bridge_const(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res']) - class A: - def __init__(self, val): - self.val = val - def binop(self, other): - return 
A(self.val + other.val) - def f(x, y): - res = A(0) - const = 7 - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const) - myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const) - const = hint(const, promote=True) - res = res.binop(A(const)) - if y<7: - res = x - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - assert a1.val == a2.val - return a1.val - res = self.meta_interp(g, [6, 14]) - assert res == g(6, 14) - - def test_multiple_specialied_zigzag(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) - class Base: - def __init__(self, val): - self.val = val - class A(Base): - def binop(self, other): - return A(self.val + other.val) - def switch(self): - return B(self.val) - class B(Base): - def binop(self, other): - return B(self.val * other.val) - def switch(self): - return A(self.val) - def f(x, y): - res = x - while y > 0: - myjitdriver.can_enter_jit(y=y, x=x, res=res) - myjitdriver.jit_merge_point(y=y, x=x, res=res) - if y % 4 == 0: - res = res.switch() - res = res.binop(x) - y -= 1 - return res - def g(x, y): - a1 = f(A(x), y) - a2 = f(A(x), y) - b1 = f(B(x), y) - b2 = f(B(x), y) - assert a1.val == a2.val - assert b1.val == b2.val - return a1.val + b1.val - res = self.meta_interp(g, [3, 23]) - assert res == 7068153 - self.check_loop_count(6) - self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, - guard_false=2) - - def test_dont_trace_every_iteration(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - - def main(a, b): - i = sa = 0 - #while i < 200: - while i < 200: - myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa) - myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa) - if a > 0: pass - if b < 2: pass - sa += a % b - i += 1 - return sa - def g(): - return main(10, 20) + main(-10, -20) - res = self.meta_interp(g, []) - assert res == g() - self.check_enter_count(2) - - def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = 
['x']) - @dont_look_inside - def residual(): - print "hi there" - @unroll_safe - def loop(g): - y = 0 - while y < g: - residual() - y += 1 - def f(x, g): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) - loop(g) - x -= 1 - n = current_trace_length() - return n - res = self.meta_interp(f, [5, 8]) - assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 - - def test_compute_identity_hash(self): - from pypy.rlib.objectmodel import compute_identity_hash - class A(object): - pass - def f(): - a = A() - return compute_identity_hash(a) == compute_identity_hash(a) - res = self.interp_operations(f, []) - assert res - # a "did not crash" kind of test - - def test_compute_unique_id(self): - from pypy.rlib.objectmodel import compute_unique_id - class A(object): - pass - def f(): - a1 = A() - a2 = A() - return (compute_unique_id(a1) == compute_unique_id(a1) and - compute_unique_id(a1) != compute_unique_id(a2)) - res = self.interp_operations(f, []) - assert res - - def test_wrap_around_add(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x += 1 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint-10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_mul(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x > 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x *= 2 - n += 1 - return n - res = self.meta_interp(f, [sys.maxint>>10]) - assert res == 11 - self.check_tree_loop_count(2) - - def test_wrap_around_sub(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) - class A: - pass - def f(x): - n = 0 - while x < 0: - myjitdriver.can_enter_jit(x=x, n=n) - myjitdriver.jit_merge_point(x=x, n=n) - x -= 1 - n += 1 - return n - res 
= self.meta_interp(f, [10-sys.maxint]) - assert res == 12 - self.check_tree_loop_count(2) - - - -class TestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - 
enable_opts='') - assert res - -class BaseLLtypeTests(BasicTests): - - def test_identityhash(self): - A = lltype.GcStruct("A") - def f(): - obj1 = lltype.malloc(A) - obj2 = lltype.malloc(A) - return lltype.identityhash(obj1) == lltype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oops_on_nongc(self): - from pypy.rpython.lltypesystem import lltype - - TP = lltype.Struct('x') - def f(i1, i2): - p1 = prebuilt[i1] - p2 = prebuilt[i2] - a = p1 is p2 - b = p1 is not p2 - c = bool(p1) - d = not bool(p2) - return 1000*a + 100*b + 10*c + d - prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2 - expected = f(0, 1) - assert self.interp_operations(f, [0, 1]) == expected - - def test_casts(self): - py.test.skip("xxx fix or kill") - if not self.basic: - py.test.skip("test written in a style that " - "means it's frontend only") - from pypy.rpython.lltypesystem import lltype, llmemory, rffi - - TP = lltype.GcStruct('S1') - def f(p): - n = lltype.cast_ptr_to_int(p) - return n - x = lltype.malloc(TP) - xref = lltype.cast_opaque_ptr(llmemory.GCREF, x) - res = self.interp_operations(f, [xref]) - y = llmemory.cast_ptr_to_adr(x) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - # - TP = lltype.Struct('S2') - prebuilt = [lltype.malloc(TP, immortal=True), - lltype.malloc(TP, immortal=True)] - def f(x): - p = prebuilt[x] - n = lltype.cast_ptr_to_int(p) - return n - res = self.interp_operations(f, [1]) - y = llmemory.cast_ptr_to_adr(prebuilt[1]) - y = llmemory.cast_adr_to_int(y) - assert rffi.get_real_int(res) == rffi.get_real_int(y) - - def test_collapsing_ptr_eq(self): - S = lltype.GcStruct('S') - p = lltype.malloc(S) - driver = JitDriver(greens = [], reds = ['n', 'x']) - - def f(n, x): - while n > 0: - driver.can_enter_jit(n=n, x=x) - driver.jit_merge_point(n=n, x=x) - if x: - n -= 1 - n -= 1 - - def main(): - f(10, p) - f(10, lltype.nullptr(S)) - - self.meta_interp(main, []) - 
- def test_enable_opts(self): - jitdriver = JitDriver(greens = [], reds = ['a']) - - class A(object): - def __init__(self, i): - self.i = i - - def f(): - a = A(0) - - while a.i < 10: - jitdriver.jit_merge_point(a=a) - jitdriver.can_enter_jit(a=a) - a = A(a.i + 1) - - self.meta_interp(f, []) - self.check_loops(new_with_vtable=0) - self.meta_interp(f, [], enable_opts='') - self.check_loops(new_with_vtable=1) - -class TestLLtype(BaseLLtypeTests, LLJitMixin): - pass diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py --- a/pypy/objspace/flow/test/test_objspace.py +++ b/pypy/objspace/flow/test/test_objspace.py @@ -1,8 +1,8 @@ from __future__ import with_statement import new import py -from pypy.objspace.flow.model import Constant, Block, Link, Variable, traverse -from pypy.objspace.flow.model import flatten, mkentrymap, c_last_exception +from pypy.objspace.flow.model import Constant, Block, Link, Variable +from pypy.objspace.flow.model import mkentrymap, c_last_exception from pypy.interpreter.argument import Arguments from pypy.translator.simplify import simplify_graph from pypy.objspace.flow.objspace import FlowObjSpace, error @@ -37,12 +37,10 @@ def all_operations(self, graph): result = {} - def visit(node): - if isinstance(node, Block): - for op in node.operations: - result.setdefault(op.opname, 0) - result[op.opname] += 1 - traverse(visit, graph) + for node in graph.iterblocks(): + for op in node.operations: + result.setdefault(op.opname, 0) + result[op.opname] += 1 return result @@ -246,12 +244,9 @@ x = self.codetest(self.implicitException) simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock - def implicitAttributeError(x): try: x = getattr(x, "y") @@ -263,10 +258,8 @@ x = self.codetest(self.implicitAttributeError) 
simplify_graph(x) self.show(x) - def cannot_reach_exceptblock(link): - if isinstance(link, Link): - assert link.target is not x.exceptblock - traverse(cannot_reach_exceptblock, x) + for link in x.iterlinks(): + assert link.target is not x.exceptblock #__________________________________________________________ def implicitException_int_and_id(x): @@ -311,14 +304,12 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: if isinstance(link.args[0], Constant): found[link.args[0].value] = True else: found[link.exitcase] = None - traverse(find_exceptions, x) assert found == {IndexError: True, KeyError: True, Exception: None} def reraiseAnything(x): @@ -332,12 +323,10 @@ simplify_graph(x) self.show(x) found = {} - def find_exceptions(link): - if isinstance(link, Link): + for link in x.iterlinks(): if link.target is x.exceptblock: assert isinstance(link.args[0], Constant) found[link.args[0].value] = True - traverse(find_exceptions, x) assert found == {ValueError: True, ZeroDivisionError: True, OverflowError: True} def loop_in_bare_except_bug(lst): @@ -521,11 +510,9 @@ def test_jump_target_specialization(self): x = self.codetest(self.jump_target_specialization) - def visitor(node): - if isinstance(node, Block): - for op in node.operations: - assert op.opname != 'mul', "mul should have disappeared" - traverse(visitor, x) + for block in x.iterblocks(): + for op in block.operations: + assert op.opname != 'mul', "mul should have disappeared" #__________________________________________________________ def highly_branching_example(a,b,c,d,e,f,g,h,i,j): @@ -573,7 +560,8 @@ def test_highly_branching_example(self): x = self.codetest(self.highly_branching_example) - assert len(flatten(x)) < 60 # roughly 20 blocks + 30 links + # roughly 20 blocks + 30 links + assert len(list(x.iterblocks())) + len(list(x.iterlinks())) < 60 
#__________________________________________________________ def test_unfrozen_user_class1(self): @@ -589,11 +577,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 2 def test_unfrozen_user_class2(self): @@ -607,11 +593,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert not isinstance(results[0], Constant) def test_frozen_user_class1(self): @@ -630,11 +614,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert len(results) == 1 def test_frozen_user_class2(self): @@ -650,11 +632,9 @@ graph = self.codetest(f) results = [] - def visit(link): - if isinstance(link, Link): - if link.target == graph.returnblock: - results.extend(link.args) - traverse(visit, graph) + for link in graph.iterlinks(): + if link.target == graph.returnblock: + results.extend(link.args) assert results == [Constant(4)] def test_const_star_call(self): @@ -663,14 +643,9 @@ def f(): return g(1,*(2,3)) graph = self.codetest(f) - call_args = [] - def visit(block): - if isinstance(block, Block): - for op in block.operations: - if op.opname == "call_args": - call_args.append(op) - traverse(visit, graph) - assert not call_args + for block in graph.iterblocks(): + for op in block.operations: + assert not op.opname == "call_args" def test_catch_importerror_1(self): def f(): @@ -997,11 +972,9 
@@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, AttributeError] @@ -1019,11 +992,9 @@ simplify_graph(x) self.show(x) excfound = [] - def check(link): - if isinstance(link, Link): - if link.target is x.exceptblock: - excfound.append(link.exitcase) - traverse(check, x) + for link in x.iterlinks(): + if link.target is x.exceptblock: + excfound.append(link.exitcase) assert len(excfound) == 2 excfound.sort() expected = [Exception, TypeError] diff --git a/lib_pypy/pypy_test/test_os_wait3.py b/lib_pypy/pypy_test/test_os_wait3.py deleted file mode 100644 --- a/lib_pypy/pypy_test/test_os_wait3.py +++ /dev/null @@ -1,19 +0,0 @@ -import os - -if hasattr(os, 'wait3'): - def test_os_wait3(): - exit_status = 0x33 - - if not hasattr(os, "fork"): - skip("Need fork() to test wait3()") - - child = os.fork() - if child == 0: # in child - os._exit(exit_status) - else: - pid, status, rusage = os.wait3(0) - assert child == pid - assert os.WIFEXITED(status) - assert os.WEXITSTATUS(status) == exit_status - assert isinstance(rusage.ru_utime, float) - assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/translator/c/database.py b/pypy/translator/c/database.py --- a/pypy/translator/c/database.py +++ b/pypy/translator/c/database.py @@ -1,7 +1,7 @@ -from pypy.rpython.lltypesystem.lltype import \ - Primitive, Ptr, typeOf, RuntimeTypeInfo, \ - Struct, Array, FuncType, PyObject, Void, \ - ContainerType, OpaqueType, FixedSizeArray, _uninitialized + +from pypy.rpython.lltypesystem.lltype import ( + Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, PyObject, + Void, ContainerType, OpaqueType, FixedSizeArray, _uninitialized, Typedef) from pypy.rpython.lltypesystem import lltype, rffi 
from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF from pypy.rpython.lltypesystem.rffi import CConstant @@ -100,6 +100,8 @@ def gettype(self, T, varlength=1, who_asks=None, argnames=[]): if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] + elif isinstance(T, Typedef): + return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -20,8 +20,7 @@ separate_module_files=[cdir.join('src', 'll_math.c')], export_symbols=['_pypy_math_acosh', '_pypy_math_asinh', '_pypy_math_atanh', - '_pypy_math_expm1', '_pypy_math_log1p', - '_pypy_math_isinf', '_pypy_math_isnan'], + '_pypy_math_expm1', '_pypy_math_log1p'], ) math_prefix = '_pypy_math_' else: @@ -58,7 +57,6 @@ math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) -math_isnan = math_llexternal('isnan', [rffi.DOUBLE], rffi.INT) # ____________________________________________________________ # @@ -91,13 +89,14 @@ # # Custom implementations - at jit.purefunction def ll_math_isnan(y): - return bool(math_isnan(y)) + # By not calling into the extenal function the JIT can inline this. Floats + # are awesome. + return y != y - at jit.purefunction def ll_math_isinf(y): - return bool(math_isinf(y)) + # Use a bitwise OR so the JIT doesn't produce 2 different guards. 
+ return (y == INFINITY) | (y == -INFINITY) ll_math_copysign = math_copysign diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py --- a/pypy/jit/backend/x86/test/test_assembler.py +++ b/pypy/jit/backend/x86/test/test_assembler.py @@ -140,7 +140,7 @@ xmmregisters = lltype.malloc(rffi.LONGP.TO, 16+ACTUAL_CPU.NUM_REGS+1, flavor='raw', immortal=True) registers = rffi.ptradd(xmmregisters, 16) - stacklen = baseloc + 10 + stacklen = baseloc + 30 stack = lltype.malloc(rffi.LONGP.TO, stacklen, flavor='raw', immortal=True) expected_ints = [0] * len(content) diff --git a/pypy/translator/backendopt/test/test_malloc.py b/pypy/translator/backendopt/test/test_malloc.py --- a/pypy/translator/backendopt/test/test_malloc.py +++ b/pypy/translator/backendopt/test/test_malloc.py @@ -3,7 +3,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.rpython.llinterp import LLInterpreter from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype @@ -22,8 +22,7 @@ remover = cls.MallocRemover() checkgraph(graph) count1 = count2 = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == cls.MallocRemover.MALLOC_OP: S = op.args[0].value @@ -47,7 +46,7 @@ auto_inline_graphs(t, t.graphs, inline) if option.view: t.view() - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() while True: progress = remover.remove_mallocs_once(graph) @@ -158,18 +157,6 @@ type_system = 'lltype' MallocRemover = LLTypeMallocRemover - def 
test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -199,50 +186,6 @@ op = graph.startblock.exits[0].target.exits[1].target.operations[0] assert op.opname == "malloc" - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, must_be_removed=False) - - def test_getsubstruct(self): - py.test.skip("fails because of the interior structure changes") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42) - - def test_fixedsizearray(self): - py.test.skip("fails because of the interior structure changes") - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58) - def test_wrapper_cannot_be_removed(self): SMALL = lltype.OpaqueType('SMALL') BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) diff --git a/pypy/translator/cli/test/test_list.py b/pypy/translator/cli/test/test_list.py --- a/pypy/translator/cli/test/test_list.py +++ b/pypy/translator/cli/test/test_list.py @@ -7,7 +7,10 @@ def test_recursive(self): py.test.skip("CLI doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + 
py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_list_unsigned(self): diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py --- a/pypy/rpython/memory/gctransform/framework.py +++ b/pypy/rpython/memory/gctransform/framework.py @@ -9,6 +9,7 @@ from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib import rstack, rgc from pypy.rlib.debug import ll_assert +from pypy.rlib.objectmodel import we_are_translated from pypy.translator.backendopt import graphanalyze from pypy.translator.backendopt.support import var_needsgc from pypy.annotation import model as annmodel @@ -151,8 +152,13 @@ # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.root_stack_jit_hook = None if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + try: + self.root_stack_jit_hook = translator._jit2gc['rootstackhook'] + except KeyError: + pass else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) self.layoutbuilder.transformer = self @@ -500,6 +506,10 @@ s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) + s_gc_data = self.translator.annotator.bookkeeper.valueoftype( + gctypelayout.GCData) + r_gc_data = self.translator.rtyper.getrepr(s_gc_data) + self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata) self.malloc_zero_filled = GCClass.malloc_zero_filled HDR = self.HDR = self.gcdata.gc.gcheaderbuilder.HDR @@ -786,6 +796,15 @@ resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) + def gct_gc_adr_of_root_stack_top(self, hop): + op = hop.spaceop + ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, + 'inst_root_stack_top') + c_ofs = rmodel.inputconst(lltype.Signed, ofs) + v_gcdata_adr = hop.genop('cast_ptr_to_adr', 
[self.c_const_gcdata], + resulttype=llmemory.Address) + hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) + def gct_gc_x_swap_pool(self, hop): op = hop.spaceop [v_malloced] = op.args @@ -1327,6 +1346,14 @@ return top self.decr_stack = decr_stack + self.rootstackhook = gctransformer.root_stack_jit_hook + if self.rootstackhook is None: + def collect_stack_root(callback, gc, addr): + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + return sizeofaddr + self.rootstackhook = collect_stack_root + def push_stack(self, addr): top = self.incr_stack(1) top.address[0] = addr @@ -1348,12 +1375,11 @@ def walk_stack_roots(self, collect_stack_root): gcdata = self.gcdata gc = self.gc + rootstackhook = self.rootstackhook addr = gcdata.root_stack_base end = gcdata.root_stack_top while addr != end: - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - addr += sizeofaddr + addr += rootstackhook(collect_stack_root, gc, addr) if self.collect_stacks_from_other_threads is not None: self.collect_stacks_from_other_threads(collect_stack_root) @@ -1460,12 +1486,11 @@ # collect all valid stacks from the dict (the entry # corresponding to the current thread is not valid) gc = self.gc + rootstackhook = self.rootstackhook end = stacktop - sizeofaddr addr = end.address[0] while addr != end: - if gc.points_to_valid_gc_object(addr): - callback(gc, addr) - addr += sizeofaddr + addr += rootstackhook(callback, gc, addr) def collect_more_stacks(callback): ll_assert(get_aid() == gcdata.active_thread, diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ b/pypy/jit/backend/x86/regalloc.py @@ -19,7 +19,8 @@ from pypy.jit.backend.llsupport.descr import BaseCallDescr, BaseSizeDescr from pypy.jit.backend.llsupport.regalloc import FrameManager, RegisterManager,\ TempBox -from pypy.jit.backend.x86.arch import WORD, FRAME_FIXED_SIZE, IS_X86_32, IS_X86_64 +from pypy.jit.backend.x86.arch import 
WORD, FRAME_FIXED_SIZE +from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64, MY_COPY_OF_REGS from pypy.rlib.rarithmetic import r_longlong, r_uint class X86RegisterManager(RegisterManager): @@ -34,6 +35,12 @@ esi: 2, edi: 3, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + } def call_result_location(self, v): return eax @@ -61,6 +68,19 @@ r14: 4, r15: 5, } + REGLOC_TO_COPY_AREA_OFS = { + ecx: MY_COPY_OF_REGS + 0 * WORD, + ebx: MY_COPY_OF_REGS + 1 * WORD, + esi: MY_COPY_OF_REGS + 2 * WORD, + edi: MY_COPY_OF_REGS + 3 * WORD, + r8: MY_COPY_OF_REGS + 4 * WORD, + r9: MY_COPY_OF_REGS + 5 * WORD, + r10: MY_COPY_OF_REGS + 6 * WORD, + r12: MY_COPY_OF_REGS + 7 * WORD, + r13: MY_COPY_OF_REGS + 8 * WORD, + r14: MY_COPY_OF_REGS + 9 * WORD, + r15: MY_COPY_OF_REGS + 10 * WORD, + } class X86XMMRegisterManager(RegisterManager): @@ -117,6 +137,16 @@ else: return 1 +if WORD == 4: + gpr_reg_mgr_cls = X86RegisterManager + xmm_reg_mgr_cls = X86XMMRegisterManager +elif WORD == 8: + gpr_reg_mgr_cls = X86_64_RegisterManager + xmm_reg_mgr_cls = X86_64_XMMRegisterManager +else: + raise AssertionError("Word size should be 4 or 8") + + class RegAlloc(object): def __init__(self, assembler, translate_support_code=False): @@ -135,16 +165,6 @@ # compute longevity of variables longevity = self._compute_vars_longevity(inputargs, operations) self.longevity = longevity - # XXX - if cpu.WORD == 4: - gpr_reg_mgr_cls = X86RegisterManager - xmm_reg_mgr_cls = X86XMMRegisterManager - elif cpu.WORD == 8: - gpr_reg_mgr_cls = X86_64_RegisterManager - xmm_reg_mgr_cls = X86_64_XMMRegisterManager - else: - raise AssertionError("Word size should be 4 or 8") - self.rm = gpr_reg_mgr_cls(longevity, frame_manager = self.fm, assembler = self.assembler) @@ -738,8 +758,12 @@ def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None): save_all_regs = guard_not_forced_op is not None 
+ self.xrm.before_call(force_store, save_all_regs=save_all_regs) + if not save_all_regs: + gcrootmap = gc_ll_descr = self.assembler.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + save_all_regs = 2 self.rm.before_call(force_store, save_all_regs=save_all_regs) - self.xrm.before_call(force_store, save_all_regs=save_all_regs) if op.result is not None: if op.result.type == FLOAT: resloc = self.xrm.after_call(op.result) @@ -836,31 +860,53 @@ self.PerformDiscard(op, arglocs) self.rm.possibly_free_vars_for_op(op) - def _fastpath_malloc(self, op, descr): + def fastpath_malloc_fixedsize(self, op, descr): assert isinstance(descr, BaseSizeDescr) + self._do_fastpath_malloc(op, descr.size, descr.tid) + + def fastpath_malloc_varsize(self, op, arraydescr, num_elem): + assert isinstance(arraydescr, BaseArrayDescr) + ofs_length = arraydescr.get_ofs_length(self.translate_support_code) + basesize = arraydescr.get_base_size(self.translate_support_code) + itemsize = arraydescr.get_item_size(self.translate_support_code) + size = basesize + itemsize * num_elem + self._do_fastpath_malloc(op, size, arraydescr.tid) + self.assembler.set_new_array_length(eax, ofs_length, imm(num_elem)) + + def _do_fastpath_malloc(self, op, size, tid): gc_ll_descr = self.assembler.cpu.gc_ll_descr self.rm.force_allocate_reg(op.result, selected_reg=eax) - # We need to force-allocate each of save_around_call_regs now. - # The alternative would be to save and restore them around the - # actual call to malloc(), in the rare case where we need to do - # it; however, mark_gc_roots() would need to be adapted to know - # where the variables end up being saved. Messy. 
- for reg in self.rm.save_around_call_regs: - if reg is not eax: - tmp_box = TempBox() - self.rm.force_allocate_reg(tmp_box, selected_reg=reg) - self.rm.possibly_free_var(tmp_box) - self.assembler.malloc_cond_fixedsize( + if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + # We need edx as a temporary, but otherwise don't save any more + # register. See comments in _build_malloc_slowpath(). + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=edx) + self.rm.possibly_free_var(tmp_box) + else: + # ---- asmgcc ---- + # We need to force-allocate each of save_around_call_regs now. + # The alternative would be to save and restore them around the + # actual call to malloc(), in the rare case where we need to do + # it; however, mark_gc_roots() would need to be adapted to know + # where the variables end up being saved. Messy. + for reg in self.rm.save_around_call_regs: + if reg is not eax: + tmp_box = TempBox() + self.rm.force_allocate_reg(tmp_box, selected_reg=reg) + self.rm.possibly_free_var(tmp_box) + + self.assembler.malloc_cond( gc_ll_descr.get_nursery_free_addr(), gc_ll_descr.get_nursery_top_addr(), - descr.size, descr.tid, + size, tid, ) def consider_new(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.can_inline_malloc(op.getdescr()): - self._fastpath_malloc(op, op.getdescr()) + self.fastpath_malloc_fixedsize(op, op.getdescr()) else: args = gc_ll_descr.args_for_new(op.getdescr()) arglocs = [imm(x) for x in args] @@ -870,7 +916,7 @@ classint = op.getarg(0).getint() descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint) if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize): - self._fastpath_malloc(op, descrsize) + self.fastpath_malloc_fixedsize(op, descrsize) self.assembler.set_vtable(eax, imm(classint)) # result of fastpath malloc is in eax else: @@ -929,16 +975,25 @@ gc_ll_descr = self.assembler.cpu.gc_ll_descr if gc_ll_descr.get_funcptr_for_newarray is not 
None: # framework GC - args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr()) + box_num_elem = op.getarg(0) + if isinstance(box_num_elem, ConstInt): + num_elem = box_num_elem.value + if gc_ll_descr.can_inline_malloc_varsize(op.getdescr(), + num_elem): + self.fastpath_malloc_varsize(op, op.getdescr(), num_elem) + return + args = self.assembler.cpu.gc_ll_descr.args_for_new_array( + op.getdescr()) arglocs = [imm(x) for x in args] - arglocs.append(self.loc(op.getarg(0))) - return self._call(op, arglocs) + arglocs.append(self.loc(box_num_elem)) + self._call(op, arglocs) + return # boehm GC (XXX kill the following code at some point) itemsize, basesize, ofs_length, _, _ = ( self._unpack_arraydescr(op.getdescr())) scale_of_field = _get_scale(itemsize) - return self._malloc_varsize(basesize, ofs_length, scale_of_field, - op.getarg(0), op.result) + self._malloc_varsize(basesize, ofs_length, scale_of_field, + op.getarg(0), op.result) def _unpack_arraydescr(self, arraydescr): assert isinstance(arraydescr, BaseArrayDescr) @@ -1132,7 +1187,7 @@ # call memcpy() self.rm.before_call() self.xrm.before_call() - self.assembler._emit_call(imm(self.assembler.memcpy_addr), + self.assembler._emit_call(-1, imm(self.assembler.memcpy_addr), [dstaddr_loc, srcaddr_loc, length_loc]) self.rm.possibly_free_var(length_box) self.rm.possibly_free_var(dstaddr_box) @@ -1200,18 +1255,24 @@ def consider_jit_debug(self, op): pass - def get_mark_gc_roots(self, gcrootmap): + def get_mark_gc_roots(self, gcrootmap, use_copy_area=False): shape = gcrootmap.get_basic_shape(IS_X86_64) for v, val in self.fm.frame_bindings.items(): if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): assert isinstance(val, StackLoc) - gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position)) + gcrootmap.add_frame_offset(shape, get_ebp_ofs(val.position)) for v, reg in self.rm.reg_bindings.items(): if reg is eax: continue # ok to ignore this one if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)): - assert reg 
in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX - gcrootmap.add_callee_save_reg(shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) + if use_copy_area: + assert reg in self.rm.REGLOC_TO_COPY_AREA_OFS + area_offset = self.rm.REGLOC_TO_COPY_AREA_OFS[reg] + gcrootmap.add_frame_offset(shape, area_offset) + else: + assert reg in self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX + gcrootmap.add_callee_save_reg( + shape, self.rm.REGLOC_TO_GCROOTMAP_REG_INDEX[reg]) return gcrootmap.compress_callshape(shape, self.assembler.datablockwrapper) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -46,6 +46,7 @@ import pypy.module.cpyext.complexobject import pypy.module.cpyext.weakrefobject import pypy.module.cpyext.funcobject +import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject diff --git a/pypy/translator/backendopt/mallocprediction.py b/pypy/translator/backendopt/mallocprediction.py --- a/pypy/translator/backendopt/mallocprediction.py +++ b/pypy/translator/backendopt/mallocprediction.py @@ -176,7 +176,6 @@ break count += newcount for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) return count diff --git a/pypy/jit/tl/pypyjit_child.py b/pypy/jit/tl/pypyjit_child.py --- a/pypy/jit/tl/pypyjit_child.py +++ b/pypy/jit/tl/pypyjit_child.py @@ -2,7 +2,6 @@ from pypy.rpython.lltypesystem import lltype from pypy.jit.metainterp import warmspot from pypy.module.pypyjit.policy import PyPyJitPolicy -from pypy.rlib.jit import OPTIMIZER_FULL, OPTIMIZER_NO_UNROLL def run_child(glob, loc): @@ -34,6 +33,5 @@ option.view = True warmspot.jittify_and_run(interp, graph, [], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git 
a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -285,6 +285,15 @@ elif drv.exe_name is None and '__name__' in targetspec_dic: drv.exe_name = targetspec_dic['__name__'] + '-%(backend)s' + # Double check to ensure we are not overwriting the current interpreter + try: + exe_name = str(drv.compute_exe_name()) + assert not os.path.samefile(exe_name, sys.executable), ( + 'Output file %r is the currently running ' + 'interpreter (use --output=...)'% exe_name) + except OSError: + pass + goals = translateconfig.goals try: drv.proceed(goals) diff --git a/pypy/jit/codewriter/test/test_regalloc.py b/pypy/jit/codewriter/test/test_regalloc.py --- a/pypy/jit/codewriter/test/test_regalloc.py +++ b/pypy/jit/codewriter/test/test_regalloc.py @@ -9,7 +9,6 @@ from pypy.objspace.flow.model import c_last_exception from pypy.rpython.lltypesystem import lltype, llmemory, rclass from pypy.rlib.rarithmetic import ovfcheck -from pypy.rlib.objectmodel import keepalive_until_here class TestRegAlloc: diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py --- a/pypy/rpython/test/test_rdict.py +++ b/pypy/rpython/test/test_rdict.py @@ -578,6 +578,26 @@ res = self.interpret(fn, [3, 3]) assert res == 123 + def test_dict_popitem(self): + def func(): + d = {} + d[5] = 2 + d[6] = 3 + k1, v1 = d.popitem() + assert len(d) == 1 + k2, v2 = d.popitem() + try: + d.popitem() + except KeyError: + pass + else: + assert 0, "should have raised KeyError" + assert len(d) == 0 + return k1*1000 + v1*100 + k2*10 + v2 + + res = self.interpret(func, []) + assert res in [5263, 6352] + class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): @@ -682,26 +702,6 @@ # if it does not crash, we are fine. It crashes if you forget the hash field. 
self.interpret(func, []) - def test_dict_popitem(self): - def func(): - d = {} - d[5] = 2 - d[6] = 3 - k1, v1 = d.popitem() - assert len(d) == 1 - k2, v2 = d.popitem() - try: - d.popitem() - except KeyError: - pass - else: - assert 0, "should have raised KeyError" - assert len(d) == 0 - return k1*1000 + v1*100 + k2*10 + v2 - - res = self.interpret(func, []) - assert res in [5263, 6352] - # ____________________________________________________________ def test_opt_nullkeymarker(self): diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py --- a/pypy/rpython/lltypesystem/rffi.py +++ b/pypy/rpython/lltypesystem/rffi.py @@ -746,6 +746,7 @@ def charpsize2str(cp, size): l = [cp[i] for i in range(size)] return emptystr.join(l) + charpsize2str._annenforceargs_ = [None, int] return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, @@ -817,6 +818,8 @@ """Similar to llmemory.sizeof() but tries hard to return a integer instead of a symbolic value. 
""" + if isinstance(tp, lltype.Typedef): + tp = tp.OF if isinstance(tp, lltype.FixedSizeArray): return sizeof(tp.OF) * tp.length if isinstance(tp, lltype.Struct): diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -110,6 +110,8 @@ #include "intobject.h" #include "listobject.h" #include "unicodeobject.h" +#include "compile.h" +#include "frameobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/translator/backendopt/removenoops.py b/pypy/translator/backendopt/removenoops.py --- a/pypy/translator/backendopt/removenoops.py +++ b/pypy/translator/backendopt/removenoops.py @@ -108,15 +108,3 @@ for i, op in list(enumerate(block.operations))[::-1]: if op.opname == "debug_assert": del block.operations[i] - -def remove_superfluous_keep_alive(graph): - for block in graph.iterblocks(): - used = {} - for i, op in list(enumerate(block.operations))[::-1]: - if op.opname == "keepalive": - if op.args[0] in used: - del block.operations[i] - else: - used[op.args[0]] = True - - diff --git a/pypy/rpython/test/test_rbuiltin.py b/pypy/rpython/test/test_rbuiltin.py --- a/pypy/rpython/test/test_rbuiltin.py +++ b/pypy/rpython/test/test_rbuiltin.py @@ -496,6 +496,13 @@ res = self.interpret(llf, [rffi.r_short(123)], policy=LowLevelAnnotatorPolicy()) assert res == 123 + def test_force_cast(self): + def llfn(v): + return rffi.cast(rffi.SHORT, v) + res = self.interpret(llfn, [0x12345678]) + assert res == 0x5678 + + class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_isinstance_obj(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,7 +12,6 @@ W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError -from 
pypy.module.thread.os_lock import Lock STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -121,7 +120,7 @@ ## XXX cannot free a Lock? ## if self.lock: ## self.lock.free() - self.lock = Lock(space) + self.lock = space.allocate_lock() try: self._raw_tell(space) diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -26,9 +26,10 @@ CPU = getcpuclass() class MockGcRootMap(object): + is_shadow_stack = False def get_basic_shape(self, is_64_bit): return ['shape'] - def add_ebp_offset(self, shape, offset): + def add_frame_offset(self, shape, offset): shape.append(offset) def add_callee_save_reg(self, shape, reg_index): index_to_name = { 1: 'ebx', 2: 'esi', 3: 'edi' } @@ -44,7 +45,8 @@ get_funcptr_for_newarray = get_funcptr_for_new get_funcptr_for_newstr = get_funcptr_for_new get_funcptr_for_newunicode = get_funcptr_for_new - + get_malloc_slowpath_addr = None + moving_gc = True gcrootmap = MockGcRootMap() @@ -166,26 +168,29 @@ class GCDescrFastpathMalloc(GcLLDescription): gcrootmap = None - + expected_malloc_slowpath_size = WORD*2 + def __init__(self): GcCache.__init__(self, False) # create a nursery NTP = rffi.CArray(lltype.Signed) self.nursery = lltype.malloc(NTP, 16, flavor='raw') - self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, + self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) - self.addrs[1] = self.addrs[0] + 64 - # 64 bytes + self.addrs[1] = self.addrs[0] + 16*WORD + self.addrs[2] = 0 + # 16 WORDs def malloc_slowpath(size): - assert size == WORD*2 + assert size == self.expected_malloc_slowpath_size nadr = rffi.cast(lltype.Signed, self.nursery) self.addrs[0] = nadr + size + self.addrs[2] += 1 return nadr self.malloc_slowpath = malloc_slowpath self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed) 
- self._counter = 123 + self._counter = 123000 def can_inline_malloc(self, descr): return True @@ -204,7 +209,7 @@ def get_nursery_top_addr(self): return rffi.cast(lltype.Signed, self.addrs) + WORD - def get_malloc_fixedsize_slowpath_addr(self): + def get_malloc_slowpath_addr(self): fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath) return rffi.cast(lltype.Signed, fptr) @@ -220,9 +225,11 @@ cpu.gc_ll_descr = GCDescrFastpathMalloc() cpu.setup_once() - NODE = lltype.Struct('node', ('tid', lltype.Signed), - ('value', lltype.Signed)) - nodedescr = cpu.sizeof(NODE) # xxx hack: NODE is not a GcStruct + # hack: specify 'tid' explicitly, because this test is not running + # with the gc transformer + NODE = lltype.GcStruct('node', ('tid', lltype.Signed), + ('value', lltype.Signed)) + nodedescr = cpu.sizeof(NODE) valuedescr = cpu.fielddescrof(NODE, 'value') self.cpu = cpu @@ -254,6 +261,7 @@ assert gc_ll_descr.nursery[1] == 42 nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*2) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called def test_malloc_slowpath(self): ops = ''' @@ -274,6 +282,7 @@ gc_ll_descr = self.cpu.gc_ll_descr nadr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nadr + (WORD*2) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once def test_new_with_vtable(self): ops = ''' @@ -289,3 +298,93 @@ assert gc_ll_descr.nursery[1] == self.vtable_int nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*3) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + +class Seen(Exception): + pass + +class GCDescrFastpathMallocVarsize(GCDescrFastpathMalloc): + def can_inline_malloc_varsize(self, arraydescr, num_elem): + return num_elem < 5 + def get_funcptr_for_newarray(self): + return 52 + def init_array_descr(self, A, descr): + descr.tid = self._counter + self._counter += 1 + def 
args_for_new_array(self, descr): + raise Seen("args_for_new_array") + +class TestMallocVarsizeFastpath(BaseTestRegalloc): + def setup_method(self, method): + cpu = CPU(None, None) + cpu.vtable_offset = WORD + cpu.gc_ll_descr = GCDescrFastpathMallocVarsize() + cpu.setup_once() + self.cpu = cpu + + ARRAY = lltype.GcArray(lltype.Signed) + arraydescr = cpu.arraydescrof(ARRAY) + self.arraydescr = arraydescr + + self.namespace = locals().copy() + + def test_malloc_varsize_fastpath(self): + # Hack. Running the GcLLDescr_framework without really having + # a complete GC means that we end up with both the tid and the + # length being at offset 0. In this case, so the length overwrites + # the tid. This is of course only the case in this test class. + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 142, descr=arraydescr) + setarrayitem_gc(p0, 3, 143, descr=arraydescr) + finish(p0) + ''' + self.interpret(ops, []) + # check the nursery + gc_ll_descr = self.cpu.gc_ll_descr + assert gc_ll_descr.nursery[0] == 4 + assert gc_ll_descr.nursery[1] == 142 + assert gc_ll_descr.nursery[4] == 143 + nurs_adr = rffi.cast(lltype.Signed, gc_ll_descr.nursery) + assert gc_ll_descr.addrs[0] == nurs_adr + (WORD*5) + assert gc_ll_descr.addrs[2] == 0 # slowpath never called + + def test_malloc_varsize_slowpath(self): + ops = ''' + [] + p0 = new_array(4, descr=arraydescr) + setarrayitem_gc(p0, 0, 420, descr=arraydescr) + setarrayitem_gc(p0, 3, 430, descr=arraydescr) + p1 = new_array(4, descr=arraydescr) + setarrayitem_gc(p1, 0, 421, descr=arraydescr) + setarrayitem_gc(p1, 3, 431, descr=arraydescr) + p2 = new_array(4, descr=arraydescr) + setarrayitem_gc(p2, 0, 422, descr=arraydescr) + setarrayitem_gc(p2, 3, 432, descr=arraydescr) + p3 = new_array(4, descr=arraydescr) + setarrayitem_gc(p3, 0, 423, descr=arraydescr) + setarrayitem_gc(p3, 3, 433, descr=arraydescr) + finish(p0, p1, p2, p3) + ''' + gc_ll_descr = self.cpu.gc_ll_descr + gc_ll_descr.expected_malloc_slowpath_size 
= 5*WORD + self.interpret(ops, []) + assert gc_ll_descr.addrs[2] == 1 # slowpath called once + + def test_malloc_varsize_too_big(self): + ops = ''' + [] + p0 = new_array(5, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_varsize_variable(self): + ops = ''' + [i0] + p0 = new_array(i0, descr=arraydescr) + finish(p0) + ''' + py.test.raises(Seen, self.interpret, ops, []) diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -283,9 +283,14 @@ sys.stdout = out = Out() try: raises(UnicodeError, "print unichr(0xa2)") + assert out.data == [] out.encoding = "cp424" print unichr(0xa2) assert out.data == [unichr(0xa2).encode("cp424"), "\n"] + del out.data[:] + del out.encoding + print u"foo\t", u"bar\n", u"trick", u"baz\n" # softspace handling + assert out.data == ["foo\t", "bar\n", "trick", " ", "baz\n", "\n"] finally: sys.stdout = save diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -35,7 +35,8 @@ def test_load_dynamic(self): raises(ImportError, self.imp.load_dynamic, 'foo', 'bar') - raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', 'baz.so') + raises(ImportError, self.imp.load_dynamic, 'foo', 'bar', + open(self.file_module)) def test_suffixes(self): for suffix, mode, type in self.imp.get_suffixes(): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -112,6 +112,7 @@ try: while True: count = fread(buf, 1, BUF_SIZE, fp) + count = rffi.cast(lltype.Signed, count) source += rffi.charpsize2str(buf, count) if count < BUF_SIZE: if feof(fp): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py 
+++ b/pypy/module/cpyext/test/test_pystate.py @@ -29,20 +29,14 @@ state = api.PyInterpreterState_Head() assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state) -def clear_threadstate(space): - # XXX: this should collect the ThreadState memory - del space.getexecutioncontext().cpyext_threadstate - class TestThreadState(BaseApiTest): def test_thread_state_get(self, space, api): ts = api.PyThreadState_Get() assert ts != nullptr(PyThreadState.TO) - clear_threadstate(space) def test_thread_state_interp(self, space, api): ts = api.PyThreadState_Get() assert ts.c_interp == api.PyInterpreterState_Head() - clear_threadstate(space) def test_basic_threadstate_dance(self, space, api): # Let extension modules call these functions, @@ -54,5 +48,3 @@ api.PyEval_AcquireThread(tstate) api.PyEval_ReleaseThread(tstate) - - clear_threadstate(space) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -91,9 +91,10 @@ else: # XXX that's slow def case_ok(filename): - index1 = filename.rfind(os.sep) - index2 = filename.rfind(os.altsep) - index = max(index1, index2) + index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) if index < 0: directory = os.curdir else: diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,7 +18,6 @@ def should_skip_instruction(self, instrname, argmodes): return ( super(TestRx86_64, self).should_skip_instruction(instrname, argmodes) or - ('j' in argmodes) or # Not testing FSTP on 64-bit for now (instrname == 'FSTP') ) diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py --- a/pypy/objspace/flow/flowcontext.py +++ b/pypy/objspace/flow/flowcontext.py @@ 
-311,8 +311,7 @@ # EggBlocks reuse the variables of their previous block, # which is deemed not acceptable for simplicity of the operations # that will be performed later on the flow graph. - def fixegg(link): - if isinstance(link, Link): + for link in list(self.graph.iterlinks()): block = link.target if isinstance(block, EggBlock): if (not block.operations and len(block.exits) == 1 and @@ -324,15 +323,14 @@ link.args = list(link2.args) link.target = link2.target assert link2.exitcase is None - fixegg(link) else: mapping = {} for a in block.inputargs: mapping[a] = Variable(a) block.renamevariables(mapping) - elif isinstance(link, SpamBlock): + for block in self.graph.iterblocks(): + if isinstance(link, SpamBlock): del link.framestate # memory saver - traverse(fixegg, self.graph) def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py --- a/pypy/jit/backend/llsupport/test/test_descr.py +++ b/pypy/jit/backend/llsupport/test/test_descr.py @@ -18,12 +18,33 @@ descr_t = get_size_descr(c0, T) assert descr_s.size == symbolic.get_size(S, False) assert descr_t.size == symbolic.get_size(T, False) + assert descr_s.count_fields_if_immutable() == -1 + assert descr_t.count_fields_if_immutable() == -1 assert descr_s == get_size_descr(c0, S) assert descr_s != get_size_descr(c1, S) # descr_s = get_size_descr(c1, S) assert isinstance(descr_s.size, Symbolic) + assert descr_s.count_fields_if_immutable() == -1 +def test_get_size_descr_immut(): + S = lltype.GcStruct('S', hints={'immutable': True}) + T = lltype.GcStruct('T', ('parent', S), + ('x', lltype.Char), + hints={'immutable': True}) + U = lltype.GcStruct('U', ('parent', T), + ('u', lltype.Ptr(T)), + ('v', lltype.Signed), + hints={'immutable': True}) + V = lltype.GcStruct('V', ('parent', U), + ('miss1', lltype.Void), + ('miss2', lltype.Void), + hints={'immutable': True}) + for STRUCT, expected in 
[(S, 0), (T, 1), (U, 3), (V, 3)]: + for translated in [False, True]: + c0 = GcCache(translated) + descr_s = get_size_descr(c0, STRUCT) + assert descr_s.count_fields_if_immutable() == expected def test_get_field_descr(): U = lltype.Struct('U') diff --git a/pypy/translator/backendopt/mallocv.py b/pypy/translator/backendopt/mallocv.py --- a/pypy/translator/backendopt/mallocv.py +++ b/pypy/translator/backendopt/mallocv.py @@ -846,22 +846,6 @@ else: return self.handle_default(op) - def handle_op_keepalive(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - rtnodes, vtnodes = find_all_nodes([node]) - newops = [] - for rtnode in rtnodes: - v = self.renamings[rtnode] - if isinstance(v, Variable): - T = v.concretetype - if isinstance(T, lltype.Ptr) and T._needsgc(): - v0 = varoftype(lltype.Void) - newops.append(SpaceOperation('keepalive', [v], v0)) - return newops - else: - return self.handle_default(op) - def handle_op_ptr_nonzero(self, op): node = self.getnode(op.args[0]) if isinstance(node, VirtualSpecNode): diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -43,9 +43,14 @@ class SizeDescr(AbstractDescr): size = 0 # help translation + is_immutable = False - def __init__(self, size): + def __init__(self, size, count_fields_if_immut=-1): self.size = size + self.count_fields_if_immut = count_fields_if_immut + + def count_fields_if_immutable(self): + return self.count_fields_if_immut def repr_of_descr(self): return '' % self.size @@ -62,15 +67,15 @@ return cache[STRUCT] except KeyError: size = symbolic.get_size(STRUCT, gccache.translate_support_code) + count_fields_if_immut = heaptracker.count_fields_if_immutable(STRUCT) if heaptracker.has_gcstruct_a_vtable(STRUCT): - sizedescr = SizeDescrWithVTable(size) + sizedescr = SizeDescrWithVTable(size, count_fields_if_immut) else: - sizedescr = SizeDescr(size) + sizedescr = 
SizeDescr(size, count_fields_if_immut) gccache.init_size_descr(STRUCT, sizedescr) cache[STRUCT] = sizedescr return sizedescr - # ____________________________________________________________ # FieldDescrs diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py --- a/pypy/jit/backend/x86/regloc.py +++ b/pypy/jit/backend/x86/regloc.py @@ -283,9 +283,15 @@ # These are the worst cases: val2 = loc2.value_i() code1 = loc1.location_code() - if (code1 == 'j' - or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1])) - or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))): + if code1 == 'j': + checkvalue = loc1.value_j() + elif code1 == 'm': + checkvalue = loc1.value_m()[1] + elif code1 == 'a': + checkvalue = loc1.value_a()[3] + else: + checkvalue = 0 + if not rx86.fits_in_32bits(checkvalue): # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai # and the constant offset in the address is 64-bit. # Hopefully this doesn't happen too often @@ -330,10 +336,10 @@ if code1 == possible_code1: val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 - if self.WORD == 8 and possible_code1 == 'j': + if possible_code1 == 'j' and not rx86.fits_in_32bits(val1): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) - elif self.WORD == 8 and possible_code2 == 'j': + elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]): @@ -378,6 +384,10 @@ _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() else: + # xxx can we avoid "MOV r11, $val; JMP/CALL *r11" + # in case it would fit a 32-bit displacement? + # Hard, because we don't know yet where this insn + # will end up... 
assert self.WORD == 8 self._load_scratch(val) _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) diff --git a/pypy/translator/backendopt/test/test_tailrecursion.py b/pypy/translator/backendopt/test/test_tailrecursion.py --- a/pypy/translator/backendopt/test/test_tailrecursion.py +++ b/pypy/translator/backendopt/test/test_tailrecursion.py @@ -1,4 +1,4 @@ -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.translator.backendopt.tailrecursion import remove_tail_calls_to_self from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.llinterp import LLInterpreter diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -69,12 +69,31 @@ }; +static int cmp_compare(PyObject *self, PyObject *other) { + return -1; +} + +PyTypeObject OldCmpType = { + PyVarObject_HEAD_INIT(NULL, 0) + "comparisons.OldCmpType", /* tp_name */ + sizeof(CmpObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)cmp_compare, /* tp_compare */ +}; + + void initcomparisons(void) { PyObject *m, *d; if (PyType_Ready(&CmpType) < 0) return; + if (PyType_Ready(&OldCmpType) < 0) + return; m = Py_InitModule("comparisons", NULL); if (m == NULL) return; @@ -83,4 +102,6 @@ return; if (PyDict_SetItemString(d, "CmpType", (PyObject *)&CmpType) < 0) return; + if (PyDict_SetItemString(d, "OldCmpType", (PyObject *)&OldCmpType) < 0) + return; } diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -262,6 +262,8 @@ signal(SIGALRM, SIG_DFL) class AppTestItimer: + spaceconfig = dict(usemodules=['signal']) + def test_itimer_real(self): 
import signal diff --git a/pypy/translator/c/src/ll_math.c b/pypy/translator/c/src/ll_math.c --- a/pypy/translator/c/src/ll_math.c +++ b/pypy/translator/c/src/ll_math.c @@ -22,18 +22,6 @@ #endif #define PyPy_NAN (HUGE_VAL * 0.) -int -_pypy_math_isinf(double x) -{ - return PyPy_IS_INFINITY(x); -} - -int -_pypy_math_isnan(double x) -{ - return PyPy_IS_NAN(x); -} - /* The following copyright notice applies to the original implementations of acosh, asinh and atanh. */ diff --git a/pypy/rpython/lltypesystem/test/test_lltype.py b/pypy/rpython/lltypesystem/test/test_lltype.py --- a/pypy/rpython/lltypesystem/test/test_lltype.py +++ b/pypy/rpython/lltypesystem/test/test_lltype.py @@ -804,6 +804,21 @@ hints={'immutable_fields': FieldListAccessor({'x':'[*]'})}) assert S._immutable_field('x') == '[*]' +def test_typedef(): + T = Typedef(Signed, 'T') + assert T == Signed + assert Signed == T + T2 = Typedef(T, 'T2') + assert T2 == T + assert T2.OF is Signed + py.test.raises(TypeError, Ptr, T) + assert rffi.CArrayPtr(T) == rffi.CArrayPtr(Signed) + assert rffi.CArrayPtr(Signed) == rffi.CArrayPtr(T) + + F = FuncType((T,), T) + assert F.RESULT == Signed + assert F.ARGS == (Signed,) + class TestTrackAllocation: def test_automatic_tracking(self): diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -153,10 +153,10 @@ for op in self._ops_for_chunk(chunk, include_debug_merge_points): yield op - def match(self, expected_src): + def match(self, expected_src, **kwds): ops = list(self.allops()) matcher = OpMatcher(ops, src=self.format_ops()) - return matcher.match(expected_src) + return matcher.match(expected_src, **kwds) def match_by_id(self, id, expected_src, **kwds): ops = list(self.ops_by_id(id, **kwds)) @@ -250,7 +250,6 @@ # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ ticker2 = 
getfield_raw(ticker_address, descr=) - setfield_gc(_, _, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ @@ -266,7 +265,7 @@ if exp_v2 == '_': return True if self.is_const(v1) or self.is_const(exp_v2): - return v1 == exp_v2 + return v1[:-1].startswith(exp_v2[:-1]) if v1 not in self.alpha_map: self.alpha_map[v1] = exp_v2 return self.alpha_map[v1] == exp_v2 @@ -315,7 +314,7 @@ # it matched! The '...' operator ends here return op - def match_loop(self, expected_ops): + def match_loop(self, expected_ops, ignore_ops): """ A note about partial matching: the '...' operator is non-greedy, i.e. it matches all the operations until it finds one that matches @@ -334,13 +333,16 @@ return op = self.match_until(exp_op, iter_ops) else: - op = self._next_op(iter_ops) + while True: + op = self._next_op(iter_ops) + if op.name not in ignore_ops: + break self.match_op(op, exp_op) # # make sure we exhausted iter_ops self._next_op(iter_ops, assert_raises=True) - def match(self, expected_src): + def match(self, expected_src, ignore_ops=[]): def format(src): if src is None: return '' @@ -349,7 +351,7 @@ expected_src = self.preprocess_expected_src(expected_src) expected_ops = self.parse_ops(expected_src) try: - self.match_loop(expected_ops) + self.match_loop(expected_ops, ignore_ops) except InvalidMatch, e: #raise # uncomment this and use py.test --pdb for better debugging print '@' * 40 @@ -358,6 +360,7 @@ print e.args print e.msg print + print "Ignore ops:", ignore_ops print "Got:" print format(self.src) print diff --git a/pypy/translator/c/test/test_database.py b/pypy/translator/c/test/test_database.py --- a/pypy/translator/c/test/test_database.py +++ b/pypy/translator/c/test/test_database.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Constant, Variable, SpaceOperation from pypy.objspace.flow.model import Block, Link, FunctionGraph from pypy.rpython.typesystem import getfunctionptr -from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, 
INT +from pypy.rpython.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr def dump_on_stdout(database): @@ -244,3 +244,15 @@ db.get(p) db.complete() dump_on_stdout(db) + +def test_typedef(): + A = Typedef(Signed, 'test4') + db = LowLevelDatabase() + assert db.gettype(A) == "test4 @" + + PA = CArrayPtr(A) + assert db.gettype(PA) == "test4 *@" + + F = FuncType((A,), A) + assert db.gettype(F) == "test4 (@)(test4)" + diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,5 +1,5 @@ -from pypy.jit.metainterp.history import Const, Box +from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import we_are_translated class TempBox(Box): @@ -313,11 +313,12 @@ self.assembler.regalloc_mov(reg, to) # otherwise it's clean - def before_call(self, force_store=[], save_all_regs=False): + def before_call(self, force_store=[], save_all_regs=0): """ Spill registers before a call, as described by 'self.save_around_call_regs'. Registers are not spilled if they don't survive past the current operation, unless they - are listed in 'force_store'. + are listed in 'force_store'. 'save_all_regs' can be 0 (default), + 1 (save all), or 2 (save default+PTRs). 
""" for v, reg in self.reg_bindings.items(): if v not in force_store and self.longevity[v][1] <= self.position: @@ -325,9 +326,11 @@ del self.reg_bindings[v] self.free_regs.append(reg) continue - if not save_all_regs and reg not in self.save_around_call_regs: - # we don't have to - continue + if save_all_regs != 1 and reg not in self.save_around_call_regs: + if save_all_regs == 0: + continue # we don't have to + if v.type != REF: + continue # only save GC pointers self._sync_var(v) del self.reg_bindings[v] self.free_regs.append(reg) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -3,6 +3,7 @@ import py from py.test import skip import sys, os, re +import subprocess class BytecodeTrace(list): def get_opnames(self, prefix=""): @@ -116,13 +117,12 @@ print >> f, "print 'OK :-)'" f.close() - if sys.platform.startswith('win'): - py.test.skip("XXX this is not Windows-friendly") print logfilepath - child_stdout = os.popen('PYPYLOG=":%s" "%s" "%s"' % ( - logfilepath, self.pypy_c, filepath), 'r') - result = child_stdout.read() - child_stdout.close() + env = os.environ.copy() + env['PYPYLOG'] = ":%s" % (logfilepath,) + p = subprocess.Popen([self.pypy_c, str(filepath)], + env=env, stdout=subprocess.PIPE) + result, _ = p.communicate() assert result if result.strip().startswith('SKIP:'): py.test.skip(result.strip()) @@ -198,44 +198,6 @@ print print '@' * 79 - def test_f1(self): - self.run_source(''' - def main(n): - "Arbitrary test function." 
- i = 0 - x = 1 - while i 1: - r *= n - n -= 1 - return r - ''', 28, - ([5], 120), - ([25], 15511210043330985984000000L)) - - def test_factorialrec(self): - self.run_source(''' - def main(n): - if n > 1: - return n * main(n-1) - else: - return 1 - ''', 0, - ([5], 120), - ([25], 15511210043330985984000000L)) def test_richards(self): self.run_source(''' @@ -247,529 +209,6 @@ ''' % (sys.path,), 7200, ([], 42)) - def test_simple_call(self): - self.run_source(''' - OFFSET = 0 - def f(i): - return i + 1 + OFFSET - def main(n): - i = 0 - while i < n+OFFSET: - i = f(f(i)) - return i - ''', 98, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOAD_GLOBAL", True) - assert len(ops) == 5 - assert ops[0].get_opnames() == ["guard_value", - "getfield_gc", "guard_value", - "getfield_gc", "guard_isnull", - "getfield_gc", "guard_nonnull_class"] - # the second getfield on the same globals is quicker - assert ops[1].get_opnames() == ["getfield_gc", "guard_nonnull_class"] - assert not ops[2] # second LOAD_GLOBAL of the same name folded away - # LOAD_GLOBAL of the same name but in different function partially - # folded away - # XXX could be improved - assert ops[3].get_opnames() == ["guard_value", - "getfield_gc", "guard_isnull"] - assert not ops[4] - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 10 - - ops = self.get_by_bytecode("LOAD_GLOBAL") - assert len(ops) == 5 - for bytecode in ops: - assert not bytecode - - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for bytecode in ops: - assert len(bytecode) <= 1 - - - def test_method_call(self): - self.run_source(''' - class A(object): - def __init__(self, a): - self.a = a - def f(self, i): - return self.a + i - def main(n): - i = 0 - a = A(1) 
- while i < n: - x = a.f(i) - i = a.f(x) - return i - ''', 93, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("LOOKUP_METHOD", True) - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 3 - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert not ops[0] # first LOOKUP_METHOD folded away - assert not ops[1] # second LOOKUP_METHOD folded away - - ops = self.get_by_bytecode("CALL_METHOD", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 6 - assert len(ops[1]) < len(ops[0]) - - ops = self.get_by_bytecode("CALL_METHOD") - assert len(ops) == 2 - assert len(ops[0]) <= 1 - assert len(ops[1]) <= 1 - - ops = self.get_by_bytecode("LOAD_ATTR", True) - assert len(ops) == 2 - # With mapdict, we get fast access to (so far) the 5 first - # attributes, which means it is done with only the following - # operations. (For the other attributes there is additionally - # a getarrayitem_gc.) 
- assert ops[0].get_opnames() == ["getfield_gc", - "guard_nonnull_class"] - assert not ops[1] # second LOAD_ATTR folded away - - ops = self.get_by_bytecode("LOAD_ATTR") - assert not ops[0] # first LOAD_ATTR folded away - assert not ops[1] # second LOAD_ATTR folded away - - def test_static_classmethod_call(self): - self.run_source(''' - class A(object): - @classmethod - def f(cls, i): - return i + (cls is A) + 1 - - @staticmethod - def g(i): - return i - 1 - - def main(n): - i = 0 - a = A() - while i < n: - x = a.f(i) - i = a.g(x) - return i - ''', 106, - ([20], 20), - ([31], 31)) - ops = self.get_by_bytecode("LOOKUP_METHOD") - assert len(ops) == 2 - assert not ops[0].get_opnames("call") - assert not ops[0].get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 2 - assert len(ops[0].get_opnames("getfield")) <= 4 - assert not ops[1] # second LOOKUP_METHOD folded away - - def test_default_and_kw(self): - self.run_source(''' - def f(i, j=1): - return i + j - def main(n): - i = 0 - while i < n: - i = f(f(i), j=1) - return i - ''', 100, - ([20], 20), - ([31], 32)) - ops = self.get_by_bytecode("CALL_FUNCTION") - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - ops = self.get_by_bytecode("CALL_FUNCTION", True) - assert len(ops) == 2 - for i, bytecode in enumerate(ops): - if i == 0: - assert "call(getexecutioncontext)" in str(bytecode) - else: - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(ops[0].get_opnames("guard")) <= 14 - assert len(ops[1].get_opnames("guard")) <= 3 - - def test_kwargs(self): - self.run_source(''' - d = {} - - def g(**args): - return len(args) - - def main(x): - s = 0 - d = {} - for i in range(x): - s += g(**d) - d[str(i)] = i - if i % 100 == 99: - d = {} - return s - ''', 100000, ([100], 4950), - ([1000], 
49500), - ([10000], 495000), - ([100000], 4950000)) - assert len(self.rawloops) + len(self.rawentrybridges) == 4 - op, = self.get_by_bytecode("CALL_FUNCTION_KW") - # XXX a bit too many guards, but better than before - assert len(op.get_opnames("guard")) <= 12 - - def test_stararg_virtual(self): - self.run_source(''' - d = {} - - def g(*args): - return len(args) - def h(a, b, c): - return c - - def main(x): - s = 0 - for i in range(x): - l = [i, x, 2] - s += g(*l) - s += h(*l) - s += g(i, x, 2) - for i in range(x): - l = [x, 2] - s += g(i, *l) - s += h(i, *l) - return s - ''', 100000, ([100], 1300), - ([1000], 13000), - ([10000], 130000), - ([100000], 1300000)) - assert len(self.loops) == 2 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - assert len(ops) == 4 - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - ops = self.get_by_bytecode("CALL_FUNCTION") - for op in ops: - assert len(op.get_opnames("new")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_stararg(self): - self.run_source(''' - d = {} - - def g(*args): - return args[-1] - def h(*args): - return len(args) - - def main(x): - s = 0 - l = [] - i = 0 - while i < x: - l.append(1) - s += g(*l) - i = h(*l) - return s - ''', 100000, ([100], 100), - ([1000], 1000), - ([2000], 2000), - ([4000], 4000)) - assert len(self.loops) == 1 - ops = self.get_by_bytecode("CALL_FUNCTION_VAR") - for op in ops: - assert len(op.get_opnames("new_with_vtable")) == 0 - assert len(op.get_opnames("call_may_force")) == 0 - - def test_virtual_instance(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0 - while i < n: - a = A() - assert isinstance(a, A) - assert not isinstance(a, int) - a.x = 2 - i = i + a.x - return i - ''', 69, - ([20], 20), - ([31], 32)) - - callA, callisinstance1, callisinstance2 = ( - self.get_by_bytecode("CALL_FUNCTION")) - assert not callA.get_opnames("call") - assert not callA.get_opnames("new") - 
assert len(callA.get_opnames("guard")) <= 2 - assert not callisinstance1.get_opnames("call") - assert not callisinstance1.get_opnames("new") - assert len(callisinstance1.get_opnames("guard")) <= 2 - # calling isinstance on a builtin type gives zero guards - # because the version_tag of a builtin type is immutable - assert not len(callisinstance1.get_opnames("guard")) - - - bytecode, = self.get_by_bytecode("STORE_ATTR") - assert bytecode.get_opnames() == [] - - def test_load_attr(self): - self.run_source(''' - class A(object): - pass - a = A() - a.x = 2 - def main(n): - i = 0 - while i < n: - i = i + a.x - return i - ''', 41, - ([20], 20), - ([31], 32)) - - load, = self.get_by_bytecode("LOAD_ATTR") - # 1 guard_value for the class - # 1 guard_value for the version_tag - # 1 guard_value for the structure - # 1 guard_nonnull_class for the result since it is used later - assert len(load.get_opnames("guard")) <= 4 - - def test_mixed_type_loop(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 0.0 - j = 2 - while i < n: - i = j + i - return i, type(i) is float - ''', 35, - ([20], (20, True)), - ([31], (32, True))) - - bytecode, = self.get_by_bytecode("BINARY_ADD") - assert not bytecode.get_opnames("call") - assert not bytecode.get_opnames("new") - assert len(bytecode.get_opnames("guard")) <= 2 - - def test_call_builtin_function(self): - self.run_source(''' - class A(object): - pass - def main(n): - i = 2 - l = [] - while i < n: - i += 1 - l.append(i) - return i, len(l) - ''', 39, - ([20], (20, 18)), - ([31], (31, 29))) - - bytecode, = self.get_by_bytecode("CALL_METHOD") - assert len(bytecode.get_opnames("new_with_vtable")) == 1 # the forcing of the int - assert len(bytecode.get_opnames("call")) == 1 # the call to append - assert len(bytecode.get_opnames("guard")) == 1 # guard for guard_no_exception after the call - bytecode, = self.get_by_bytecode("CALL_METHOD", True) - assert len(bytecode.get_opnames("guard")) == 2 # guard for profiling 
disabledness + guard_no_exception after the call - - def test_range_iter(self): - self.run_source(''' - def g(n): - return range(n) - - def main(n): - s = 0 - for i in range(n): - s += g(n)[i] - return s - ''', 143, ([1000], 1000 * 999 / 2)) - bytecode, = self.get_by_bytecode("BINARY_SUBSCR", True) - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER", True) # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_value", - "guard_class", # check the class of the iterator - "guard_nonnull", # check that the iterator is not finished - "guard_isnull", # check that the range list is not forced - "guard_false", # check that the index is lower than the current length - ] - - bytecode, = self.get_by_bytecode("BINARY_SUBSCR") - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is >= 0 - "guard_false", # check that the index is lower than the current length - ] - bytecode, _ = self.get_by_bytecode("FOR_ITER") # second bytecode is the end of the loop - assert bytecode.get_opnames("guard") == [ - "guard_false", # check that the index is lower than the current length - ] - - def test_exception_inside_loop_1(self): - self.run_source(''' - def main(n): - while n: - try: - raise ValueError - except ValueError: - pass - n -= 1 - return n - ''', 33, - ([30], 0)) - - bytecode, = self.get_by_bytecode("SETUP_EXCEPT") - #assert not bytecode.get_opnames("new") -- currently, we have - # new_with_vtable(pypy.interpreter.pyopcode.ExceptBlock) - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert not bytecode.get_opnames() - - def test_exception_inside_loop_2(self): - self.run_source(''' - def g(n): - raise ValueError(n) - def f(n): - g(n) - def main(n): - while n: - 
try: - f(n) - except ValueError: - pass - n -= 1 - return n - ''', 51, - ([30], 0)) - - bytecode, = self.get_by_bytecode("RAISE_VARARGS") - assert not bytecode.get_opnames("new") - bytecode, = self.get_by_bytecode("COMPARE_OP") - assert len(bytecode.get_opnames()) <= 2 # oois, guard_true - - def test_chain_of_guards(self): - self.run_source(''' - class A(object): - def method_x(self): - return 3 - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - i = 0 - while i < 2000: - name = l[arg] - sum += getattr(a, 'method_' + name)() - i += 1 - return sum - ''', 3000, ([0], 2000*3)) - assert len(self.loops) == 1 - - def test_getattr_with_dynamic_attribute(self): - self.run_source(''' - class A(object): - pass - - l = ["x", "y"] - - def main(arg): - sum = 0 - a = A() - a.a1 = 0 - a.a2 = 0 - a.a3 = 0 - a.a4 = 0 - a.a5 = 0 # workaround, because the first five attributes need a promotion - a.x = 1 - a.y = 2 - i = 0 - while i < 2000: - name = l[i % 2] - sum += getattr(a, name) - i += 1 - return sum - ''', 3000, ([0], 3000)) - assert len(self.loops) == 1 - - def test_blockstack_virtualizable(self): - self.run_source(''' - from pypyjit import residual_call - - def main(): - i = 0 - while i < 100: - try: - residual_call(len, []) - except: - pass - i += 1 - return i - ''', 1000, ([], 100)) - bytecode, = self.get_by_bytecode("CALL_FUNCTION") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('new_with_vtable')) == 2 - - def test_import_in_function(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - from sys import version - i += 1 - return i - ''', 100, ([], 100)) - bytecode, = self.get_by_bytecode('IMPORT_NAME') - bytecode2, = self.get_by_bytecode('IMPORT_FROM') - assert len(bytecode.get_opnames('call')) == 2 # split_chr and list_pop - assert len(bytecode2.get_opnames('call')) == 0 - - def test_arraycopy_disappears(self): - self.run_source(''' - def main(): - i = 0 - while i < 100: - t = (1, 2, 3, i + 1) - t2 = t[:] 
- del t - i = t2[3] - del t2 - return i - ''', 40, ([], 100)) - bytecode, = self.get_by_bytecode('BINARY_SUBSCR') - assert len(bytecode.get_opnames('new_array')) == 0 def test_overflow_checking(self): startvalue = sys.maxint - 2147483647 @@ -784,514 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_boolrewrite_invers(self): - for a, b, res, ops in (('2000', '2000', 20001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 16001700, 83), - ( 'a', 'b', 16001700, 89), - ( 'a', 'a', 13001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if i >= %s: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - def test_boolrewrite_reflex(self): - for a, b, res, ops in (('2000', '2000', 10001000, 51), - ( '500', '500', 15001500, 81), - ( '300', '600', 14001700, 83), - ( 'a', 'b', 14001700, 89), - ( 'a', 'a', 17001700, 85)): - - self.run_source(''' - def main(): - sa = 0 - a = 300 - b = 600 - for i in range(1000): - if i < %s: sa += 1 - else: sa += 2 - if %s > i: sa += 10000 - else: sa += 20000 - return sa - '''%(a, b), ops, ([], res)) - - - def test_boolrewrite_correct_invers(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b-1, op2, b) * 10000 * (b) - res += opval( b, op2, b) * 10000 - res += opval(b+1, op2, b) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if i %s %d: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, op2, b), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - 
else: sa += 2 - if i %s %f: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, op2, float(b)/4.0), 156, ([], res)) - - - def test_boolrewrite_correct_reflex(self): - def opval(i, op, a): - if eval('%d %s %d' % (i, op, a)): return 1 - return 2 - - ops = ('<', '>', '<=', '>=', '==', '!=') - for op1 in ops: - for op2 in ops: - for a,b in ((500, 500), (300, 600)): - res = 0 - res += opval(a-1, op1, a) * (a) - res += opval( a, op1, a) - res += opval(a+1, op1, a) * (1000 - a - 1) - res += opval(b, op2, b-1) * 10000 * (b) - res += opval(b, op2, b) * 10000 - res += opval(b, op2, b+1) * 10000 * (1000 - b - 1) - - self.run_source(''' - def main(): - sa = 0 - for i in range(1000): - if i %s %d: sa += 1 - else: sa += 2 - if %d %s i: sa += 10000 - else: sa += 20000 - return sa - '''%(op1, a, b, op2), 83, ([], res)) - - self.run_source(''' - def main(): - sa = 0 - i = 0.0 - while i < 250.0: - if i %s %f: sa += 1 - else: sa += 2 - if %f %s i: sa += 10000 - else: sa += 20000 - i += 0.25 - return sa - '''%(op1, float(a)/4.0, float(b)/4.0, op2), 156, ([], res)) - - def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') - for e1 in compares: - for e2 in compares: - a, b, c = 1, 2, 3 - if eval(e1): res = 752 * 1 - else: res = 752 * 2 - if eval(e2): res += 752 * 10000 - else: res += 752 * 20000 - a = b - if eval(e1): res += 248 * 1 - else: res += 248 * 2 - if eval(e2): res += 248 * 10000 - else: res += 248 * 20000 - - - if 'c' in e1 or 'c' in e2: - n = 337 - else: - n = 215 - - print - print 'Test:', e1, e2, n, res - self.run_source(''' - class tst(object): - pass - def main(): - a = tst() - b = tst() - c = tst() - sa = 0 - for i in range(1000): - if %s: sa += 1 - else: sa += 2 - if %s: sa += 10000 - else: sa += 20000 - if i > 750: a = b - return sa - '''%(e1, e2), n, ([], res)) - - def 
test_array_sum(self): - for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)): - res = 19352859 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(): - img = array("%s", range(127) * 5) * 484 - l, i = 0, 0 - while i < 640 * 480: - l += img[i] - i += 1 - return l - ''' % tc, maxops, ([], res)) - - def test_array_sum_char(self): - self.run_source(''' - from array import array - - def main(): - img = array("c", "Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - l += ord(img[i]) - i += 1 - return l - ''', 60, ([], 30720000)) - - def test_array_sum_unicode(self): - self.run_source(''' - from array import array - - def main(): - img = array("u", u"Hello") * 130 * 480 - l, i = 0, 0 - while i < 640 * 480: - if img[i] == u"l": - l += 1 - i += 1 - return l - ''', 65, ([], 122880)) - - def test_array_intimg(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead - for tc, maxops in zip('ilILd', (67, 67, 70, 70, 61)): - print - print '='*65 - print '='*20, 'running test for tc=%r' % (tc,), '='*20 - res = 73574560 - if tc == 'L': - res = long(res) - elif tc in 'fd': - res = float(res) - elif tc == 'I' and sys.maxint == 2147483647: - res = long(res) - # note: in CPython we always get longs here, even on 64-bits - - self.run_source(''' - from array import array - - def main(tc): - img = array(tc, range(3)) * (350 * 480) - intimg = array(tc, (0,)) * (640 * 480) - l, i = 0, 640 - while i < 640 * 480: - l = l + img[i] - intimg[i] = (intimg[i-640] + l) - i += 1 - return intimg[i - 1] - ''', maxops, ([tc], res)) - - def test_unpackiterable(self): - self.run_source(''' - from array import array - - def main(): - i = 0 - t = array('l', (1, 2)) - while i < 2000: - a, b = t - i += 1 - return 
3 - - ''', 100, ([], 3)) - bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE") - # we allocate virtual ref and frame, we don't want block - assert len(bytecode.get_opnames('call_may_force')) == 0 - - - def test_intbound_simple(self): - ops = ('<', '>', '<=', '>=', '==', '!=') - nbr = (3, 7) - for o1 in ops: - for o2 in ops: - for n1 in nbr: - for n2 in nbr: - src = ''' - def f(i): - a, b = 3, 3 - if i %s %d: - a = 0 - else: - a = 1 - if i %s %d: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (o1, n1, o2, n2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 268, ([], res)) - - def test_intbound_addsub_mix(self): - tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', - 'i - 1 > 1', '1 - i > 1', '1 - i < -3', - 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') - for t1 in tests: - for t2 in tests: - print t1, t2 - src = ''' - def f(i): - a, b = 3, 3 - if %s: - a = 0 - else: - a = 1 - if %s: - b = 0 - else: - b = 1 - return a + b * 2 - - def main(): - res = [0] * 4 - idx = [] - for i in range(15): - idx.extend([i] * 1500) - for i in idx: - res[f(i)] += 1 - return res - - ''' % (t1, t2) - - exec(str(py.code.Source(src))) - res = [0] * 4 - for i in range(15): - res[f(i)] += 1500 - self.run_source(src, 280, ([], res)) - - def test_intbound_gt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i > -1: - a += 1 - if i > -2: - b += 1 - i += 1 - return (a, b) - ''', 48, ([], (2000, 2000))) - - def test_intbound_sub_lt(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i - 10 < 1995: - a += 1 - i += 1 - return (a, b) - ''', 38, ([], (2000, 0))) - - def test_intbound_addsub_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if i - 1 >= -1: - b += 1 
- i += 1 - return (a, b) - ''', 56, ([], (2000, 2000))) - - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - - def test_min_max(self): - self.run_source(''' - def main(): - i=0 - sa=0 - while i < 2000: - sa+=min(max(i, 
3000), 4000) - i+=1 - return sa - ''', 51, ([], 2000*3000)) - - def test_silly_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(*range(i)) - i+=1 - return sa - ''', 125, ([], 1997001)) - - def test_iter_max(self): - self.run_source(''' - def main(): - i=2 - sa=0 - while i < 2000: - sa+=max(range(i)) - i+=1 - return sa - ''', 88, ([], 1997001)) - def test__ffi_call(self): from pypy.rlib.test.test_libffi import get_libm_name libm_name = get_libm_name(sys.platform) diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder from pypy.jit.metainterp.blackhole import BlackholeInterpreter from pypy.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/pypy/rpython/test/test_rint.py b/pypy/rpython/test/test_rint.py --- a/pypy/rpython/test/test_rint.py +++ b/pypy/rpython/test/test_rint.py @@ -4,7 +4,7 @@ from pypy.annotation import model as annmodel from pypy.rpython.test import snippet from pypy.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong -from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from pypy.rlib import objectmodel from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin @@ -215,6 +215,14 @@ assert res == f(inttype(0)) assert type(res) == inttype + def test_and_or(self): + inttypes = [int, r_uint, r_int64, r_ulonglong] + for inttype in inttypes: + def f(a, b, c): + return a&b|c + res = self.interpret(f, [inttype(0x1234), inttype(0x00FF), inttype(0x5600)]) + assert res == f(0x1234, 
0x00FF, 0x5600) + def test_neg_abs_ovf(self): for op in (operator.neg, abs): def f(x): @@ -388,6 +396,18 @@ else: assert res == 123456789012345678 + def test_int_between(self): + def fn(a, b, c): + return int_between(a, b, c) + assert self.interpret(fn, [1, 1, 3]) + assert self.interpret(fn, [1, 2, 3]) + assert not self.interpret(fn, [1, 0, 2]) + assert not self.interpret(fn, [1, 5, 2]) + assert not self.interpret(fn, [1, 2, 2]) + assert not self.interpret(fn, [1, 1, 1]) + + + class TestLLtype(BaseTestRint, LLRtypeMixin): pass diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java --- a/pypy/translator/jvm/src/pypy/PyPy.java +++ b/pypy/translator/jvm/src/pypy/PyPy.java @@ -38,6 +38,10 @@ public final static int INT_MIN = Integer.MIN_VALUE; public final static double ULONG_MAX = 18446744073709551616.0; + public static boolean int_between(int a, int b, int c) { + return a <= b && b < c; + } + /** * Compares two unsigned integers (value1 and value2) and returns * a value greater than, equal to, or less than zero if value 1 is @@ -163,6 +167,13 @@ return ULONG_MAX + value; } } + + public static long double_to_ulong(double value) { + if (value < 0) + return (long)(ULONG_MAX + value); + else + return (long)value; + } public static int double_to_uint(double value) { if (value <= Integer.MAX_VALUE) @@ -1175,6 +1186,18 @@ return Math.tanh(x); } + public double ll_math_copysign(double x, double y) { + return Math.copySign(x, y); + } + + public boolean ll_math_isnan(double x) { + return Double.isNaN(x); + } + + public boolean ll_math_isinf(double x) { + return Double.isInfinite(x); + } + private double check(double v) { if (Double.isNaN(v)) interlink.throwValueError(); @@ -1187,9 +1210,42 @@ return Character.toLowerCase(c); } + public int locale_tolower(int chr) + { + return Character.toLowerCase(chr); + } + + public int locale_isupper(int chr) + { + return boolean2int(Character.isUpperCase(chr)); + } + + public int locale_islower(int 
chr) + { + return boolean2int(Character.isLowerCase(chr)); + } + + public int locale_isalpha(int chr) + { + return boolean2int(Character.isLetter(chr)); + } + + public int locale_isalnum(int chr) + { + return boolean2int(Character.isLetterOrDigit(chr)); + } + + // ---------------------------------------------------------------------- // Self Test + public static int boolean2int(boolean b) + { + if (b) + return 1; + return 0; + } + public static int __counter = 0, __failures = 0; public static void ensure(boolean f) { if (f) { diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.ootypesystem import ootype -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin class StringTests: diff --git a/pypy/translator/cli/ilgenerator.py b/pypy/translator/cli/ilgenerator.py --- a/pypy/translator/cli/ilgenerator.py +++ b/pypy/translator/cli/ilgenerator.py @@ -443,8 +443,8 @@ self.ilasm.opcode('newarr', clitype.itemtype.typename()) def _array_suffix(self, ARRAY, erase_unsigned=False): - from pypy.translator.cli.metavm import OOTYPE_TO_MNEMONIC - suffix = OOTYPE_TO_MNEMONIC.get(ARRAY.ITEM, 'ref') + from pypy.translator.cli.metavm import ootype_to_mnemonic + suffix = ootype_to_mnemonic(ARRAY.ITEM, ARRAY.ITEM, 'ref') if erase_unsigned: suffix = suffix.replace('u', 'i') return suffix diff --git a/pypy/jit/codewriter/heaptracker.py b/pypy/jit/codewriter/heaptracker.py --- a/pypy/jit/codewriter/heaptracker.py +++ b/pypy/jit/codewriter/heaptracker.py @@ -10,6 +10,30 @@ def int2adr(int): return llmemory.cast_int_to_adr(int) +def count_fields_if_immutable(STRUCT): + assert isinstance(STRUCT, lltype.GcStruct) + if 
STRUCT._hints.get('immutable', False): + try: + return _count_fields(STRUCT) + except ValueError: + pass + return -1 + +def _count_fields(STRUCT): + if STRUCT == rclass.OBJECT: + return 0 # don't count 'typeptr' + result = 0 + for fieldname, TYPE in STRUCT._flds.items(): + if TYPE is lltype.Void: + pass # ignore Voids + elif not isinstance(TYPE, lltype.ContainerType): + result += 1 + elif isinstance(TYPE, lltype.GcStruct): + result += _count_fields(TYPE) + else: + raise ValueError(TYPE) + return result + # ____________________________________________________________ def has_gcstruct_a_vtable(GCSTRUCT): diff --git a/pypy/jit/metainterp/test/test_greenfield.py b/pypy/jit/metainterp/test/test_greenfield.py --- a/pypy/jit/metainterp/test/test_greenfield.py +++ b/pypy/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver diff --git a/pypy/rlib/rlocale.py b/pypy/rlib/rlocale.py --- a/pypy/rlib/rlocale.py +++ b/pypy/rlib/rlocale.py @@ -7,6 +7,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.rpython.tool import rffi_platform as platform +from pypy.rpython.extfunc import register_external class LocaleError(Exception): def __init__(self, message): @@ -156,23 +157,35 @@ HAVE_BIND_TEXTDOMAIN_CODESET = cConfig.HAVE_BIND_TEXTDOMAIN_CODESET -def external(name, args, result, calling_conv='c'): +def external(name, args, result, calling_conv='c', **kwds): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv=calling_conv, - sandboxsafe=True) + sandboxsafe=True, **kwds) _lconv = lltype.Ptr(cConfig.lconv) localeconv = external('localeconv', [], _lconv) def numeric_formatting(): """Specialized function to get formatting for numbers""" + return numeric_formatting_impl() + +def 
numeric_formatting_impl(): conv = localeconv() decimal_point = rffi.charp2str(conv.c_decimal_point) thousands_sep = rffi.charp2str(conv.c_thousands_sep) grouping = rffi.charp2str(conv.c_grouping) return decimal_point, thousands_sep, grouping +def oo_numeric_formatting(): + return '.', '', '' + +register_external(numeric_formatting, [], (str, str, str), + llimpl=numeric_formatting_impl, + ooimpl=oo_numeric_formatting, + sandboxsafe=True) + + _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) def setlocale(category, locale): @@ -184,11 +197,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT) -isupper = external('isupper', [rffi.INT], rffi.INT) -islower = external('islower', [rffi.INT], rffi.INT) -tolower = external('tolower', [rffi.INT], rffi.INT) -isalnum = external('isalnum', [rffi.INT], rffi.INT) +isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') +isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') +islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') +tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') +isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py --- a/pypy/module/thread/ll_thread.py +++ b/pypy/module/thread/ll_thread.py @@ -1,10 +1,10 @@ -from pypy.rpython.lltypesystem import rffi -from pypy.rpython.lltypesystem import lltype, llmemory +from pypy.rpython.lltypesystem import rffi, lltype, llmemory from pypy.rpython.tool import rffi_platform as platform from pypy.translator.tool.cbuild import ExternalCompilationInfo import py, os from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rlib import jit from pypy.rlib.debug import 
ll_assert from pypy.rlib.objectmodel import we_are_translated from pypy.rpython.lltypesystem.lloperation import llop @@ -79,6 +79,7 @@ # wrappers... + at jit.loop_invariant def get_ident(): return rffi.cast(lltype.Signed, c_thread_get_ident()) @@ -113,6 +114,12 @@ def __del__(self): free_ll_lock(self._lock) + def __enter__(self): + self.acquire(True) + + def __exit__(self, *args): + self.release() + # ____________________________________________________________ # # Stack size diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py --- a/pypy/objspace/flow/model.py +++ b/pypy/objspace/flow/model.py @@ -379,27 +379,6 @@ return result -def traverse(visit, functiongraph): - block = functiongraph.startblock - visit(block) - seen = identity_dict() - seen[block] = True - stack = list(block.exits[::-1]) - while stack: - link = stack.pop() - visit(link) - block = link.target - if block not in seen: - visit(block) - seen[block] = True - stack += block.exits[::-1] - - -def flatten(funcgraph): - l = [] - traverse(l.append, funcgraph) - return l - def flattenobj(*args): for arg in args: try: @@ -497,6 +476,19 @@ assert block.operations == () assert block.exits == () + def definevar(v, only_in_link=None): + assert isinstance(v, Variable) + assert v not in vars, "duplicate variable %r" % (v,) + assert v not in vars_previous_blocks, ( + "variable %r used in more than one block" % (v,)) + vars[v] = only_in_link + + def usevar(v, in_link=None): + assert v in vars + if in_link is not None: + assert vars[v] is None or vars[v] is in_link + + for block in graph.iterblocks(): assert bool(block.isstartblock) == (block is graph.startblock) assert type(block.exits) is tuple, ( @@ -506,18 +498,6 @@ assert block in exitblocks vars = {} - def definevar(v, only_in_link=None): - assert isinstance(v, Variable) - assert v not in vars, "duplicate variable %r" % (v,) - assert v not in vars_previous_blocks, ( - "variable %r used in more than one block" % (v,)) - vars[v] = only_in_link 
- - def usevar(v, in_link=None): - assert v in vars - if in_link is not None: - assert vars[v] is None or vars[v] is in_link - for v in block.inputargs: definevar(v) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -46,15 +46,15 @@ w_f_trace = None # For tracing instr_lb = 0 - instr_ub = -1 - instr_prev = -1 + instr_ub = 0 + instr_prev_plus_one = 0 is_being_profiled = False def __init__(self, space, code, w_globals, closure): self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.pycode = code - eval.Frame.__init__(self, space, w_globals, code.co_nlocals) + eval.Frame.__init__(self, space, w_globals) self.valuestack_w = [None] * code.co_stacksize self.valuestackdepth = 0 self.lastblock = None @@ -63,7 +63,7 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. self.initialize_frame_scopes(closure, code) - self.fastlocals_w = [None]*self.numlocals + self.fastlocals_w = [None] * code.co_nlocals make_sure_not_resized(self.fastlocals_w) self.f_lineno = code.co_firstlineno @@ -335,7 +335,7 @@ w(self.instr_lb), #do we need these three (that are for tracing) w(self.instr_ub), - w(self.instr_prev), + w(self.instr_prev_plus_one), w_cells, ] @@ -349,7 +349,7 @@ args_w = space.unpackiterable(w_args) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ - w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev, w_cells = args_w + w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w new_frame = self pycode = space.interp_w(PyCode, w_pycode) @@ -397,7 +397,7 @@ new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev = space.int_w(w_instr_prev) + new_frame.instr_prev_plus_one = 
space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) # XXX what if the frame is in another thread?? @@ -430,7 +430,10 @@ """Initialize cellvars from self.fastlocals_w This is overridden in nestedscope.py""" pass - + + def getfastscopelength(self): + return self.pycode.co_nlocals + def getclosure(self): return None diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,7 +13,7 @@ and not p.basename.startswith('test')] essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "signal"] + ["exceptions", "_file", "sys", "__builtin__", "posix"] ) default_modules = essential_modules.copy() @@ -39,7 +39,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "md5", "cStringIO", "array"])) + "struct", "_md5", "cStringIO", "array"])) working_oo_modules = default_modules.copy() working_oo_modules.update(dict.fromkeys( diff --git a/pypy/translator/simplify.py b/pypy/translator/simplify.py --- a/pypy/translator/simplify.py +++ b/pypy/translator/simplify.py @@ -9,7 +9,7 @@ from pypy.objspace.flow import operation from pypy.objspace.flow.model import (SpaceOperation, Variable, Constant, Block, Link, c_last_exception, checkgraph, - traverse, mkentrymap) + mkentrymap) from pypy.rlib import rarithmetic from pypy.translator import unsimplify from pypy.translator.backendopt import ssa @@ -76,23 +76,19 @@ def desugar_isinstance(graph): """Replace isinstance operation with a call to isinstance.""" constant_isinstance = Constant(isinstance) - def visit(block): - if not isinstance(block, Block): - return + for block in graph.iterblocks(): for i in range(len(block.operations) - 1, -1, -1): op = block.operations[i] if op.opname == "isinstance": args = [constant_isinstance, op.args[0], op.args[1]] new_op = SpaceOperation("simple_call", args, op.result) 
block.operations[i] = new_op - traverse(visit, graph) def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the following link. Arguments of the links should be updated.""" - def visit(link): - if isinstance(link, Link): + for link in list(graph.iterlinks()): while not link.target.operations: block1 = link.target if block1.exitswitch is not None: @@ -113,7 +109,6 @@ link.args = outputargs link.target = exit.target # the while loop above will simplify recursively the new link - traverse(visit, graph) def transform_ovfcheck(graph): """The special function calls ovfcheck and ovfcheck_lshift need to @@ -174,11 +169,10 @@ def rename(v): return renaming.get(v, v) - def visit(block): - if not (isinstance(block, Block) - and block.exitswitch == clastexc + for block in graph.iterblocks(): + if not (block.exitswitch == clastexc and block.exits[-1].exitcase is Exception): - return + continue covered = [link.exitcase for link in block.exits[1:-1]] seen = [] preserve = list(block.exits[:-1]) @@ -233,8 +227,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) - traverse(visit, graph) - def transform_xxxitem(graph): # xxx setitem too for block in graph.iterblocks(): @@ -262,9 +254,9 @@ return True return False - def visit(block): - if not (isinstance(block, Block) and block.exitswitch == clastexc): - return + for block in list(graph.iterblocks()): + if block.exitswitch != clastexc: + continue exits = [] seen = [] for link in block.exits: @@ -283,8 +275,6 @@ seen.append(case) block.recloseblock(*exits) - traverse(visit, graph) - def join_blocks(graph): """Links can be deleted if they are the single exit of a block and the single entry point of the next block. When this happens, we can @@ -340,8 +330,7 @@ this is how implicit exceptions are removed (see _implicit_ in flowcontext.py). 
""" - def visit(block): - if isinstance(block, Block): + for block in list(graph.iterblocks()): for i in range(len(block.exits)-1, -1, -1): exit = block.exits[i] if not (exit.target is graph.exceptblock and @@ -361,7 +350,6 @@ lst = list(block.exits) del lst[i] block.recloseblock(*lst) - traverse(visit, graph) # _____________________________________________________________________ @@ -627,12 +615,11 @@ tgts.append((exit.exitcase, tgt)) return tgts - def visit(block): - if isinstance(block, Block) and block.operations and block.operations[-1].opname == 'is_true': + for block in graph.iterblocks(): + if block.operations and block.operations[-1].opname == 'is_true': tgts = has_is_true_exitpath(block) if tgts: candidates.append((block, tgts)) - traverse(visit, graph) while candidates: cand, tgts = candidates.pop() diff --git a/pypy/translator/stackless/transform.py b/pypy/translator/stackless/transform.py --- a/pypy/translator/stackless/transform.py +++ b/pypy/translator/stackless/transform.py @@ -2,7 +2,7 @@ from pypy.rpython.lltypesystem.lloperation import LL_OPERATIONS from pypy.rlib import rarithmetic from pypy.rpython import rclass, rmodel -from pypy.translator.backendopt import support +from pypy.translator.unsimplify import split_block from pypy.objspace.flow import model from pypy.translator import unsimplify, simplify from pypy.translator.unsimplify import varoftype @@ -598,7 +598,7 @@ link = block.exits[0] nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) i = 0 nextblock = link.target @@ -765,7 +765,7 @@ exitcases = dict.fromkeys([l.exitcase for l in block.exits]) nextblock = None else: - link = support.split_block_with_keepalive(block, i+1) + link = split_block(None, block, i+1) nextblock = link.target block.exitswitch = model.c_last_exception link.llexitcase = None diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py --- a/pypy/rpython/lltypesystem/rlist.py 
+++ b/pypy/rpython/lltypesystem/rlist.py @@ -16,7 +16,6 @@ from pypy.rlib.debug import ll_assert from pypy.rlib.rarithmetic import ovfcheck from pypy.rpython.lltypesystem import rffi -from pypy.rlib.objectmodel import keepalive_until_here from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib import rgc diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -5,6 +5,25 @@ import os, sys exec 'import %s as posix' % os.name +# this is the list of function which is *not* present in the posix module of +# IronPython 2.6, and that we want to ignore for now +lltype_only_defs = [ + 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', + 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', + 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', + 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', + 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', + 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', + 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', + 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', + 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', + 'ttyname', 'uname', 'wait', 'wait3', 'wait4' + ] + +# the Win32 urandom implementation isn't going to translate on JVM or CLI so +# we have to remove it +lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -33,6 +52,8 @@ appleveldefs['wait'] = 'app_posix.wait' if hasattr(os, 'wait3'): appleveldefs['wait3'] = 'app_posix.wait3' + if hasattr(os, 'wait4'): + appleveldefs['wait4'] = 'app_posix.wait4' 
interpleveldefs = { 'open' : 'interp_posix.open', @@ -158,11 +179,12 @@ interpleveldefs[name] = 'interp_posix.' + name def __init__(self, space, w_name): + # if it's an ootype translation, remove all the defs that are lltype + # only backend = space.config.translation.backend - # the Win32 urandom implementation isn't going to translate on JVM or CLI - # so we have to remove it - if 'urandom' in self.interpleveldefs and (backend == 'cli' or backend == 'jvm'): - del self.interpleveldefs['urandom'] + if backend == 'cli' or backend == 'jvm': + for name in lltype_only_defs: + self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) def startup(self, space): diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py --- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py +++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py @@ -1000,6 +1000,13 @@ p = ctypes2lltype(lltype.Ptr(NODE), ctypes.pointer(pc)) assert p.pong.ping == p + def test_typedef(self): + assert ctypes2lltype(lltype.Typedef(lltype.Signed, 'test'), 6) == 6 + assert ctypes2lltype(lltype.Typedef(lltype.Float, 'test2'), 3.4) == 3.4 + + assert get_ctypes_type(lltype.Signed) == get_ctypes_type( + lltype.Typedef(lltype.Signed, 'test3')) + def test_cast_adr_to_int(self): class someaddr(object): def _cast_to_int(self): @@ -1014,7 +1021,7 @@ node = lltype.malloc(NODE) ref = lltype.cast_opaque_ptr(llmemory.GCREF, node) back = rffi.cast(llmemory.GCREF, rffi.cast(lltype.Signed, ref)) - assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), ref) == node + assert lltype.cast_opaque_ptr(lltype.Ptr(NODE), back) == node def test_gcref_forth_and_back(self): cp = ctypes.c_void_p(1234) diff --git a/pypy/rpython/extfunc.py b/pypy/rpython/extfunc.py --- a/pypy/rpython/extfunc.py +++ b/pypy/rpython/extfunc.py @@ -249,6 +249,9 @@ llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no 
I/O (safe for --sandbox) """ + + if export_name is None: + export_name = function.__name__ class FunEntry(ExtFuncEntry): _about_ = function diff --git a/pypy/translator/goal/query.py b/pypy/translator/goal/query.py --- a/pypy/translator/goal/query.py +++ b/pypy/translator/goal/query.py @@ -30,15 +30,13 @@ def polluted_qgen(translator): """list functions with still real SomeObject variables""" annotator = translator.annotator - def visit(block): - if isinstance(block, flowmodel.Block): - for v in block.getvariables(): - s = annotator.binding(v, None) - if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: - raise Found for g in translator.graphs: try: - flowmodel.traverse(visit, g) + for block in g.iterblocks(): + for v in block.getvariables(): + s = annotator.binding(v, None) + if s and s.__class__ == annmodel.SomeObject and s.knowntype != type: + raise Found except Found: line = "%s: %s" % (g, graph_sig(translator, g)) yield line diff --git a/pypy/tool/jitlogparser/storage.py b/pypy/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/pypy/tool/jitlogparser/storage.py @@ -30,18 +30,18 @@ self.codes[fname] = res return res - def disassemble_code(self, fname, startlineno): + def disassemble_code(self, fname, startlineno, name): try: if py.path.local(fname).check(file=False): return None # cannot find source file except py.error.EACCES: return None # cannot open the file - key = (fname, startlineno) + key = (fname, startlineno, name) try: return self.disassembled_codes[key] except KeyError: codeobjs = self.load_code(fname) - if startlineno not in codeobjs: + if (startlineno, name) not in codeobjs: # cannot find the code obj at this line: this can happen for # various reasons, e.g. 
because the .py files changed since # the log was produced, or because the co_firstlineno @@ -49,7 +49,7 @@ # produced by gateway.applevel(), such as the ones found in # nanos.py) return None - code = codeobjs[startlineno] + code = codeobjs[(startlineno, name)] res = dis(code) self.disassembled_codes[key] = res return res diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py --- a/pypy/jit/metainterp/test/test_longlong.py +++ b/pypy/jit/metainterp/test/test_longlong.py @@ -1,6 +1,6 @@ import py, sys from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class WrongResult(Exception): pass diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -110,6 +110,13 @@ __dict__ = 8 raises(TypeError, dir, Foo("foo")) + def test_dir_broken_object(self): + class Foo(object): + x = 3 + def __getattribute__(self, name): + return name + assert dir(Foo()) == [] + def test_dir_custom(self): class Foo(object): def __dir__(self): diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py --- a/lib_pypy/_ctypes/builtin.py +++ b/lib_pypy/_ctypes/builtin.py @@ -1,6 +1,9 @@ import _rawffi, sys -import threading +try: + from thread import _local as local +except ImportError: + local = object # no threads class ConvMode: encoding = 'ascii' @@ -28,7 +31,7 @@ arg = cobj._get_buffer_value() return _rawffi.wcharp2rawunicode(arg, lgt) -class ErrorObject(threading.local): +class ErrorObject(local): def __init__(self): self.errno = 0 self.winerror = 0 diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py 
@@ -194,8 +194,8 @@ c_buf = py_str.c_ob_type.c_tp_as_buffer assert c_buf py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(rffi.INTP.TO)) == 1 - ref = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 assert ref[0] == 10 lltype.free(ref, flavor='raw') diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -255,7 +255,7 @@ x = ord(s[0]) << 7 i = 0 while i < length: - x = (1000003*x) ^ ord(s[i]) + x = intmask((1000003*x) ^ ord(s[i])) i += 1 x ^= length return intmask(x) diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py --- a/pypy/rpython/lltypesystem/lltype.py +++ b/pypy/rpython/lltypesystem/lltype.py @@ -95,6 +95,8 @@ __slots__ = ['__dict__', '__cached_hash'] def __eq__(self, other): + if isinstance(other, Typedef): + return other.__eq__(self) return self.__class__ is other.__class__ and ( self is other or safe_equal(self.__dict__, other.__dict__)) @@ -194,6 +196,36 @@ raise NotImplementedError +class Typedef(LowLevelType): + """A typedef is just another name for an existing type""" + def __init__(self, OF, c_name): + """ + @param OF: the equivalent rffi type + @param c_name: the name we want in C code + """ + assert isinstance(OF, LowLevelType) + # Look through typedefs, so other places don't have to + if isinstance(OF, Typedef): + OF = OF.OF # haha + self.OF = OF + self.c_name = c_name + + def __repr__(self): + return '' % (self.c_name, self.OF) + + def __eq__(self, other): + return other == self.OF + + def __getattr__(self, name): + return self.OF.get(name) + + def _defl(self, parent=None, parentindex=None): + return self.OF._defl() + + def _allocate(self, initialization, parent=None, parentindex=None): + return 
self.OF._allocate(initialization, parent, parentindex) + + class Struct(ContainerType): _gckind = 'raw' diff --git a/pypy/translator/jvm/typesystem.py b/pypy/translator/jvm/typesystem.py --- a/pypy/translator/jvm/typesystem.py +++ b/pypy/translator/jvm/typesystem.py @@ -181,6 +181,7 @@ jIntegerClass = JvmClassType('java.lang.Integer') jLongClass = JvmClassType('java.lang.Long') +jShortClass = JvmClassType('java.lang.Short') jDoubleClass = JvmClassType('java.lang.Double') jByteClass = JvmClassType('java.lang.Byte') jCharClass = JvmClassType('java.lang.Character') @@ -239,6 +240,7 @@ jDouble = JvmScalarType('D', jDoubleClass, 'doubleValue') jByte = JvmScalarType('B', jByteClass, 'byteValue') jChar = JvmScalarType('C', jCharClass, 'charValue') +jShort = JvmScalarType('S', jShortClass, 'shortValue') class Generifier(object): @@ -527,6 +529,7 @@ if desc == 'C': return self._o("i") # Characters if desc == 'B': return self._o("i") # Bytes if desc == 'Z': return self._o("i") # Boolean + if desc == 'S': return self._o("i") # Short assert False, "Unknown argtype=%s" % repr(argtype) raise NotImplementedError @@ -625,6 +628,7 @@ NOP = Opcode('nop') I2D = Opcode('i2d') I2L = Opcode('i2l') +I2S = Opcode('i2s') D2I= Opcode('d2i') #D2L= Opcode('d2l') #PAUL L2I = Opcode('l2i') @@ -891,6 +895,7 @@ SYSTEMIDENTITYHASH = Method.s(jSystem, 'identityHashCode', (jObject,), jInt) SYSTEMGC = Method.s(jSystem, 'gc', (), jVoid) INTTOSTRINGI = Method.s(jIntegerClass, 'toString', (jInt,), jString) +SHORTTOSTRINGS = Method.s(jShortClass, 'toString', (jShort,), jString) LONGTOSTRINGL = Method.s(jLongClass, 'toString', (jLong,), jString) DOUBLETOSTRINGD = Method.s(jDoubleClass, 'toString', (jDouble,), jString) CHARTOSTRINGC = Method.s(jCharClass, 'toString', (jChar,), jString) @@ -922,15 +927,19 @@ CLASSISASSIGNABLEFROM = Method.v(jClass, 'isAssignableFrom', (jClass,), jBool) STRINGBUILDERAPPEND = Method.v(jStringBuilder, 'append', (jString,), jStringBuilder) +PYPYINTBETWEEN = Method.s(jPyPy, 
'int_between', (jInt,jInt,jInt), jBool) PYPYUINTCMP = Method.s(jPyPy, 'uint_cmp', (jInt,jInt,), jInt) PYPYULONGCMP = Method.s(jPyPy, 'ulong_cmp', (jLong,jLong), jInt) PYPYUINTMOD = Method.v(jPyPy, 'uint_mod', (jInt, jInt), jInt) PYPYUINTMUL = Method.v(jPyPy, 'uint_mul', (jInt, jInt), jInt) PYPYUINTDIV = Method.v(jPyPy, 'uint_div', (jInt, jInt), jInt) PYPYULONGMOD = Method.v(jPyPy, 'ulong_mod', (jLong, jLong), jLong) +PYPYUINTTOLONG = Method.s(jPyPy, 'uint_to_long', (jInt,), jLong) PYPYUINTTODOUBLE = Method.s(jPyPy, 'uint_to_double', (jInt,), jDouble) PYPYDOUBLETOUINT = Method.s(jPyPy, 'double_to_uint', (jDouble,), jInt) PYPYDOUBLETOLONG = Method.v(jPyPy, 'double_to_long', (jDouble,), jLong) #PAUL +PYPYDOUBLETOULONG = Method.s(jPyPy, 'double_to_ulong', (jDouble,), jLong) +PYPYULONGTODOUBLE = Method.s(jPyPy, 'ulong_to_double', (jLong,), jDouble) PYPYLONGBITWISENEGATE = Method.v(jPyPy, 'long_bitwise_negate', (jLong,), jLong) PYPYSTRTOINT = Method.v(jPyPy, 'str_to_int', (jString,), jInt) PYPYSTRTOUINT = Method.v(jPyPy, 'str_to_uint', (jString,), jInt) diff --git a/pypy/translator/backendopt/test/test_removenoops.py b/pypy/translator/backendopt/test/test_removenoops.py --- a/pypy/translator/backendopt/test/test_removenoops.py +++ b/pypy/translator/backendopt/test/test_removenoops.py @@ -1,12 +1,12 @@ from pypy.translator.backendopt.removenoops import remove_same_as, \ - remove_unaryops, remove_duplicate_casts, remove_superfluous_keep_alive + remove_unaryops, remove_duplicate_casts from pypy.translator.backendopt.inline import simple_inline_function from pypy.translator.translator import TranslationContext, graphof from pypy.rpython.memory.gctransform.test.test_transform import getops from pypy.translator.test.snippet import simple_method from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST -from pypy.objspace.flow.model import checkgraph, flatten, Block +from pypy.objspace.flow.model import 
checkgraph, Block from pypy.rpython.lltypesystem import lltype from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.llinterp import LLInterpreter @@ -115,20 +115,6 @@ result = interp.eval_graph(f_graph, [-2]) assert result == -1 -def test_remove_keepalive(): - S = lltype.GcStruct("s", ("f", lltype.Signed)) - def f(): - s1 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - s2 = lltype.malloc(S) - llop.keepalive(lltype.Void, s1) - llop.keepalive(lltype.Void, s2) - return lltype.cast_ptr_to_int(s1) + lltype.cast_ptr_to_int(s2) - graph, t = get_graph(f, []) - remove_superfluous_keep_alive(graph) - ops = getops(graph) - assert len(ops['keepalive']) == 2 - def test_remove_duplicate_casts(): class A(object): def __init__(self, x, y): diff --git a/pypy/translator/cli/metavm.py b/pypy/translator/cli/metavm.py --- a/pypy/translator/cli/metavm.py +++ b/pypy/translator/cli/metavm.py @@ -270,23 +270,38 @@ generator.ilasm.call('void [pypylib]pypy.runtime.DebugPrint::DEBUG_PRINT(%s)' % signature) +INT_SIZE = { + ootype.Bool: 1, + ootype.Char: 2, + ootype.UniChar: 2, + rffi.SHORT: 2, + ootype.Signed: 4, + ootype.Unsigned: 4, + ootype.SignedLongLong: 8, + ootype.UnsignedLongLong: 8 + } -OOTYPE_TO_MNEMONIC = { - ootype.Bool: 'i1', - ootype.Char: 'i2', - ootype.UniChar: 'i2', - rffi.SHORT: 'i2', - ootype.Signed: 'i4', - ootype.SignedLongLong: 'i8', - ootype.Unsigned: 'u4', - ootype.UnsignedLongLong: 'u8', - ootype.Float: 'r8', - } +UNSIGNED_TYPES = [ootype.Char, ootype.UniChar, rffi.USHORT, + ootype.Unsigned, ootype.UnsignedLongLong] + +def ootype_to_mnemonic(FROM, TO, default=None): + if TO == ootype.Float: + return 'r8' + # + try: + size = str(INT_SIZE[TO]) + except KeyError: + return default + if FROM in UNSIGNED_TYPES: + return 'u' + size + else: + return 'i' + size class _CastPrimitive(MicroInstruction): def render(self, generator, op): + FROM = op.args[0].concretetype TO = op.result.concretetype - mnemonic = OOTYPE_TO_MNEMONIC[TO] + mnemonic = 
ootype_to_mnemonic(FROM, TO) generator.ilasm.opcode('conv.%s' % mnemonic) Call = _Call() diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/frameobject.h @@ -0,0 +1,17 @@ +#ifndef Py_FRAMEOBJECT_H +#define Py_FRAMEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + PyObject_HEAD + PyCodeObject *f_code; + PyObject *f_globals; + int f_lineno; +} PyFrameObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FRAMEOBJECT_H */ diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py --- a/pypy/jit/metainterp/test/test_fficall.py +++ b/pypy/jit/metainterp/test/test_fficall.py @@ -5,7 +5,7 @@ from pypy.rlib.libffi import ArgChain from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall from pypy.rpython.lltypesystem import lltype, rffi -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestFfiCall(LLJitMixin, _TestLibffiCall): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -138,11 +138,13 @@ # raised after the exception handler block was popped. 
try: trace = self.w_f_trace - self.w_f_trace = None + if trace is not None: + self.w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: - self.w_f_trace = trace + if trace is not None: + self.w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -1421,9 +1423,10 @@ # add a softspace unless we just printed a string which ends in a '\t' # or '\n' -- or more generally any whitespace character but ' ' - if isinstance(x, str) and x and x[-1].isspace() and x[-1]!=' ': - return - # XXX add unicode handling + if isinstance(x, (str, unicode)) and x: + lastchar = x[-1] + if lastchar.isspace() and lastchar != ' ': + return file_softspace(stream, True) print_item_to._annspecialcase_ = "specialize:argtype(0)" diff --git a/pypy/translator/goal/old_queries.py b/pypy/translator/goal/old_queries.py --- a/pypy/translator/goal/old_queries.py +++ b/pypy/translator/goal/old_queries.py @@ -415,12 +415,10 @@ ops = 0 count = Counter() def visit(block): - if isinstance(block, flowmodel.Block): + for block in graph.iterblocks(): count.blocks += 1 count.ops += len(block.operations) - elif isinstance(block, flowmodel.Link): - count.links += 1 - flowmodel.traverse(visit, graph) + count.links = len(list(graph.iterlinks())) return count.blocks, count.links, count.ops # better used before backends opts diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -10,7 +10,7 @@ class AppTestBufferTooShort: def setup_class(cls): - space = gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space if option.runappdirect: @@ -88,7 +88,7 @@ class AppTestSocketConnection(BaseConnectionTest): def setup_class(cls): - space = 
gettestobjspace(usemodules=('_multiprocessing', 'thread')) + space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal')) cls.space = space cls.w_connections = space.newlist([]) diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -32,7 +32,7 @@ class GenericTestThread: def setup_class(cls): - space = gettestobjspace(usemodules=('thread', 'time')) + space = gettestobjspace(usemodules=('thread', 'time', 'signal')) cls.space = space if option.runappdirect: diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -219,12 +219,14 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] else: - nfreevars = len(codeobj.co_freevars) - freevars = [self.space.interp_w(Cell, self.popvalue()) - for i in range(nfreevars)] - freevars.reverse() - defaultarguments = [self.popvalue() for i in range(numdefaults)] - defaultarguments.reverse() + n = len(codeobj.co_freevars) + freevars = [None] * n + while True: + n -= 1 + if n < 0: + break + freevars[n] = self.space.interp_w(Cell, self.popvalue()) + defaultarguments = self.popvalues(numdefaults) fn = function.Function(self.space, codeobj, self.w_globals, defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/translator/cli/src/debug.cs b/pypy/translator/cli/src/debug.cs --- a/pypy/translator/cli/src/debug.cs +++ b/pypy/translator/cli/src/debug.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using System.Collections.Generic; using System.Diagnostics; // this code is modeled after translator/c/src/debug.h @@ -21,7 +22,7 @@ static int have_debug_prints = -1; static bool debug_ready = false; static bool debug_profile = false; - static string debug_prefix = null; + static string[] active_categories = null; public static void close_file() { 
@@ -29,6 +30,14 @@ debug_file.Close(); } + public static bool startswithoneof(string category, string[] active_categories) + { + foreach(string cat in active_categories) + if (category.StartsWith(cat)) + return true; + return false; + } + public static bool HAVE_DEBUG_PRINTS() { if ((have_debug_prints & 1) != 0) { @@ -48,7 +57,8 @@ have_debug_prints <<= 1; if (!debug_profile) { /* non-profiling version */ - if (debug_prefix == null || !category.StartsWith(debug_prefix)) { + if (active_categories == null || + !startswithoneof(category, active_categories)) { /* wrong section name, or no PYPYLOG at all, skip it */ return; } @@ -83,7 +93,8 @@ } else { /* PYPYLOG=prefix:filename --- conditional logging */ - debug_prefix = filename.Substring(0, colon); + string debug_prefix = filename.Substring(0, colon); + active_categories = debug_prefix.Split(','); filename = filename.Substring(colon+1); } if (filename != "-") diff --git a/pypy/translator/cli/src/pypylib.cs b/pypy/translator/cli/src/pypylib.cs --- a/pypy/translator/cli/src/pypylib.cs +++ b/pypy/translator/cli/src/pypylib.cs @@ -501,6 +501,11 @@ } } + public static bool IntBetween(int a, int b, int c) + { + return a <= b && b < c; + } + public static bool Equal(T t1, T t2) { if (t1 == null) @@ -1148,10 +1153,36 @@ public class rffi { - public static int tolower(int chr) - { - return (int)Char.ToLower((char)chr); - } + public static int tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_tolower(int chr) + { + return (int)Char.ToLower((char)chr); + } + + public static int locale_isupper(int chr) + { + return Convert.ToInt32(Char.IsUpper((char)chr)); + } + + public static int locale_islower(int chr) + { + return Convert.ToInt32(Char.IsLower((char)chr)); + } + + public static int locale_isalpha(int chr) + { + return Convert.ToInt32(Char.IsLetter((char)chr)); + } + + public static int locale_isalnum(int chr) + { + return Convert.ToInt32(Char.IsLetterOrDigit((char)chr)); + } + } } 
diff --git a/pypy/jit/backend/x86/test/test_basic.py b/pypy/jit/backend/x86/test/test_basic.py --- a/pypy/jit/backend/x86/test/test_basic.py +++ b/pypy/jit/backend/x86/test/test_basic.py @@ -1,18 +1,18 @@ import py from pypy.jit.backend.detect_cpu import getcpuclass from pypy.jit.metainterp.warmspot import ll_meta_interp -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rlib.jit import JitDriver -class Jit386Mixin(test_basic.LLJitMixin): +class Jit386Mixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass -class TestBasic(Jit386Mixin, test_basic.BaseLLtypeTests): +class TestBasic(Jit386Mixin, test_ajit.BaseLLtypeTests): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py def test_bug(self): diff --git a/pypy/module/thread/os_local.py b/pypy/module/thread/os_local.py --- a/pypy/module/thread/os_local.py +++ b/pypy/module/thread/os_local.py @@ -12,7 +12,7 @@ def __init__(self, space, initargs): self.initargs = initargs ident = thread.get_ident() - self.dicts = {ident: space.newdict()} + self.dicts = {ident: space.newdict(instance=True)} def getdict(self, space): ident = thread.get_ident() @@ -51,10 +51,6 @@ __dict__ = GetSetProperty(descr_get_dict, cls=Local), ) -def getlocaltype(space): - return space.gettypeobject(Local.typedef) - - def finish_thread(w_obj): assert isinstance(w_obj, Local) ident = thread.get_ident() diff --git a/pypy/rpython/module/ll_os.py b/pypy/rpython/module/ll_os.py --- a/pypy/rpython/module/ll_os.py +++ b/pypy/rpython/module/ll_os.py @@ -29,7 +29,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.annlowlevel import llstr from pypy.rlib import rgc -from pypy.rlib.objectmodel import keepalive_until_here, specialize +from pypy.rlib.objectmodel import specialize def monkeypatch_rposix(posixfunc, unicodefunc, signature): func_name 
= posixfunc.__name__ diff --git a/pypy/translator/jvm/src/pypy/StatResult.java b/pypy/translator/jvm/src/pypy/StatResult.java --- a/pypy/translator/jvm/src/pypy/StatResult.java +++ b/pypy/translator/jvm/src/pypy/StatResult.java @@ -8,7 +8,7 @@ * *

The actual stat() function is defined in PyPy.java. */ -class StatResult { +public class StatResult { public int item0, item3, item4, item5; public long item1, item2, item6; public double item7, item8, item9; diff --git a/pypy/translator/gensupp.py b/pypy/translator/gensupp.py --- a/pypy/translator/gensupp.py +++ b/pypy/translator/gensupp.py @@ -6,15 +6,13 @@ import sys from pypy.objspace.flow.model import Block -from pypy.objspace.flow.model import traverse # ordering the blocks of a graph by source position def ordered_blocks(graph): # collect all blocks allblocks = [] - def visit(block): - if isinstance(block, Block): + for block in graph.iterblocks(): # first we order by offset in the code string if block.operations: ofs = block.operations[0].offset @@ -26,7 +24,6 @@ else: txt = "dummy" allblocks.append((ofs, txt, block)) - traverse(visit, graph) allblocks.sort() #for ofs, txt, block in allblocks: # print ofs, txt, block diff --git a/pypy/translator/jvm/src/pypy/ll_os.java b/pypy/translator/jvm/src/pypy/ll_os.java --- a/pypy/translator/jvm/src/pypy/ll_os.java +++ b/pypy/translator/jvm/src/pypy/ll_os.java @@ -14,10 +14,22 @@ abstract class FileWrapper { + private final String name; + + public FileWrapper(String name) + { + this.name = name; + } + public abstract void write(String buffer); public abstract String read(int count); public abstract void close(); public abstract RandomAccessFile getFile(); + + public String getName() + { + return this.name; + } } class PrintStreamWrapper extends FileWrapper @@ -25,8 +37,9 @@ private final PrintStream stream; private final ll_os os; - public PrintStreamWrapper(PrintStream stream, ll_os os) + public PrintStreamWrapper(String name, PrintStream stream, ll_os os) { + super(name); this.stream = stream; this.os = os; } @@ -58,8 +71,9 @@ private final InputStream stream; private final ll_os os; - public InputStreamWrapper(InputStream stream, ll_os os) + public InputStreamWrapper(String name, InputStream stream, ll_os os) { + 
super(name); this.stream = stream; this.os = os; } @@ -102,11 +116,13 @@ private final boolean canWrite; private final ll_os os; - public RandomAccessFileWrapper(RandomAccessFile file, + public RandomAccessFileWrapper(String name, + RandomAccessFile file, boolean canRead, boolean canWrite, ll_os os) { + super(name); this.file = file; this.canRead = canRead; this.canWrite = canWrite; @@ -228,9 +244,9 @@ public ll_os(Interlink interlink) { this.interlink = interlink; - FileDescriptors.put(0, new InputStreamWrapper(System.in, this)); - FileDescriptors.put(1, new PrintStreamWrapper(System.out, this)); - FileDescriptors.put(2, new PrintStreamWrapper(System.err, this)); + FileDescriptors.put(0, new InputStreamWrapper("", System.in, this)); + FileDescriptors.put(1, new PrintStreamWrapper("", System.out, this)); + FileDescriptors.put(2, new PrintStreamWrapper("", System.err, this)); fdcount = 2; } @@ -339,7 +355,7 @@ // XXX: we ignore O_CREAT RandomAccessFile file = open_file(name, javaMode, flags); RandomAccessFileWrapper wrapper = - new RandomAccessFileWrapper(file, canRead, canWrite, this); + new RandomAccessFileWrapper(name, file, canRead, canWrite, this); fdcount++; FileDescriptors.put(fdcount, wrapper); @@ -418,6 +434,12 @@ return ll_os_stat(path); // XXX } + public StatResult ll_os_fstat(int fd) + { + String name = getfd(fd).getName(); + return ll_os_stat(name); + } + public String ll_os_strerror(int errno) { String msg = ErrorMessages.remove(errno); diff --git a/pypy/translator/jvm/test/test_list.py b/pypy/translator/jvm/test/test_list.py --- a/pypy/translator/jvm/test/test_list.py +++ b/pypy/translator/jvm/test/test_list.py @@ -6,7 +6,10 @@ def test_recursive(self): py.test.skip("JVM doesn't support recursive lists") - def test_getitem_exc(self): + def test_getitem_exc_1(self): + py.test.skip('fixme!') + + def test_getitem_exc_2(self): py.test.skip('fixme!') def test_r_short_list(self): diff --git a/pypy/translator/backendopt/test/test_mallocv.py 
b/pypy/translator/backendopt/test/test_mallocv.py --- a/pypy/translator/backendopt/test/test_mallocv.py +++ b/pypy/translator/backendopt/test/test_mallocv.py @@ -5,7 +5,7 @@ from pypy.translator.backendopt.all import backend_optimizations from pypy.translator.translator import TranslationContext, graphof from pypy.translator import simplify -from pypy.objspace.flow.model import checkgraph, flatten, Block, mkentrymap +from pypy.objspace.flow.model import checkgraph, Block, mkentrymap from pypy.objspace.flow.model import summary from pypy.rpython.llinterp import LLInterpreter, LLException from pypy.rpython.lltypesystem import lltype, llmemory, lloperation @@ -33,8 +33,7 @@ def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 count_calls = 0 - for node in flatten(graph): - if isinstance(node, Block): + for node in graph.iterblocks(): for op in node.operations: if op.opname == 'malloc': count_mallocs += 1 @@ -54,7 +53,7 @@ if option.view: t.view() self.original_graph_count = len(t.graphs) - # to detect missing keepalives and broken intermediate graphs, + # to detect broken intermediate graphs, # we do the loop ourselves instead of calling remove_simple_mallocs() maxiter = 100 mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) @@ -557,36 +556,6 @@ type_system = 'lltype' #MallocRemover = LLTypeMallocRemover - def test_with_keepalive(self): - from pypy.rlib.objectmodel import keepalive_until_here - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - keepalive_until_here(t) - return s*d - self.check(fn1, [int, int], [15, 10], 125) - - def test_add_keepalives(self): - class A: - pass - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn7(i): - big = lltype.malloc(BIG) - a = A() - a.big = big - a.small = big.s - a.small.x = 0 - while i > 0: - a.small.x += i - i -= 1 - return a.small.x - self.check(fn7, [int], [10], 55, - 
expected_mallocs=1) # no support for interior structs - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -770,39 +739,6 @@ return x.u1.b * x.u2.a self.check(fn, [], [], DONT_CHECK_RESULT) - def test_keep_all_keepalives(self): - SIZE = llmemory.sizeof(lltype.Signed) - PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1)) - class A: - def __init__(self): - self.addr = llmemory.raw_malloc(SIZE) - def __del__(self): - llmemory.raw_free(self.addr) - class B: - pass - def myfunc(): - b = B() - b.keep = A() - b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY) - b.data[0] = 42 - ptr = b.data - # normally 'b' could go away as early as here, which would free - # the memory held by the instance of A in b.keep... - res = ptr[0] - # ...so we explicitly keep 'b' alive until here - objectmodel.keepalive_until_here(b) - return res - graph = self.check(myfunc, [], [], 42, - expected_mallocs=1, # 'A' instance left - expected_calls=1) # to A.__init__() - - # there is a getarrayitem near the end of the graph of myfunc. 
- # However, the memory it accesses must still be protected by the - # following keepalive, even after malloc removal - entrymap = mkentrymap(graph) - [link] = entrymap[graph.returnblock] - assert link.prevblock.operations[-1].opname == 'keepalive' - def test_nested_struct(self): S = lltype.GcStruct("S", ('x', lltype.Signed)) T = lltype.GcStruct("T", ('s', S)) diff --git a/pypy/jit/metainterp/test/test_loop_unroll.py b/pypy/jit/metainterp/test/test_loop_unroll.py --- a/pypy/jit/metainterp/test/test_loop_unroll.py +++ b/pypy/jit/metainterp/test/test_loop_unroll.py @@ -1,7 +1,7 @@ import py from pypy.rlib.jit import JitDriver from pypy.jit.metainterp.test import test_loop -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): diff --git a/pypy/rlib/test/test__jit_vref.py b/pypy/rlib/test/test__jit_vref.py --- a/pypy/rlib/test/test__jit_vref.py +++ b/pypy/rlib/test/test__jit_vref.py @@ -6,8 +6,12 @@ from pypy.annotation.annrpython import RPythonAnnotator from pypy.rpython.test.test_llinterp import interpret from pypy.rpython.lltypesystem.rclass import OBJECTPTR +from pypy.rpython.ootypesystem.rclass import OBJECT from pypy.rpython.lltypesystem import lltype +from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin + +from pypy.rpython.ootypesystem import ootype class X(object): pass @@ -79,37 +83,48 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -def test_rtype_1(): - def f(): - return virtual_ref(X()) - x = interpret(f, []) - assert lltype.typeOf(x) == OBJECTPTR +class BaseTestVRef(BaseRtypingTest): + def test_rtype_1(self): + def f(): + return virtual_ref(X()) + x = self.interpret(f, []) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_2(): - def f(): - x1 = X() - vref = virtual_ref(x1) - x2 = 
vref() - virtual_ref_finish(x2) - return x2 - x = interpret(f, []) - assert lltype.castable(OBJECTPTR, lltype.typeOf(x)) > 0 + def test_rtype_2(self): + def f(): + x1 = X() + vref = virtual_ref(x1) + x2 = vref() + virtual_ref_finish(x2) + return x2 + x = self.interpret(f, []) + assert self.castable(self.OBJECTTYPE, x) -def test_rtype_3(): - def f(n): - if n > 0: - return virtual_ref(Y()) - else: - return non_virtual_ref(Z()) - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR + def test_rtype_3(self): + def f(n): + if n > 0: + return virtual_ref(Y()) + else: + return non_virtual_ref(Z()) + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE -def test_rtype_4(): - def f(n): - if n > 0: - return virtual_ref(X()) - else: - return vref_None - x = interpret(f, [-5]) - assert lltype.typeOf(x) == OBJECTPTR - assert not x + def test_rtype_4(self): + def f(n): + if n > 0: + return virtual_ref(X()) + else: + return vref_None + x = self.interpret(f, [-5]) + assert lltype.typeOf(x) == self.OBJECTTYPE + assert not x + +class TestLLtype(BaseTestVRef, LLRtypeMixin): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + +class TestOOtype(BaseTestVRef, OORtypeMixin): + OBJECTTYPE = OBJECT + def castable(self, TO, var): + return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -245,6 +245,16 @@ expression cmp(o1, o2).""" return space.int_w(space.cmp(w_o1, w_o2)) + at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) +def PyObject_Cmp(space, w_o1, w_o2, result): + """Compare the values of o1 and o2 using a routine provided by o1, if one + exists, otherwise with a routine provided by o2. The result of the + comparison is returned in result. Returns -1 on failure. 
This is the + equivalent of the Python statement result = cmp(o1, o2).""" + res = space.int_w(space.cmp(w_o1, w_o2)) + result[0] = rffi.cast(rffi.INT, res) + return 0 + @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject) def PyObject_RichCompare(space, w_o1, w_o2, opid_int): """Compare the values of o1 and o2 using the operation specified by opid, @@ -385,7 +395,7 @@ raise OperationError(space.w_TypeError, space.wrap( "expected a character buffer object")) if generic_cpy_call(space, pb.c_bf_getsegcount, - obj, lltype.nullptr(rffi.INTP.TO)) != 1: + obj, lltype.nullptr(Py_ssize_tP.TO)) != 1: raise OperationError(space.w_TypeError, space.wrap( "expected a single-segment buffer object")) size = generic_cpy_call(space, pb.c_bf_getcharbuffer, diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -446,7 +446,6 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', - 'pslld', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', @@ -457,6 +456,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', + 'paddq', 'pinsr', # zero-extending moves should not produce GC pointers 'movz', ]) @@ -1645,7 +1645,7 @@ darwin64='') print >> output, "%s:" % _globalname('pypy_asm_stackwalk') - print >> output, """\ + s = """\ /* See description in asmgcroot.py */ .cfi_startproc movq\t%rdi, %rdx\t/* 1st argument, which is the callback */ @@ -1691,6 +1691,12 @@ ret .cfi_endproc """ + if self.format == 'darwin64': + # obscure. gcc there seems not to support .cfi_... + # hack it out... 
+ s = re.sub(r'([.]cfi_[^/\n]+)([/\n])', + r'/* \1 disabled on darwin */\2', s) + print >> output, s _variant(elf64='.size pypy_asm_stackwalk, .-pypy_asm_stackwalk', darwin64='') else: diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py --- a/pypy/rlib/runicode.py +++ b/pypy/rlib/runicode.py @@ -87,8 +87,9 @@ result = UnicodeBuilder(size) pos = 0 while pos < size: - ch = s[pos] - ordch1 = ord(ch) + ordch1 = ord(s[pos]) + # fast path for ASCII + # XXX maybe use a while loop here if ordch1 < 0x80: result.append(unichr(ordch1)) pos += 1 @@ -98,110 +99,149 @@ if pos + n > size: if not final: break - else: - endpos = pos + 1 - while endpos < size and ord(s[endpos]) & 0xC0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "unexpected end of data", - s, pos, endpos) + charsleft = size - pos - 1 # either 0, 1, 2 + # note: when we get the 'unexpected end of data' we don't care + # about the pos anymore and we just ignore the value + if not charsleft: + # there's only the start byte and nothing else + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+1) + result.append(r) + break + ordch2 = ord(s[pos+1]) + if n == 3: + # 3-bytes seq with only a continuation byte + if (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xe0 and ordch2 < 0xa0)): + # or (ordch1 == 0xed and ordch2 > 0x9f) + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + else: + # second byte valid, but third byte missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+2) + result.append(r) + break + elif n == 4: + # 4-bytes seq with 1 or 2 continuation bytes + if (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)): + # second byte invalid, take the first and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + 
result.append(r) + continue + elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 + # third byte invalid, take the first two and continue + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + else: + # there's only 1 or 2 valid cb, but the others are missing + r, pos = errorhandler(errors, 'utf-8', + 'unexpected end of data', + s, pos, pos+charsleft+1) + result.append(r) + break + + if n == 0: + r, pos = errorhandler(errors, 'utf-8', + 'invalid start byte', + s, pos, pos+1) + result.append(r) + + elif n == 1: + assert 0, "ascii should have gone through the fast path" + + elif n == 2: + ordch2 = ord(s[pos+1]) + if ordch2>>6 != 0x2: # 0b10 + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) continue + # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz + result.append(unichr(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F))) # 0b00111111 + pos += 2 - if n == 0: - r, pos = errorhandler(errors, "utf-8", - "invalid start byte", - s, pos, pos + 1) - result.append(r) - elif n == 1: - assert 0, "you can never get here" - elif n == 2: - # 110yyyyy 10zzzzzz ====> 00000000 00000yyy yyzzzzzz - - ordch2 = ord(s[pos+1]) - z, two = splitter[6, 2](ordch2) - y, six = splitter[5, 3](ordch1) - assert six == 6 - if two != 2: - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, pos + 1) - result.append(r) - else: - c = (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 3: - # 1110xxxx 10yyyyyy 10zzzzzz ====> 00000000 xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - z, two1 = splitter[6, 2](ordch3) - y, two2 = splitter[6, 2](ordch2) - x, fourteen = splitter[4, 4](ordch1) - assert fourteen == 14 - if (two1 != 2 or two2 != 2 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! # Uncomment the line below to make them invalid. 
# or (ordch1 == 0xed and ordch2 > 0x9f) ): + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) + result.append(r) + continue + elif ordch3>>6 != 0x2: # 0b10 + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz + result.append(unichr(((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 + pos += 3 - # if ordch2 first two bits are 1 and 0, then the invalid - # continuation byte is ordch3; else ordch2 is invalid. - if two2 == 2: - endpos = pos + 2 - else: - endpos = pos + 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) - result.append(r) - else: - c = (x << 12) + (y << 6) + z - result.append(unichr(c)) - pos += n elif n == 4: - # 11110www 10xxxxxx 10yyyyyy 10zzzzzz ====> - # 000wwwxx xxxxyyyy yyzzzzzz ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - z, two1 = splitter[6, 2](ordch4) - y, two2 = splitter[6, 2](ordch3) - x, two3 = splitter[6, 2](ordch2) - w, thirty = splitter[3, 5](ordch1) - assert thirty == 30 - if (two1 != 2 or two2 != 2 or two3 != 2 or + if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): - endpos = pos + 1 - if ordch2 & 0xc0 == 0x80: - endpos += 1 - if ordch3 & 0xc0 == 0x80: - endpos += 1 - r, pos = errorhandler(errors, "utf-8", - "invalid continuation byte", - s, pos, endpos) + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+1) result.append(r) + continue + elif ordch3>>6 != 0x2: # 0b10 + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+2) + result.append(r) + continue + elif ordch4>>6 != 0x2: # 0b10 + r, pos = errorhandler(errors, 'utf-8', + 'invalid continuation byte', + s, pos, pos+3) + result.append(r) + continue + # 11110www 10xxxxxx 
10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz + c = (((ordch1 & 0x07) << 18) + # 0b00000111 + ((ordch2 & 0x3F) << 12) + # 0b00111111 + ((ordch3 & 0x3F) << 6) + # 0b00111111 + (ordch4 & 0x3F)) # 0b00111111 + if c <= MAXUNICODE: + result.append(UNICHR(c)) else: - c = (w << 18) + (x << 12) + (y << 6) + z - # convert to UTF-16 if necessary - if c <= MAXUNICODE: - result.append(UNICHR(c)) - else: - # compute and append the two surrogates: - # translate from 10000..10FFFF to 0..FFFF - c -= 0x10000 - # high surrogate = top 10 bits added to D800 - result.append(unichr(0xD800 + (c >> 10))) - # low surrogate = bottom 10 bits added to DC00 - result.append(unichr(0xDC00 + (c & 0x03FF))) - pos += n - else: - r, pos = errorhandler(errors, "utf-8", - "unsupported Unicode code range", - s, pos, pos + n) - result.append(r) + # compute and append the two surrogates: + # translate from 10000..10FFFF to 0..FFFF + c -= 0x10000 + # high surrogate = top 10 bits added to D800 + result.append(unichr(0xD800 + (c >> 10))) + # low surrogate = bottom 10 bits added to DC00 + result.append(unichr(0xDC00 + (c & 0x03FF))) + pos += 4 return result.build(), pos @@ -629,7 +669,7 @@ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, # sp ! " # $ % & ' ( ) * + , - . / 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 3, 0, 0, 0, 0, -# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? +# 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, # @ A B C D E F G H I J K L M N O 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -905,20 +945,20 @@ pos = 0 while pos < size: ch = p[pos] - + if ord(ch) < limit: result.append(chr(ord(ch))) pos += 1 else: # startpos for collecting unencodable chars - collstart = pos - collend = pos+1 + collstart = pos + collend = pos+1 while collend < len(p) and ord(p[collend]) >= limit: collend += 1 r, pos = errorhandler(errors, encoding, reason, p, collstart, collend) result.append(r) - + return result.build() def unicode_encode_latin_1(p, size, errors, errorhandler=None): diff --git a/pypy/jit/tl/spli/test/test_jit.py b/pypy/jit/tl/spli/test/test_jit.py --- a/pypy/jit/tl/spli/test/test_jit.py +++ b/pypy/jit/tl/spli/test/test_jit.py @@ -1,6 +1,6 @@ import py -from pypy.jit.metainterp.test.test_basic import JitMixin +from pypy.jit.metainterp.test.support import JitMixin from pypy.jit.tl.spli import interpreter, objects, serializer from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper from pypy.jit.backend.llgraph import runner diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py --- a/pypy/jit/metainterp/test/test_virtual.py +++ b/pypy/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver, hint from pypy.rlib.objectmodel import compute_unique_id from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rpython.lltypesystem import lltype, rclass from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.ootypesystem import ootype diff --git a/pypy/jit/metainterp/test/support.py b/pypy/jit/metainterp/test/support.py new file mode 100644 --- /dev/null +++ b/pypy/jit/metainterp/test/support.py @@ -0,0 +1,261 @@ + +import py, sys +from pypy.rpython.lltypesystem import lltype, llmemory 
+from pypy.rpython.ootypesystem import ootype +from pypy.jit.backend.llgraph import runner +from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats +from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT +from pypy.jit.metainterp import pyjitpl, history +from pypy.jit.metainterp.warmstate import set_future_value +from pypy.jit.codewriter.policy import JitPolicy +from pypy.jit.codewriter import longlong + +def _get_jitcodes(testself, CPUClass, func, values, type_system, + supports_longlong=False, **kwds): + from pypy.jit.codewriter import support, codewriter + + class FakeJitCell: + __compiled_merge_points = [] + def get_compiled_merge_points(self): + return self.__compiled_merge_points[:] + def set_compiled_merge_points(self, lst): + self.__compiled_merge_points = lst + + class FakeWarmRunnerState: + def attach_unoptimized_bridge_from_interp(self, greenkey, newloop): + pass + + def jit_cell_at_key(self, greenkey): + assert greenkey == [] + return self._cell + _cell = FakeJitCell() + + trace_limit = sys.maxint + enable_opts = ALL_OPTS_DICT + + func._jit_unroll_safe_ = True + rtyper = support.annotate(func, values, type_system=type_system) + graphs = rtyper.annotator.translator.graphs + result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0] + + class FakeJitDriverSD: + num_green_args = 0 + portal_graph = graphs[0] + virtualizable_info = None + greenfield_info = None + result_type = result_kind + portal_runner_ptr = "???" 
+ + stats = history.Stats() + cpu = CPUClass(rtyper, stats, None, False) + cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) + testself.cw = cw + policy = JitPolicy() + policy.set_supports_longlong(supports_longlong) + cw.find_all_graphs(policy) + # + testself.warmrunnerstate = FakeWarmRunnerState() + testself.warmrunnerstate.cpu = cpu + FakeJitDriverSD.warmstate = testself.warmrunnerstate + if hasattr(testself, 'finish_setup_for_interp_operations'): + testself.finish_setup_for_interp_operations() + # + cw.make_jitcodes(verbose=True) + +def _run_with_blackhole(testself, args): + from pypy.jit.metainterp.blackhole import BlackholeInterpBuilder + cw = testself.cw + blackholeinterpbuilder = BlackholeInterpBuilder(cw) + blackholeinterp = blackholeinterpbuilder.acquire_interp() + count_i = count_r = count_f = 0 + for value in args: + T = lltype.typeOf(value) + if T == lltype.Signed: + blackholeinterp.setarg_i(count_i, value) + count_i += 1 + elif T == llmemory.GCREF: + blackholeinterp.setarg_r(count_r, value) + count_r += 1 + elif T == lltype.Float: + value = longlong.getfloatstorage(value) + blackholeinterp.setarg_f(count_f, value) + count_f += 1 + else: + raise TypeError(T) + [jitdriver_sd] = cw.callcontrol.jitdrivers_sd + blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0) + blackholeinterp.run() + return blackholeinterp._final_result_anytype() + +def _run_with_pyjitpl(testself, args): + + class DoneWithThisFrame(Exception): + pass + + class DoneWithThisFrameRef(DoneWithThisFrame): + def __init__(self, cpu, *args): + DoneWithThisFrame.__init__(self, *args) + + cw = testself.cw + opt = history.Options(listops=True) + metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + metainterp_sd.finish_setup(cw) + [jitdriver_sd] = metainterp_sd.jitdrivers_sd + metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) + metainterp_sd.DoneWithThisFrameInt = DoneWithThisFrame + metainterp_sd.DoneWithThisFrameRef = DoneWithThisFrameRef + 
metainterp_sd.DoneWithThisFrameFloat = DoneWithThisFrame + testself.metainterp = metainterp + try: + metainterp.compile_and_run_once(jitdriver_sd, *args) + except DoneWithThisFrame, e: + #if conftest.option.view: + # metainterp.stats.view() + return e.args[0] + else: + raise Exception("FAILED") + +def _run_with_machine_code(testself, args): + metainterp = testself.metainterp + num_green_args = metainterp.jitdriver_sd.num_green_args + loop_tokens = metainterp.get_compiled_merge_points(args[:num_green_args]) + if len(loop_tokens) != 1: + return NotImplemented + # a loop was successfully created by _run_with_pyjitpl(); call it + cpu = metainterp.cpu + for i in range(len(args) - num_green_args): + x = args[num_green_args + i] + typecode = history.getkind(lltype.typeOf(x)) + set_future_value(cpu, i, x, typecode) + faildescr = cpu.execute_token(loop_tokens[0]) + assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr') + if metainterp.jitdriver_sd.result_type == history.INT: + return cpu.get_latest_value_int(0) + elif metainterp.jitdriver_sd.result_type == history.REF: + return cpu.get_latest_value_ref(0) + elif metainterp.jitdriver_sd.result_type == history.FLOAT: + return cpu.get_latest_value_float(0) + else: + return None + + +class JitMixin: + basic = True + def check_loops(self, expected=None, everywhere=False, **check): + get_stats().check_loops(expected=expected, everywhere=everywhere, + **check) + def check_loop_count(self, count): + """NB. This is a hack; use check_tree_loop_count() or + check_enter_count() for the real thing. 
+ This counts as 1 every bridge in addition to every loop; and it does + not count at all the entry bridges from interpreter, although they + are TreeLoops as well.""" + assert get_stats().compiled_count == count + def check_tree_loop_count(self, count): + assert len(get_stats().loops) == count + def check_loop_count_at_most(self, count): + assert get_stats().compiled_count <= count + def check_enter_count(self, count): + assert get_stats().enter_count == count + def check_enter_count_at_most(self, count): + assert get_stats().enter_count <= count + def check_jumps(self, maxcount): + assert get_stats().exec_jumps <= maxcount + def check_aborted_count(self, count): + assert get_stats().aborted_count == count + def check_aborted_count_at_least(self, count): + assert get_stats().aborted_count >= count + + def meta_interp(self, *args, **kwds): + kwds['CPUClass'] = self.CPUClass + kwds['type_system'] = self.type_system + if "backendopt" not in kwds: + kwds["backendopt"] = False + return ll_meta_interp(*args, **kwds) + + def interp_operations(self, f, args, **kwds): + # get the JitCodes for the function f + _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + # try to run it with blackhole.py + result1 = _run_with_blackhole(self, args) + # try to run it with pyjitpl.py + result2 = _run_with_pyjitpl(self, args) + assert result1 == result2 + # try to run it by running the code compiled just before + result3 = _run_with_machine_code(self, args) + assert result1 == result3 or result3 == NotImplemented + # + if (longlong.supports_longlong and + isinstance(result1, longlong.r_float_storage)): + result1 = longlong.getrealfloat(result1) + return result1 + + def check_history(self, expected=None, **isns): + # this can be used after calling meta_interp + get_stats().check_history(expected, **isns) + + def check_operations_history(self, expected=None, **isns): + # this can be used after interp_operations + if expected is not None: + expected = dict(expected) + 
expected['jump'] = 1 + self.metainterp.staticdata.stats.check_history(expected, **isns) + + +class LLJitMixin(JitMixin): + type_system = 'lltype' + CPUClass = runner.LLtypeCPU + + @staticmethod + def Ptr(T): + return lltype.Ptr(T) + + @staticmethod + def GcStruct(name, *fields, **kwds): + S = lltype.GcStruct(name, *fields, **kwds) + return S + + malloc = staticmethod(lltype.malloc) + nullptr = staticmethod(lltype.nullptr) + + @staticmethod + def malloc_immortal(T): + return lltype.malloc(T, immortal=True) + + def _get_NODE(self): + NODE = lltype.GcForwardReference() + NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), + ('next', lltype.Ptr(NODE)))) + return NODE + +class OOJitMixin(JitMixin): + type_system = 'ootype' + #CPUClass = runner.OOtypeCPU + + def setup_class(cls): + py.test.skip("ootype tests skipped for now") + + @staticmethod + def Ptr(T): + return T + + @staticmethod + def GcStruct(name, *fields, **kwds): + if 'hints' in kwds: + kwds['_hints'] = kwds['hints'] + del kwds['hints'] + I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) + return I + + malloc = staticmethod(ootype.new) + nullptr = staticmethod(ootype.null) + + @staticmethod + def malloc_immortal(T): + return ootype.new(T) + + def _get_NODE(self): + NODE = ootype.Instance('NODE', ootype.ROOT, {}) + NODE._add_fields({'value': ootype.Signed, + 'next': NODE}) + return NODE diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -22,13 +22,21 @@ return func.code class Defaults(object): - _immutable_fields_ = ["items[*]"] + _immutable_fields_ = ["items[*]", "promote"] - def __init__(self, items): + def __init__(self, items, promote=False): self.items = items + self.promote = promote def getitems(self): - return jit.hint(self, promote=True).items + # an idea - we want to promote only items that we know won't change + # too often. 
this is the case for builtin functions and functions + # with known constant defaults. Otherwise we don't want to promote + # this so lambda a=a won't create a new trace each time it's + # encountered + if self.promote: + return jit.hint(self, promote=True).items + return self.items def getitem(self, idx): return self.getitems()[idx] @@ -44,14 +52,15 @@ can_change_code = True def __init__(self, space, code, w_globals=None, defs_w=[], closure=None, - forcename=None): + forcename=None, promote_defs=False): self.space = space self.name = forcename or code.co_name self.w_doc = None # lazily read from code.getdocstring() self.code = code # Code instance self.w_func_globals = w_globals # the globals dictionary self.closure = closure # normally, list of Cell instances or None - self.defs = Defaults(defs_w) # wrapper around list of w_default's + self.defs = Defaults(defs_w, promote=promote_defs) + # wrapper around list of w_default's self.w_func_dict = None # filled out below if needed self.w_module = None @@ -620,7 +629,8 @@ def __init__(self, func): assert isinstance(func, Function) Function.__init__(self, func.space, func.code, func.w_func_globals, - func.defs.getitems(), func.closure, func.name) + func.defs.getitems(), func.closure, func.name, + promote_defs=True) self.w_doc = func.w_doc self.w_func_dict = func.w_func_dict self.w_module = func.w_module diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -10,7 +10,7 @@ from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted -from pypy.rlib.jit import purefunction, dont_look_inside +from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint class TypeCell(W_Root): @@ -173,7 +173,7 @@ if (not we_are_jitted() or 
w_self.is_heaptype() or w_self.space.config.objspace.std.mutable_builtintypes): return w_self._version_tag - # heap objects cannot get their version_tag changed + # prebuilt objects cannot get their version_tag changed return w_self._pure_version_tag() @purefunction_promote() @@ -316,7 +316,7 @@ return w_value return None - + @unroll_safe def _lookup(w_self, key): space = w_self.space for w_class in w_self.mro_w: @@ -325,6 +325,7 @@ return w_value return None + @unroll_safe def _lookup_where(w_self, key): # like lookup() but also returns the parent class in which the # attribute was found diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -277,6 +277,7 @@ """) def test_default_and_kw(self): + py.test.skip("Wait until we have saner defaults strat") def main(n): def f(i, j=1): return i + j @@ -539,7 +540,7 @@ i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=) --TICK-- - jump(p0, p1, p2, i12, p4, descr=) + jump(p0, p1, p2, i12, descr=) """) def test_exception_inside_loop_2(self): @@ -585,7 +586,7 @@ --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- - jump(p0, p1, p2, p3, i14, i5, p6, descr=) + jump(p0, p1, p2, p3, i14, i5, descr=) """) def test_chain_of_guards(self): @@ -685,13 +686,13 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr__GcStruct_listLlT_rpy_stringPtr_Char), p8, 46, descr=) + p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) guard_no_exception(descr=) guard_nonnull(p14, descr=) i15 = getfield_gc(p14, descr=) i16 = int_is_true(i15) guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default__dum_nocheckConst_listPtr), p14, descr=) + p18 = call(ConstClass(ll_pop_default), p14, descr=) guard_no_exception(descr=) i19 = getfield_gc(p14, descr=) i20 = int_is_true(i19) @@ -837,7 +838,7 @@ src 
= """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -848,7 +849,7 @@ sa += 20000 return sa """ % (op1, a, op2, b) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -866,7 +867,7 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, op2, float(b)/4.0) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_allcases_reflex(self): @@ -887,7 +888,7 @@ src = """ def main(): sa = 0 - for i in range(1000): + for i in range(300): if i %s %d: sa += 1 else: @@ -898,7 +899,7 @@ sa += 20000 return sa """ % (op1, a, b, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) src = """ def main(): @@ -916,11 +917,13 @@ i += 0.25 return sa """ % (op1, float(a)/4.0, float(b)/4.0, op2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=300) def test_boolrewrite_ptr(self): - # XXX this test is way too imprecise in what it is actually testing - # it should count the number of guards instead + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ compares = ('a == b', 'b == a', 'a != b', 'b != a', 'a == c', 'c != b') for e1 in compares: for e2 in compares: @@ -932,7 +935,7 @@ b = tst() c = tst() sa = 0 - for i in range(1000): + for i in range(300): if %s: sa += 1 else: @@ -945,7 +948,7 @@ a = b return sa """ % (e1, e2) - self.run_and_check(src, threshold=400) + self.run_and_check(src, threshold=200) def test_array_sum(self): def main(): @@ -1009,6 +1012,7 @@ """) def test_func_defaults(self): + py.test.skip("until we fix defaults") def main(n): i = 1 while i < n: @@ -1061,7 +1065,7 @@ i23 = int_lt(0, i21) guard_true(i23, descr=) i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=) + i25 = getarrayitem_raw(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=) i28 = int_add_ovf(i10, i25) @@ -1069,7 +1073,7 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) - + def test_mutate_class(self): def fn(n): class A(object): @@ -1112,3 +1116,461 @@ setfield_gc(ConstPtr(ptr21), p20, descr=) jump(p0, p1, p2, p3, p4, p20, p6, i7, descr=) """) + + + def test_intbound_simple(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. + """ + ops = ('<', '>', '<=', '>=', '==', '!=') + nbr = (3, 7) + for o1 in ops: + for o2 in ops: + for n1 in nbr: + for n2 in nbr: + src = ''' + def f(i): + a, b = 3, 3 + if i %s %d: + a = 0 + else: + a = 1 + if i %s %d: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (o1, n1, o2, n2) + self.run_and_check(src, threshold=200) + + def test_intbound_addsub_mix(self): + """ + This test only checks that we get the expected result, not that any + optimization has been applied. 
+ """ + tests = ('i > 4', 'i > 2', 'i + 1 > 2', '1 + i > 4', + 'i - 1 > 1', '1 - i > 1', '1 - i < -3', + 'i == 1', 'i == 5', 'i != 1', '-2 * i < -4') + for t1 in tests: + for t2 in tests: + src = ''' + def f(i): + a, b = 3, 3 + if %s: + a = 0 + else: + a = 1 + if %s: + b = 0 + else: + b = 1 + return a + b * 2 + + def main(): + res = [0] * 4 + idx = [] + for i in range(15): + idx.extend([i] * 15) + for i in idx: + res[f(i)] += 1 + return res + + ''' % (t1, t2) + self.run_and_check(src, threshold=200) + + def test_intbound_gt(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i > -1: + a += 1 + if i > -2: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) + i12 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i17 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=) + """) + + def test_intbound_sub_lt(self): + def main(): + i, a = 0, 0 + while i < 300: + if i - 10 < 295: + a += 1 + i += 1 + return a + # + log = self.run(main, [], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i5, 300) + guard_true(i7, descr=...) + i9 = int_sub_ovf(i5, 10) + guard_no_overflow(descr=...) + i11 = int_add_ovf(i4, 1) + guard_no_overflow(descr=...) + i13 = int_add(i5, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i13, descr=) + """) + + def test_intbound_addsub_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < n: + if i + 5 >= 5: + a += 1 + if i - 1 >= -1: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, i9) + guard_true(i10, descr=...) 
+ i12 = int_add_ovf(i8, 5) + guard_no_overflow(descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i19 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) + """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) 
+ i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) + i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= len(self): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the overloaded __getitem__ does not introduce double + # array bound checks. + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i20 = int_ge(i18, i8) + guard_false(i20, descr=...) 
+ f21 = getarrayitem_raw(i13, i18, descr=...) + f23 = getarrayitem_raw(i13, i14, descr=...) + f24 = float_add(f21, f23) + f26 = getarrayitem_raw(i13, i6, descr=...) + f27 = float_add(f24, f26) + i29 = int_add(i6, 1) + i31 = int_ge(i29, i8) + guard_false(i31, descr=...) + f33 = getarrayitem_raw(i13, i29, descr=...) + f34 = float_add(f27, f33) + i36 = int_add(i6, 2) + i38 = int_ge(i36, i8) + guard_false(i38, descr=...) + f39 = getarrayitem_raw(i13, i36, descr=...) + ... + """) + + + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + assert len(self) == 256 + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # + # check that the array bound checks are removed + # + # The force_token()s are still there, but will be eliminated by the + # backend regalloc, so they are harmless + assert loop.match(ignore_ops=['force_token'], + expected_src=""" + ... + i17 = int_and(i14, 255) + f18 = getarrayitem_raw(i8, i17, descr=...) + f20 = getarrayitem_raw(i8, i9, descr=...) + f21 = float_add(f18, f20) + f23 = getarrayitem_raw(i8, i10, descr=...) + f24 = float_add(f21, f23) + i26 = int_add(i6, 1) + i29 = int_and(i26, 255) + f30 = getarrayitem_raw(i8, i29, descr=...) + f31 = float_add(f24, f30) + i33 = int_add(i6, 2) + i36 = int_and(i33, 255) + f37 = getarrayitem_raw(i8, i36, descr=...) + ... 
+ """) + + def test_min_max(self): + def main(): + i=0 + sa=0 + while i < 300: + sa+=min(max(i, 3000), 4000) + i+=1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == 300*3000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i7 = int_lt(i4, 300) + guard_true(i7, descr=...) + i9 = int_add_ovf(i5, 3000) + guard_no_overflow(descr=...) + i11 = int_add(i4, 1) + --TICK-- + jump(p0, p1, p2, p3, i11, i9, descr=) + """) + + def test_silly_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(*lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... + """) + + def test_iter_max(self): + def main(): + i = 2 + sa = 0 + while i < 300: + lst = range(i) + sa += max(lst) # ID: max + i += 1 + return sa + log = self.run(main, [], threshold=200) + assert log.result == main() + loop, = log.loops_by_filename(self.filepath) + # We dont want too many guards, but a residual call to min_max_loop + guards = [n for n in log.opnames(loop.ops_by_id("max")) if n.startswith('guard')] + assert len(guards) < 20 + assert loop.match_by_id('max',""" + ... + p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...) + ... 
+ """) + + def test__ffi_call(self): + from pypy.rlib.test.test_libffi import get_libm_name + def main(libm_name): + try: + from _ffi import CDLL, types + except ImportError: + sys.stderr.write('SKIP: cannot import _ffi\n') + return 0 + + libm = CDLL(libm_name) + pow = libm.getfunc('pow', [types.double, types.double], + types.double) + i = 0 + res = 0 + while i < 300: + res += pow(2, 3) + i += 1 + return pow.getaddr(), res + # + libm_name = get_libm_name(sys.platform) + log = self.run(main, [libm_name], threshold=200) + pow_addr, res = log.result + assert res == 8.0 * 300 + loop, = log.loops_by_filename(self.filepath) + # XXX: write the actual test when we merge this to jitypes2 + ## ops = self.get_by_bytecode('CALL_FUNCTION') + ## assert len(ops) == 2 # we get two loops, because of specialization + ## call_function = ops[0] + ## last_ops = [op.getopname() for op in call_function[-5:]] + ## assert last_ops == ['force_token', + ## 'setfield_gc', + ## 'call_may_force', + ## 'guard_not_forced', + ## 'guard_no_exception'] + ## call = call_function[-3] + ## assert call.getarg(0).value == pow_addr + ## assert call.getarg(1).value == 2.0 + ## assert call.getarg(2).value == 3.0 diff --git a/pypy/objspace/flow/test/test_model.py b/pypy/objspace/flow/test/test_model.py --- a/pypy/objspace/flow/test/test_model.py +++ b/pypy/objspace/flow/test/test_model.py @@ -71,19 +71,6 @@ pieces.headerblock.exits[1], pieces.whileblock.exits[0]] -def test_traverse(): - lst = [] - traverse(lst.append, graph) - assert lst == [pieces.startblock, - pieces.startblock.exits[0], - pieces.headerblock, - pieces.headerblock.exits[0], - graph.returnblock, - pieces.headerblock.exits[1], - pieces.whileblock, - pieces.whileblock.exits[0]] - assert flatten(graph) == lst - def test_mkentrymap(): entrymap = mkentrymap(graph) startlink = entrymap[graph.startblock][0] diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py --- a/pypy/rpython/memory/test/test_gc.py +++ 
b/pypy/rpython/memory/test/test_gc.py @@ -8,7 +8,7 @@ from pypy.rpython.lltypesystem.rstr import STR from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.objectmodel import compute_unique_id, keepalive_until_here +from pypy.rlib.objectmodel import compute_unique_id from pypy.rlib import rgc from pypy.rlib.rstring import StringBuilder from pypy.rlib.rarithmetic import LONG_BIT diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -113,7 +113,4 @@ def allocate_lock(space): """Create a new lock object. (allocate() is an obsolete synonym.) See LockType.__doc__ for information about locks.""" - return space.wrap(Lock(space)) - -def getlocktype(space): - return space.gettypeobject(Lock.typedef) + return space.wrap(Lock(space)) \ No newline at end of file diff --git a/pypy/jit/metainterp/test/test_tlc.py b/pypy/jit/metainterp/test/test_tlc.py --- a/pypy/jit/metainterp/test/test_tlc.py +++ b/pypy/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from pypy.jit.tl import tlc -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class TLCTests: diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,6 +106,11 @@ del obj import gc; gc.collect() + try: + del space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + for w_obj in state.non_heaptypes_w: Py_DecRef(space, w_obj) state.non_heaptypes_w[:] = [] diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py --- a/pypy/jit/metainterp/test/test_slist.py +++ b/pypy/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test.test_basic import LLJitMixin, 
OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.rlib.jit import JitDriver class ListTests(object): diff --git a/pypy/translator/cli/src/ll_math.cs b/pypy/translator/cli/src/ll_math.cs --- a/pypy/translator/cli/src/ll_math.cs +++ b/pypy/translator/cli/src/ll_math.cs @@ -224,5 +224,25 @@ { return Math.Tanh(x); } + + static public bool ll_math_isnan(double x) + { + return double.IsNaN(x); + } + + static public bool ll_math_isinf(double x) + { + return double.IsInfinity(x); + } + + static public double ll_math_copysign(double x, double y) + { + if (x < 0.0) + x = -x; + if (y > 0.0 || (y == 0.0 && Math.Atan2(y, -1.0) > 0.0)) + return x; + else + return -x; + } } } diff --git a/pypy/translator/backendopt/support.py b/pypy/translator/backendopt/support.py --- a/pypy/translator/backendopt/support.py +++ b/pypy/translator/backendopt/support.py @@ -39,74 +39,6 @@ # assume PyObjPtr return True -def needs_conservative_livevar_calculation(block): - from pypy.rpython.lltypesystem import rclass - vars = block.getvariables() - assert len(block.exits) == 1 - exitingvars = block.exits[0].args - for var in vars: - TYPE = getattr(var, "concretetype", lltype.Ptr(lltype.PyObject)) - if isinstance(TYPE, lltype.Ptr) and not var_needsgc(var): - if isinstance(TYPE.TO, lltype.FuncType): - continue - try: - lltype.castable(TYPE, rclass.CLASSTYPE) - except lltype.InvalidCast: - if var in exitingvars: - return True - else: - return False - -def generate_keepalive(vars, annotator=None): - keepalive_ops = [] - for v in vars: - if isinstance(v, Constant): - continue - if v.concretetype._is_atomic(): - continue - v_keepalive = Variable() - v_keepalive.concretetype = lltype.Void - if annotator is not None: - annotator.setbinding(v_keepalive, s_ImpossibleValue) - keepalive_ops.append(SpaceOperation('keepalive', [v], v_keepalive)) - return keepalive_ops - -def split_block_with_keepalive(block, index_operation, - keep_alive_op_args=True, - annotator=None): - 
splitlink = split_block(annotator, block, index_operation) - afterblock = splitlink.target - conservative_keepalives = needs_conservative_livevar_calculation(block) - if conservative_keepalives: - keep_alive_vars = [var for var in block.getvariables() - if var_needsgc(var)] - # XXX you could maybe remove more, if the variables are kept - # alive by something else. but this is sometimes hard to know - for i, var in enumerate(keep_alive_vars): - try: - index = splitlink.args.index(var) - newvar = afterblock.inputargs[index] - except ValueError: - splitlink.args.append(var) - newvar = copyvar(annotator, var) - afterblock.inputargs.append(newvar) - keep_alive_vars[i] = newvar - elif keep_alive_op_args and afterblock.operations: - keep_alive_vars = [var for var in afterblock.operations[0].args - if isinstance(var, Variable) and var_needsgc(var)] - if len(afterblock.operations) > 1 or afterblock.exitswitch != c_last_exception: - afterblock.operations[1:1] = generate_keepalive(keep_alive_vars, - annotator=annotator) - keep_alive_vars = [] - else: - keep_alive_vars = [] - pos = len(afterblock.operations) - if afterblock.exitswitch == c_last_exception: - pos -= 1 # insert the keepalives just before the last operation - # in case of exception-catching - afterblock.operations[pos:pos] = generate_keepalive(keep_alive_vars) - return splitlink - def find_calls_from(translator, graph, memo=None): if memo and graph in memo: return memo[graph] diff --git a/pypy/rpython/rpbc.py b/pypy/rpython/rpbc.py --- a/pypy/rpython/rpbc.py +++ b/pypy/rpython/rpbc.py @@ -485,7 +485,7 @@ try: thisattrvalue = frozendesc.attrcache[attr] except KeyError: - if not frozendesc.has_attribute(attr): + if frozendesc.warn_missing_attribute(attr): warning("Desc %r has no attribute %r" % (frozendesc, attr)) continue llvalue = r_value.convert_const(thisattrvalue) diff --git a/pypy/translator/exceptiontransform.py b/pypy/translator/exceptiontransform.py --- a/pypy/translator/exceptiontransform.py +++ 
b/pypy/translator/exceptiontransform.py @@ -229,7 +229,6 @@ n_need_exc_matching_blocks += need_exc_matching n_gen_exc_checks += gen_exc_checks cleanup_graph(graph) - removenoops.remove_superfluous_keep_alive(graph) return n_need_exc_matching_blocks, n_gen_exc_checks def replace_stack_unwind(self, block): diff --git a/pypy/jit/metainterp/test/test_dlist.py b/pypy/jit/metainterp/test/test_dlist.py deleted file mode 100644 --- a/pypy/jit/metainterp/test/test_dlist.py +++ /dev/null @@ -1,165 +0,0 @@ - -import py -from pypy.rlib.jit import JitDriver -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin -py.test.skip("Disabled") - -class ListTests: - def test_basic(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - n -= 1 - return l[0] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(getarrayitem_gc=0, setarrayitem_gc=1) -# XXX fix codewriter -# guard_exception=0, -# guard_no_exception=1) - - def test_list_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=2, getarrayitem_gc=0) - - def test_list_escapes_but_getitem_goes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] * (n + 1) - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0] - l[0] = x + 1 - l[n] = n - x = l[2] - y = l[1] + l[2] - l[1] = x + y - n -= 1 - return l[3] - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=3, getarrayitem_gc=0) - - def 
test_list_of_ptrs(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - class A(object): - def __init__(self, x): - self.x = x - - def f(n): - l = [A(3)] - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - x = l[0].x + 1 - l[0] = A(x) - n -= 1 - return l[0].x - - res = self.meta_interp(f, [10], listops=True) - assert res == f(10) - self.check_loops(setarrayitem_gc=1, getarrayitem_gc=0, - new_with_vtable=1) # A should escape - - def test_list_checklength(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [10, 13], listops=True) - assert res == f(10, 13) - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_list_checklength_run(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n, a): - l = [0] * a - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) > n: - return 42 - l[0] = n - n -= 1 - return l[0] - - res = self.meta_interp(f, [50, 13], listops=True) - assert res == 42 - self.check_loops(setarrayitem_gc=1, arraylen_gc=1) - - def test_checklength_cannot_go_away(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - - def f(n): - l = [0] * n - while n > 0: - myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - if len(l) < 3: - return len(l) - l = [0] * n - n -= 1 - return 0 - - res = self.meta_interp(f, [10], listops=True) - assert res == 2 - self.check_loops(arraylen_gc=1) - - def test_list_indexerror(self): - # this is an example where IndexError is raised before - # even getting to the JIT - py.test.skip("I suspect bug somewhere outside of the JIT") - myjitdriver = JitDriver(greens = [], reds = ['n', 'l']) - def f(n): - l = [0] - while n > 0: - 
myjitdriver.can_enter_jit(n=n, l=l) - myjitdriver.jit_merge_point(n=n, l=l) - l[n] = n - n -= 1 - return l[3] - - def g(n): - try: - f(n) - return 0 - except IndexError: - return 42 - - res = self.meta_interp(g, [10]) - assert res == 42 - self.check_loops(setitem=2) - -class TestLLtype(ListTests, LLJitMixin): - pass diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,8 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.executor import execute +from pypy.jit.codewriter.heaptracker import vtable2descr class AbstractVirtualValue(optimizer.OptValue): @@ -72,28 +74,53 @@ assert isinstance(fieldvalue, optimizer.OptValue) self._fields[ofs] = fieldvalue + def _get_descr(self): + raise NotImplementedError + + def _is_immutable_and_filled_with_constants(self): + count = self._get_descr().count_fields_if_immutable() + if count != len(self._fields): # always the case if count == -1 + return False + for value in self._fields.itervalues(): + subbox = value.force_box() + if not isinstance(subbox, Const): + return False + return True + def _really_force(self): - assert self.source_op is not None + op = self.source_op + assert op is not None # ^^^ This case should not occur any more (see test_bug_3). 
# if not we_are_translated(): - self.source_op.name = 'FORCE ' + self.source_op.name - newoperations = self.optimizer.newoperations - newoperations.append(self.source_op) - self.box = box = self.source_op.result - # - iteritems = self._fields.iteritems() - if not we_are_translated(): #random order is fine, except for tests - iteritems = list(iteritems) - iteritems.sort(key = lambda (x,y): x.sort_key()) - for ofs, value in iteritems: - if value.is_null(): - continue - subbox = value.force_box() - op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, - descr=ofs) + op.name = 'FORCE ' + self.source_op.name + + if self._is_immutable_and_filled_with_constants(): + box = self.optimizer.constant_fold(op) + self.make_constant(box) + for ofs, value in self._fields.iteritems(): + subbox = value.force_box() + assert isinstance(subbox, Const) + execute(self.optimizer.cpu, None, rop.SETFIELD_GC, + ofs, box, subbox) + # keep self._fields, because it's all immutable anyway + else: + newoperations = self.optimizer.newoperations newoperations.append(op) - self._fields = None + self.box = box = op.result + # + iteritems = self._fields.iteritems() + if not we_are_translated(): #random order is fine, except for tests + iteritems = list(iteritems) + iteritems.sort(key = lambda (x,y): x.sort_key()) + for ofs, value in iteritems: + if value.is_null(): + continue + subbox = value.force_box() + op = ResOperation(rop.SETFIELD_GC, [box, subbox], None, + descr=ofs) + newoperations.append(op) + self._fields = None def _get_field_descr_list(self): _cached_sorted_fields = self._cached_sorted_fields @@ -168,6 +195,9 @@ fielddescrs = self._get_field_descr_list() return modifier.make_virtual(self.known_class, fielddescrs) + def _get_descr(self): + return vtable2descr(self.optimizer.cpu, self.known_class.getint()) + def __repr__(self): cls_name = self.known_class.value.adr.ptr._obj._TYPE._name if self._fields is None: @@ -185,6 +215,9 @@ fielddescrs = self._get_field_descr_list() return 
modifier.make_vstruct(self.structdescr, fielddescrs) + def _get_descr(self): + return self.structdescr + class VArrayValue(AbstractVirtualValue): def __init__(self, optimizer, arraydescr, size, keybox, source_op=None): diff --git a/pypy/rlib/rsre/test/test_zjit.py b/pypy/rlib/rsre/test/test_zjit.py --- a/pypy/rlib/rsre/test/test_zjit.py +++ b/pypy/rlib/rsre/test/test_zjit.py @@ -1,5 +1,5 @@ import py -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.nonconst import NonConstant from pypy.rlib.rsre.test.test_match import get_code from pypy.rlib.rsre import rsre_core @@ -45,7 +45,7 @@ assert m._jit_unroll_safe_ -class TestJitRSre(test_basic.LLJitMixin): +class TestJitRSre(support.LLJitMixin): def meta_interp_match(self, pattern, string, repeat=1): r = get_code(pattern) diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -351,14 +351,6 @@ """Return the number of free variables in co.""" raise NotImplementedError - at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) -def PyCode_New(space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, freevars, cellvars, filename, name, firstlineno, lnotab): - """Return a new code object. If you need a dummy code object to - create a frame, use PyCode_NewEmpty() instead. Calling - PyCode_New() directly can bind you to a precise Python - version since the definition of the bytecode changes often.""" - raise NotImplementedError - @cpython_api([PyObject], rffi.INT_real, error=-1) def PyCodec_Register(space, search_function): """Register a new codec search function. 
@@ -1116,20 +1108,6 @@ with an exception set on failure (the module still exists in this case).""" raise NotImplementedError - at cpython_api([rffi.CCHARP], PyObject) -def PyImport_AddModule(space, name): - """Return the module object corresponding to a module name. The name argument - may be of the form package.module. First check the modules dictionary if - there's one there, and if not, create a new one and insert it in the modules - dictionary. Return NULL with an exception set on failure. - - This function does not load or import the module; if the module wasn't already - loaded, you will get an empty module object. Use PyImport_ImportModule() - or one of its variants to import a module. Package structures implied by a - dotted name for name are not created if not already present.""" - borrow_from() - raise NotImplementedError - @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, co): """Given a module name (possibly of the form package.module) and a code @@ -1965,14 +1943,6 @@ """ raise NotImplementedError - at cpython_api([PyObject, PyObject, rffi.INTP], rffi.INT_real, error=-1) -def PyObject_Cmp(space, o1, o2, result): - """Compare the values of o1 and o2 using a routine provided by o1, if one - exists, otherwise with a routine provided by o2. The result of the - comparison is returned in result. Returns -1 on failure. This is the - equivalent of the Python statement result = cmp(o1, o2).""" - raise NotImplementedError - @cpython_api([PyObject], PyObject) def PyObject_Bytes(space, o): """Compute a bytes representation of object o. 
In 2.x, this is just a alias diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py --- a/pypy/jit/backend/x86/test/test_rx86.py +++ b/pypy/jit/backend/x86/test/test_rx86.py @@ -206,3 +206,8 @@ s = CodeBuilder64() s.MOV_rm(edx, (edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' + +def test_movsd_xj_64(): + s = CodeBuilder64() + s.MOVSD_xj(xmm2, 0x01234567) + assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -52,6 +52,8 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + if stderr.startswith('SKIP:'): + py.test.skip(stderr) assert not stderr # # parse the JIT log @@ -100,11 +102,11 @@ class TestOpMatcher(object): - def match(self, src1, src2): + def match(self, src1, src2, **kwds): from pypy.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations, src=src1) - return matcher.match(src2) + return matcher.match(src2, **kwds) def test_match_var(self): match_var = OpMatcher([]).match_var @@ -234,6 +236,21 @@ """ assert self.match(loop, expected) + def test_ignore_opcodes(self): + loop = """ + [i0] + i1 = int_add(i0, 1) + i4 = force_token() + i2 = int_sub(i1, 10) + jump(i4) + """ + expected = """ + i1 = int_add(i0, 1) + i2 = int_sub(i1, 10) + jump(i4, descr=...) 
+ """ + assert self.match(loop, expected, ignore_ops=['force_token']) + class TestRunPyPyC(BaseTestPyPyC): @@ -253,6 +270,14 @@ log = self.run(src, [30, 12]) assert log.result == 42 + def test_skip(self): + import pytest + def f(): + import sys + print >> sys.stderr, 'SKIP: foobar' + # + raises(pytest.skip.Exception, "self.run(f, [])") + def test_parse_jitlog(self): def f(): i = 0 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,7 +37,7 @@ DEBUG_WRAPPER = True # update these for other platforms -Py_ssize_t = lltype.Signed +Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -192,14 +192,19 @@ - set `external` to False to get a C function pointer, but not exported by the API headers. """ + if isinstance(restype, lltype.Typedef): + real_restype = restype.OF + else: + real_restype = restype + if error is _NOT_SPECIFIED: - if isinstance(restype, lltype.Ptr): - error = lltype.nullptr(restype.TO) - elif restype is lltype.Void: + if isinstance(real_restype, lltype.Ptr): + error = lltype.nullptr(real_restype.TO) + elif real_restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: - error = rffi.cast(restype, error) - expect_integer = (isinstance(restype, lltype.Primitive) and + error = rffi.cast(real_restype, error) + expect_integer = (isinstance(real_restype, lltype.Primitive) and rffi.cast(restype, 0) == 0) def decorate(func): @@ -400,21 +405,9 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyBufferProcs = lltype.ForwardReference() PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) -def F(ARGS, RESULT=lltype.Signed): - return lltype.Ptr(lltype.FuncType(ARGS, RESULT)) -PyBufferProcsFields = ( - ("bf_getreadbuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), 
- ("bf_getwritebuffer", F([PyObject, lltype.Signed, rffi.VOIDPP])), - ("bf_getsegcount", F([PyObject, rffi.INTP])), - ("bf_getcharbuffer", F([PyObject, lltype.Signed, rffi.CCHARPP])), -# we don't support new buffer interface for now - ("bf_getbuffer", rffi.VOIDP), - ("bf_releasebuffer", rffi.VOIDP)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -cpython_struct('PyBufferProcs', PyBufferProcsFields, PyBufferProcs) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) @@ -539,7 +532,8 @@ elif is_PyObject(callable.api_func.restype): if result is None: - retval = make_ref(space, None) + retval = rffi.cast(callable.api_func.restype, + make_ref(space, None)) elif isinstance(result, Reference): retval = result.get_ref(space) elif not rffi._isllptr(result): diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py --- a/pypy/rpython/lltypesystem/ll2ctypes.py +++ b/pypy/rpython/lltypesystem/ll2ctypes.py @@ -255,6 +255,9 @@ return cls def build_new_ctypes_type(T, delayed_builders): + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS @@ -758,6 +761,8 @@ """ if T is lltype.Void: return None + if isinstance(T, lltype.Typedef): + T = T.OF if isinstance(T, lltype.Ptr): if not cobj or not ctypes.cast(cobj, ctypes.c_void_p).value: # NULL pointer # CFunctionType.__nonzero__ is broken before Python 2.6 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -7,10 +7,10 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, + cpython_api, 
cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - PyBufferProcs, build_type_checkers) + build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, RefcountState, borrow_from) @@ -24,7 +24,7 @@ from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, - PyNumberMethods, PySequenceMethods) + PyNumberMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.interpreter.error import OperationError @@ -361,14 +361,14 @@ # hopefully this does not clash with the memory model assumed in # extension modules - at cpython_api([PyObject, rffi.INTP], lltype.Signed, external=False, + at cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: - ref[0] = rffi.cast(rffi.INT, space.len_w(w_obj)) + ref[0] = space.len_w(w_obj) return 1 - at cpython_api([PyObject, lltype.Signed, rffi.VOIDPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, external=False, error=-1) def str_getreadbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString @@ -381,7 +381,7 @@ Py_DecRef(space, pyref) return space.len_w(w_str) - at cpython_api([PyObject, lltype.Signed, rffi.CCHARPP], lltype.Signed, + at cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, external=False, error=-1) def str_getcharbuffer(space, w_str, segment, ref): from pypy.module.cpyext.stringobject import PyString_AsString diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py 
--- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -347,8 +347,9 @@ assert list('') == [] assert list('abc') == ['a', 'b', 'c'] assert list((1, 2)) == [1, 2] - l = [] + l = [1] assert list(l) is not l + assert list(l) == l assert list(range(10)) == range(10) def test_explicit_new_init(self): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -798,7 +798,6 @@ return w_default def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default): - # XXX should be more efficient, with only one dict lookup return w_dict.setdefault(w_key, w_default) def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w): diff --git a/pypy/translator/backendopt/test/test_inline.py b/pypy/translator/backendopt/test/test_inline.py --- a/pypy/translator/backendopt/test/test_inline.py +++ b/pypy/translator/backendopt/test/test_inline.py @@ -1,7 +1,7 @@ # XXX clean up these tests to use more uniform helpers import py import os -from pypy.objspace.flow.model import traverse, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import last_exception, checkgraph from pypy.translator.backendopt import canraise from pypy.translator.backendopt.inline import simple_inline_function, CannotInline @@ -20,29 +20,27 @@ from pypy.translator.backendopt import removenoops from pypy.objspace.flow.model import summary -def no_missing_concretetype(node): - if isinstance(node, Block): - for v in node.inputargs: - assert hasattr(v, 'concretetype') - for op in node.operations: - for v in op.args: - assert hasattr(v, 'concretetype') - assert hasattr(op.result, 'concretetype') - if isinstance(node, Link): - if node.exitcase is not None: - assert hasattr(node, 'llexitcase') - for v in node.args: - assert hasattr(v, 'concretetype') - if 
isinstance(node.last_exception, (Variable, Constant)): - assert hasattr(node.last_exception, 'concretetype') - if isinstance(node.last_exc_value, (Variable, Constant)): - assert hasattr(node.last_exc_value, 'concretetype') - def sanity_check(t): # look for missing '.concretetype' for graph in t.graphs: checkgraph(graph) - traverse(no_missing_concretetype, graph) + for node in graph.iterblocks(): + for v in node.inputargs: + assert hasattr(v, 'concretetype') + for op in node.operations: + for v in op.args: + assert hasattr(v, 'concretetype') + assert hasattr(op.result, 'concretetype') + for node in graph.iterlinks(): + if node.exitcase is not None: + assert hasattr(node, 'llexitcase') + for v in node.args: + assert hasattr(v, 'concretetype') + if isinstance(node.last_exception, (Variable, Constant)): + assert hasattr(node.last_exception, 'concretetype') + if isinstance(node.last_exc_value, (Variable, Constant)): + assert hasattr(node.last_exc_value, 'concretetype') + class CustomError1(Exception): def __init__(self): diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,8 +1,10 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) +from pypy.module.cpyext.pyobject import borrow_from from pypy.rpython.lltypesystem import rffi from pypy.interpreter.error import OperationError +from pypy.interpreter.module import Module @cpython_api([PyObject], PyObject) def PyImport_Import(space, w_name): @@ -51,3 +53,23 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) + at cpython_api([CONST_STRING], PyObject) +def PyImport_AddModule(space, name): + """Return the module object corresponding to a module name. The name + argument may be of the form package.module. 
First check the modules + dictionary if there's one there, and if not, create a new one and insert + it in the modules dictionary. Return NULL with an exception set on + failure. + + This function does not load or import the module; if the module wasn't + already loaded, you will get an empty module object. Use + PyImport_ImportModule() or one of its variants to import a module. + Package structures implied by a dotted name for name are not created if + not already present.""" + from pypy.module.imp.importing import check_sys_modules_w + modulename = rffi.charp2str(name) + w_mod = check_sys_modules_w(space, modulename) + if not w_mod or space.is_w(w_mod, space.w_None): + w_mod = Module(space, space.wrap(modulename)) + return borrow_from(None, w_mod) + diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,5 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.rpython.lltypesystem import rffi, lltype class TestImport(BaseApiTest): def test_import(self, space, api): @@ -7,6 +8,16 @@ assert pdb assert space.getattr(pdb, space.wrap("pm")) + def test_addmodule(self, space, api): + with rffi.scoped_str2charp("sys") as modname: + w_sys = api.PyImport_AddModule(modname) + assert w_sys is space.sys + + with rffi.scoped_str2charp("foobar") as modname: + w_foobar = api.PyImport_AddModule(modname) + assert space.str_w(space.getattr(w_foobar, + space.wrap('__name__'))) == 'foobar' + def test_reload(self, space, api): pdb = api.PyImport_Import(space.wrap("pdb")) space.delattr(pdb, space.wrap("set_trace")) diff --git a/pypy/tool/jitlogparser/module_finder.py b/pypy/tool/jitlogparser/module_finder.py --- a/pypy/tool/jitlogparser/module_finder.py +++ b/pypy/tool/jitlogparser/module_finder.py @@ -6,7 +6,7 @@ more = [code] while more: next = more.pop() - 
res[next.co_firstlineno] = next + res[(next.co_firstlineno, next.co_name)] = next more += [co for co in next.co_consts if isinstance(co, types.CodeType)] return res diff --git a/pypy/translator/oosupport/test_template/builtin.py b/pypy/translator/oosupport/test_template/builtin.py --- a/pypy/translator/oosupport/test_template/builtin.py +++ b/pypy/translator/oosupport/test_template/builtin.py @@ -227,6 +227,17 @@ assert res == ord('a') + def test_rlocale(self): + from pypy.rlib.rlocale import isupper, islower, isalpha, isalnum, tolower + def fn(): + assert isupper(ord("A")) + assert islower(ord("a")) + assert not isalpha(ord(" ")) + assert isalnum(ord("1")) + assert tolower(ord("A")) == ord("a") + self.interpret(fn, []) + + class BaseTestTime(llBaseTestTime): def test_time_clock(self): diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -261,7 +261,8 @@ if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') - return space.wrap(rffi.charp2strn(buf, bufsize_p[0] - 1)) + length = intmask(bufsize_p[0] - 1) + return space.wrap(rffi.charp2strn(buf, length)) def convert_to_regdata(space, w_value, typ): buf = None @@ -445,9 +446,10 @@ continue if ret != 0: raiseWindowsError(space, ret, 'RegQueryValueEx') + length = intmask(retDataSize[0]) return space.newtuple([ convert_from_regdata(space, databuf, - retDataSize[0], retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) @@ -595,11 +597,11 @@ if ret != 0: raiseWindowsError(space, ret, 'RegEnumValue') + length = intmask(retDataSize[0]) return space.newtuple([ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, - retDataSize[0], - retType[0]), + length, retType[0]), space.wrap(retType[0]), ]) diff --git a/pypy/jit/backend/cli/test/test_basic.py b/pypy/jit/backend/cli/test/test_basic.py --- a/pypy/jit/backend/cli/test/test_basic.py +++ 
b/pypy/jit/backend/cli/test/test_basic.py @@ -1,14 +1,14 @@ import py from pypy.jit.backend.cli.runner import CliCPU -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support, test_ajit -class CliJitMixin(test_basic.OOJitMixin): +class CliJitMixin(suport.OOJitMixin): CPUClass = CliCPU def setup_class(cls): from pypy.translator.cli.support import PythonNet PythonNet.System # possibly raises Skip -class TestBasic(CliJitMixin, test_basic.TestOOtype): +class TestBasic(CliJitMixin, test_ajit.TestOOtype): # for the individual tests see # ====> ../../../metainterp/test/test_basic.py diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py --- a/pypy/translator/jvm/opcodes.py +++ b/pypy/translator/jvm/opcodes.py @@ -106,6 +106,10 @@ 'debug_catch_exception': Ignore, 'debug_reraise_traceback': Ignore, 'debug_print_traceback': Ignore, + 'debug_start': Ignore, + 'debug_stop': Ignore, + 'debug_print': Ignore, + 'keepalive': Ignore, # __________ numeric operations __________ @@ -144,6 +148,7 @@ 'int_xor_ovf': jvm.IXOR, 'int_floordiv_ovf_zer': jvm.IFLOORDIVZEROVF, 'int_mod_ovf_zer': _check_zer(jvm.IREMOVF), + 'int_between': jvm.PYPYINTBETWEEN, 'uint_invert': 'bitwise_negate', @@ -185,8 +190,8 @@ 'llong_mod_zer': _check_zer(jvm.LREM), 'llong_and': jvm.LAND, 'llong_or': jvm.LOR, - 'llong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'llong_rshift': [PushAllArgs, jvm.L2I, jvm.LSHR, StoreResult], + 'llong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'llong_rshift': [PushAllArgs, jvm.LSHR, StoreResult], 'llong_xor': jvm.LXOR, 'llong_floordiv_ovf': jvm.LFLOORDIVOVF, 'llong_floordiv_ovf_zer': jvm.LFLOORDIVZEROVF, @@ -202,9 +207,11 @@ 'ullong_truediv': None, # TODO 'ullong_floordiv': jvm.LDIV, # valid? 
'ullong_mod': jvm.PYPYULONGMOD, - 'ullong_lshift': [PushAllArgs, jvm.L2I, jvm.LSHL, StoreResult], - 'ullong_rshift': [PushAllArgs, jvm.L2I, jvm.LUSHR, StoreResult], + 'ullong_lshift': [PushAllArgs, jvm.LSHL, StoreResult], + 'ullong_rshift': [PushAllArgs, jvm.LUSHR, StoreResult], 'ullong_mod_zer': jvm.PYPYULONGMOD, + 'ullong_or': jvm.LOR, + 'ullong_and': jvm.LAND, # when casting from bool we want that every truth value is casted # to 1: we can't simply DoNothing, because the CLI stack could @@ -227,5 +234,8 @@ 'cast_float_to_uint': jvm.PYPYDOUBLETOUINT, 'truncate_longlong_to_int': jvm.L2I, 'cast_longlong_to_float': jvm.L2D, + 'cast_float_to_ulonglong': jvm.PYPYDOUBLETOULONG, + 'cast_ulonglong_to_float': jvm.PYPYULONGTODOUBLE, 'cast_primitive': [PushAllArgs, CastPrimitive, StoreResult], + 'force_cast': [PushAllArgs, CastPrimitive, StoreResult], }) diff --git a/pypy/jit/metainterp/test/test_tl.py b/pypy/jit/metainterp/test/test_tl.py --- a/pypy/jit/metainterp/test/test_tl.py +++ b/pypy/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from pypy.jit.codewriter.policy import StopAtXPolicy -from pypy.jit.metainterp.test.test_basic import OOJitMixin, LLJitMixin +from pypy.jit.metainterp.test.support import OOJitMixin, LLJitMixin class ToyLanguageTests: diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py --- a/pypy/jit/backend/llgraph/runner.py +++ b/pypy/jit/backend/llgraph/runner.py @@ -25,12 +25,13 @@ class Descr(history.AbstractDescr): def __init__(self, ofs, typeinfo, extrainfo=None, name=None, - arg_types=None): + arg_types=None, count_fields_if_immut=-1): self.ofs = ofs self.typeinfo = typeinfo self.extrainfo = extrainfo self.name = name self.arg_types = arg_types + self.count_fields_if_immut = count_fields_if_immut def get_arg_types(self): return self.arg_types @@ -63,6 +64,9 @@ def as_vtable_size_descr(self): return self + def count_fields_if_immutable(self): + return self.count_fields_if_immut + def __lt__(self, other): 
raise TypeError("cannot use comparison on Descrs") def __le__(self, other): @@ -109,12 +113,14 @@ return False def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None, - arg_types=None): - key = (ofs, typeinfo, extrainfo, name, arg_types) + arg_types=None, count_fields_if_immut=-1): + key = (ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) try: return self._descrs[key] except KeyError: - descr = Descr(ofs, typeinfo, extrainfo, name, arg_types) + descr = Descr(ofs, typeinfo, extrainfo, name, arg_types, + count_fields_if_immut) self._descrs[key] = descr return descr @@ -284,7 +290,8 @@ def sizeof(self, S): assert not isinstance(S, lltype.Ptr) - return self.getdescr(symbolic.get_size(S)) + count = heaptracker.count_fields_if_immutable(S) + return self.getdescr(symbolic.get_size(S), count_fields_if_immut=count) class LLtypeCPU(BaseCPU): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,9 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import cpython_struct, \ - PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, \ - Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE, \ - PyTypeObject, PyTypeObjectPtr, PyBufferProcs, FILEP +from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -55,6 +54,14 @@ wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) +readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) +segcountproc = P(FT([PyO, 
Py_ssize_tP], Py_ssize_t)) +charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) +## We don't support new buffer interface for now +getbufferproc = rffi.VOIDP +releasebufferproc = rffi.VOIDP + PyGetSetDef = cpython_struct("PyGetSetDef", ( ("name", rffi.CCHARP), @@ -127,7 +134,6 @@ ("mp_ass_subscript", objobjargproc), )) -""" PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getreadbuffer", readbufferproc), ("bf_getwritebuffer", writebufferproc), @@ -136,7 +142,6 @@ ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), )) -""" PyMemberDef = cpython_struct("PyMemberDef", ( ("name", rffi.CCHARP), diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -190,22 +190,30 @@ def wait(): """ wait() -> (pid, status) - + Wait for completion of a child process. """ return posix.waitpid(-1, 0) def wait3(options): - """ wait3() -> (pid, status, rusage) + """ wait3(options) -> (pid, status, rusage) Wait for completion of a child process and provides resource usage informations """ from _pypy_wait import wait3 return wait3(options) + def wait4(pid, options): + """ wait4(pid, options) -> (pid, status, rusage) + + Wait for completion of the child process "pid" and provides resource usage informations + """ + from _pypy_wait import wait4 + return wait4(pid, options) + else: # Windows implementations - + # Supply os.popen() based on subprocess def popen(cmd, mode="r", bufsize=-1): """popen(command [, mode='r' [, bufsize]]) -> pipe @@ -293,7 +301,7 @@ raise TypeError("invalid cmd type (%s, expected string)" % (type(cmd),)) return cmd - + # A proxy for a file whose close waits for the process class _wrap_close(object): def __init__(self, stream, proc): diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -14,6 +14,10 @@ (("func_name", 
PyObject),) cpython_struct("PyFunctionObject", PyFunctionObjectFields, PyFunctionObjectStruct) +PyCodeObjectStruct = lltype.ForwardReference() +PyCodeObject = lltype.Ptr(PyCodeObjectStruct) +cpython_struct("PyCodeObject", PyObjectFields, PyCodeObjectStruct) + @bootstrap_function def init_functionobject(space): make_typedescr(Function.typedef, @@ -65,7 +69,36 @@ assert isinstance(w_method, Method) return borrow_from(w_method, w_method.w_class) - at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyObject) +def unwrap_list_of_strings(space, w_list): + return [space.str_w(w_item) for w_item in space.fixedview(w_list)] + + at cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, + PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, + PyObject, PyObject, rffi.INT_real, PyObject], PyCodeObject) +def PyCode_New(space, argcount, nlocals, stacksize, flags, + w_code, w_consts, w_names, w_varnames, w_freevars, w_cellvars, + w_filename, w_funcname, firstlineno, w_lnotab): + """Return a new code object. If you need a dummy code object to + create a frame, use PyCode_NewEmpty() instead. 
Calling + PyCode_New() directly can bind you to a precise Python + version since the definition of the bytecode changes often.""" + return space.wrap(PyCode(space, + argcount=rffi.cast(lltype.Signed, argcount), + nlocals=rffi.cast(lltype.Signed, nlocals), + stacksize=rffi.cast(lltype.Signed, stacksize), + flags=rffi.cast(lltype.Signed, flags), + code=space.str_w(w_code), + consts=space.fixedview(w_consts), + names=unwrap_list_of_strings(space, w_names), + varnames=unwrap_list_of_strings(space, w_varnames), + filename=space.str_w(w_filename), + name=space.str_w(w_funcname), + firstlineno=rffi.cast(lltype.Signed, firstlineno), + lnotab=space.str_w(w_lnotab), + freevars=unwrap_list_of_strings(space, w_freevars), + cellvars=unwrap_list_of_strings(space, w_cellvars))) + + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): """Creates a new empty code object with the specified source location.""" return space.wrap(PyCode(space, diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -1007,7 +1007,8 @@ class AppTestPyPyExtension(object): def setup_class(cls): - cls.space = gettestobjspace(usemodules=['imp', 'zipimport']) + cls.space = gettestobjspace(usemodules=['imp', 'zipimport', + '__pypy__']) cls.w_udir = cls.space.wrap(str(udir)) def test_run_compiled_module(self): diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -135,7 +135,7 @@ return importing.check_sys_modules(space, w_modulename) def new_module(space, w_name): - return space.wrap(Module(space, w_name)) + return space.wrap(Module(space, w_name, add_package=False)) def init_builtin(space, w_name): name = space.str_w(w_name) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- 
a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -253,8 +253,10 @@ except OperationError, e: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) - return 0 - return 1 + result = 0 + else: + result = 1 + return rffi.cast(rffi.INT, result) callback_type = lltype.Ptr(lltype.FuncType( [rffi.VOIDP, rffi.CCHARP, XML_Encoding_Ptr], rffi.INT)) XML_SetUnknownEncodingHandler = expat_external( diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -754,6 +754,8 @@ ("{x for x in z}", "set comprehension"), ("{x : x for x in z}", "dict comprehension"), ("'str'", "literal"), + ("u'str'", "literal"), + ("b'bytes'", "literal"), ("()", "()"), ("23", "literal"), ("{}", "literal"), diff --git a/pypy/jit/metainterp/test/test_virtualref.py b/pypy/jit/metainterp/test/test_virtualref.py --- a/pypy/jit/metainterp/test/test_virtualref.py +++ b/pypy/jit/metainterp/test/test_virtualref.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import JitDriver, dont_look_inside, vref_None from pypy.rlib.jit import virtual_ref, virtual_ref_finish from pypy.rlib.objectmodel import compute_unique_id -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp.virtualref import VirtualRefInfo diff --git a/pypy/translator/jvm/test/test_extreme.py b/pypy/translator/jvm/test/test_extreme.py --- a/pypy/translator/jvm/test/test_extreme.py +++ b/pypy/translator/jvm/test/test_extreme.py @@ -1,5 +1,8 @@ +import py from pypy.translator.jvm.test.runtest import JvmTest from pypy.translator.oosupport.test_template.extreme import BaseTestExtreme class TestExtreme(BaseTestExtreme, JvmTest): - pass + + def 
test_runtimeerror_due_to_stack_overflow(self): + py.test.skip('hotspot bug') diff --git a/pypy/jit/tl/tla/test_tla.py b/pypy/jit/tl/tla/test_tla.py --- a/pypy/jit/tl/tla/test_tla.py +++ b/pypy/jit/tl/tla/test_tla.py @@ -155,7 +155,7 @@ # ____________________________________________________________ -from pypy.jit.metainterp.test.test_basic import LLJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin class TestLLtype(LLJitMixin): def test_loop(self): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/pypy/tool/jitlogparser/test/test_parser.py @@ -114,11 +114,11 @@ fname = str(py.path.local(__file__).join('..', 'x.py')) ops = parse(''' [i0, i1] - debug_merge_point(" #9 LOAD_FAST", 0) - debug_merge_point(" #12 LOAD_CONST", 0) - debug_merge_point(" #22 LOAD_CONST", 0) - debug_merge_point(" #28 LOAD_CONST", 0) - debug_merge_point(" #6 SETUP_LOOP", 0) + debug_merge_point(" #9 LOAD_FAST", 0) + debug_merge_point(" #12 LOAD_CONST", 0) + debug_merge_point(" #22 LOAD_CONST", 0) + debug_merge_point(" #28 LOAD_CONST", 0) + debug_merge_point(" #6 SETUP_LOOP", 0) ''' % locals()) res = Function.from_operations(ops.operations, LoopStorage()) assert res.linerange == (7, 9) diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -196,7 +196,7 @@ class _ExceptionInfo(object): def __init__(self): import sys - self.type, self.value, _ = sys.exc_info() + self.type, self.value, self.traceback = sys.exc_info() return _ExceptionInfo """) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/frameobject.py @@ -0,0 +1,82 @@ +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, bootstrap_function, PyObjectFields, cpython_struct) +from 
pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) +from pypy.module.cpyext.state import State +from pypy.module.cpyext.pystate import PyThreadState +from pypy.module.cpyext.funcobject import PyCodeObject +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +PyFrameObjectStruct = lltype.ForwardReference() +PyFrameObject = lltype.Ptr(PyFrameObjectStruct) +PyFrameObjectFields = (PyObjectFields + + (("f_code", PyCodeObject), + ("f_globals", PyObject), + ("f_lineno", rffi.INT), + )) +cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) + + at bootstrap_function +def init_frameobject(space): + make_typedescr(PyFrame.typedef, + basestruct=PyFrameObject.TO, + attach=frame_attach, + dealloc=frame_dealloc, + realize=frame_realize) + +def frame_attach(space, py_obj, w_obj): + "Fills a newly allocated PyFrameObject with a frame object" + frame = space.interp_w(PyFrame, w_obj) + py_frame = rffi.cast(PyFrameObject, py_obj) + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) + py_frame.c_f_globals = make_ref(space, frame.w_globals) + rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + + at cpython_api([PyObject], lltype.Void, external=False) +def frame_dealloc(space, py_obj): + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + Py_DecRef(space, py_code) + Py_DecRef(space, py_frame.c_f_globals) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +def frame_realize(space, py_obj): + """ + Creates the frame in the interpreter. The PyFrameObject structure must not + be modified after this call. 
+ """ + py_frame = rffi.cast(PyFrameObject, py_obj) + py_code = rffi.cast(PyObject, py_frame.c_f_code) + w_code = from_ref(space, py_code) + code = space.interp_w(PyCode, w_code) + w_globals = from_ref(space, py_frame.c_f_globals) + + frame = PyFrame(space, code, w_globals, closure=None) + frame.f_lineno = py_frame.c_f_lineno + w_obj = space.wrap(frame) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) +def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + typedescr = get_typedescr(PyFrame.typedef) + py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) + py_frame = rffi.cast(PyFrameObject, py_obj) + space.interp_w(PyCode, w_code) # sanity check + py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) + py_frame.c_f_globals = make_ref(space, w_globals) + return py_frame + + at cpython_api([PyFrameObject], rffi.INT_real, error=-1) +def PyTraceBack_Here(space, w_frame): + from pypy.interpreter.pytraceback import record_application_traceback + state = space.fromcache(State) + if state.operror is None: + return -1 + frame = space.interp_w(PyFrame, w_frame) + record_application_traceback(space, state.operror, frame, 0) + return 0 diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -19,6 +19,8 @@ def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): + if gcdescr is not None: + gcdescr.force_index_ofs = FORCE_INDEX_OFS AbstractLLCPU.__init__(self, rtyper, stats, opts, translate_support_code, gcdescr) @@ -127,7 +129,7 @@ fail_index = rffi.cast(TP, addr_of_force_index)[0] assert fail_index >= 0, "already forced!" 
faildescr = self.get_fail_descr_from_number(fail_index) - rffi.cast(TP, addr_of_force_index)[0] = -1 + rffi.cast(TP, addr_of_force_index)[0] = ~fail_index frb = self.assembler._find_failure_recovery_bytecode(faildescr) bytecode = rffi.cast(rffi.UCHARP, frb) # start of "no gc operation!" block @@ -147,7 +149,6 @@ WORD = 4 NUM_REGS = 8 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.esi, regloc.edi] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 supports_longlong = True @@ -163,7 +164,6 @@ WORD = 8 NUM_REGS = 16 CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] - FRAME_FIXED_SIZE = len(CALLEE_SAVE_REGISTERS) + 2 def __init__(self, *args, **kwargs): assert sys.maxint == (2**63 - 1) diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py --- a/pypy/jit/metainterp/test/test_compile.py +++ b/pypy/jit/metainterp/test/test_compile.py @@ -86,6 +86,8 @@ metainterp.history = History() metainterp.history.operations = loop.operations[:] metainterp.history.inputargs = loop.inputargs[:] + cpu._all_size_descrs_with_vtable = ( + LLtypeMixin.cpu._all_size_descrs_with_vtable) # loop_tokens = [] loop_token = compile_new_loop(metainterp, loop_tokens, [], 0, None) diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py --- a/pypy/jit/metainterp/test/test_recursive.py +++ b/pypy/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from pypy.rlib.jit import unroll_safe, dont_look_inside from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import fatalerror -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.rpython.annlowlevel import hlstr from pypy.jit.metainterp.warmspot import get_stats diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- 
a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -487,7 +487,9 @@ # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), # ^^^ returns an address of pointer, since it can change at runtime - + 'gc_adr_of_root_stack_top': LLOp(), + # ^^^ returns the address of gcdata.root_stack_top (for shadowstack only) + # experimental operations in support of thread cloning, only # implemented by the Mark&Sweep GC 'gc_x_swap_pool': LLOp(canraise=(MemoryError,), canunwindgc=True), diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1350,6 +1350,11 @@ pass def _freeze_(self): return True + def __enter__(self): + pass + def __exit__(self, *args): + pass + dummy_lock = DummyLock() ## Table describing the regular part of the interface of object spaces, diff --git a/pypy/translator/jvm/test/test_builtin.py b/pypy/translator/jvm/test/test_builtin.py --- a/pypy/translator/jvm/test/test_builtin.py +++ b/pypy/translator/jvm/test/test_builtin.py @@ -37,6 +37,15 @@ def test_cast_primitive(self): py.test.skip('fixme!') + def test_os_fstat(self): + import os, stat + def fn(): + fd = os.open(__file__, os.O_RDONLY, 0) + st = os.fstat(fd) + os.close(fd) + return st.st_mode + res = self.interpret(fn, []) + assert stat.S_ISREG(res) class TestJvmTime(JvmTest, BaseTestTime): diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/code.h @@ -0,0 +1,12 @@ +#ifndef Py_CODE_H +#define Py_CODE_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyCodeObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_CODE_H */ diff --git a/pypy/translator/backendopt/ssa.py b/pypy/translator/backendopt/ssa.py --- a/pypy/translator/backendopt/ssa.py +++ b/pypy/translator/backendopt/ssa.py @@ -1,4 +1,4 @@ -from 
pypy.objspace.flow.model import Variable, mkentrymap, flatten, Block +from pypy.objspace.flow.model import Variable, mkentrymap, Block from pypy.tool.algo.unionfind import UnionFind class DataFlowFamilyBuilder: diff --git a/pypy/translator/unsimplify.py b/pypy/translator/unsimplify.py --- a/pypy/translator/unsimplify.py +++ b/pypy/translator/unsimplify.py @@ -54,8 +54,7 @@ def split_block(annotator, block, index, _forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables - passed on. NOTE: if you call this after rtyping, you WILL need to worry - about keepalives, you may use backendopt.support.split_block_with_keepalive. + passed on. """ assert 0 <= index <= len(block.operations) if block.exitswitch == c_last_exception: @@ -115,46 +114,6 @@ # in the second block! return split_block(annotator, block, 0, _forcelink=block.inputargs) -def remove_direct_loops(annotator, graph): - """This is useful for code generators: it ensures that no link has - common input and output variables, which could occur if a block's exit - points back directly to the same block. It allows code generators to be - simpler because they don't have to worry about overwriting input - variables when generating a sequence of assignments.""" - def visit(link): - if isinstance(link, Link) and link.prevblock is link.target: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def remove_double_links(annotator, graph): - """This can be useful for code generators: it ensures that no block has - more than one incoming links from one and the same other block. It allows - argument passing along links to be implemented with phi nodes since the - value of an argument can be determined by looking from which block the - control passed. 
""" - def visit(block): - if isinstance(block, Block): - double_links = [] - seen = {} - for link in block.exits: - if link.target in seen: - double_links.append(link) - seen[link.target] = True - for link in double_links: - insert_empty_block(annotator, link) - traverse(visit, graph) - -def no_links_to_startblock(graph): - """Ensure no links to start block.""" - links_to_start_block = False - for block in graph.iterblocks(): - for link in block.exits: - if link.target == graph.startblock: - links_to_start_block = True - break - if links_to_start_block: - insert_empty_startblock(None, graph) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from pypy.annotation import model as annmodel diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -36,29 +36,35 @@ init_defaults = Defaults([None]) def init__List(space, w_list, __args__): + from pypy.objspace.std.tupleobject import W_TupleObject # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) - # - # this is the old version of the loop at the end of this function: - # - # w_list.wrappeditems = space.unpackiterable(w_iterable) - # - # This is commented out to avoid assigning a new RPython list to - # 'wrappeditems', which defeats the W_FastSeqIterObject optimization. - # items_w = w_list.wrappeditems del items_w[:] if w_iterable is not None: - w_iterator = space.iter(w_iterable) - while True: - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - items_w.append(w_item) + # unfortunately this is duplicating space.unpackiterable to avoid + # assigning a new RPython list to 'wrappeditems', which defeats the + # W_FastSeqIterObject optimization. 
+ if isinstance(w_iterable, W_ListObject): + items_w.extend(w_iterable.wrappeditems) + elif isinstance(w_iterable, W_TupleObject): + items_w.extend(w_iterable.wrappeditems) + else: + _init_from_iterable(space, items_w, w_iterable) + +def _init_from_iterable(space, items_w, w_iterable): + # in its own function to make the JIT look into init__List + # XXX this would need a JIT driver somehow? + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + items_w.append(w_item) def len__List(space, w_list): result = len(w_list.wrappeditems) diff --git a/pypy/module/cpyext/include/compile.h b/pypy/module/cpyext/include/compile.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/compile.h @@ -0,0 +1,13 @@ +#ifndef Py_COMPILE_H +#define Py_COMPILE_H + +#include "code.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_COMPILE_H */ diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py --- a/pypy/jit/metainterp/test/test_warmspot.py +++ b/pypy/jit/metainterp/test/test_warmspot.py @@ -6,7 +6,7 @@ from pypy.jit.backend.llgraph import runner from pypy.jit.metainterp.history import BoxInt -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4.1' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.5-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/translator/backendopt/test/test_ssa.py b/pypy/translator/backendopt/test/test_ssa.py --- a/pypy/translator/backendopt/test/test_ssa.py +++ b/pypy/translator/backendopt/test/test_ssa.py @@ -1,6 +1,6 @@ from pypy.translator.backendopt.ssa import * from pypy.translator.translator import TranslationContext -from pypy.objspace.flow.model import flatten, Block, Link, Variable, Constant +from pypy.objspace.flow.model import Block, Link, Variable, Constant from pypy.objspace.flow.model import SpaceOperation diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -61,14 +61,44 @@ except OperationError, e: print e.errorstr(self.space) raise + + try: + del self.space.getexecutioncontext().cpyext_threadstate + except AttributeError: + pass + if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." + at api.cpython_api([api.Py_ssize_t], api.Py_ssize_t, error=-1) +def PyPy_TypedefTest1(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_t + return 0 + + at api.cpython_api([api.Py_ssize_tP], api.Py_ssize_tP) +def PyPy_TypedefTest2(space, arg): + assert lltype.typeOf(arg) == api.Py_ssize_tP + return None + class TestConversion(BaseApiTest): def test_conversions(self, space, api): api.PyPy_GetWrapped(space.w_None) api.PyPy_GetReference(space.w_None) + def test_typedef(self, space): + from pypy.translator.c.database import LowLevelDatabase + db = LowLevelDatabase() + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) + == ('Py_ssize_t', 'Py_ssize_t arg0')) + assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) + == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + + PyPy_TypedefTest1(space, 0) + ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') + ppos[0] = 0 + PyPy_TypedefTest2(space, ppos) + lltype.free(ppos, flavor='raw') + def test_copy_header_files(tmpdir): 
api.copy_header_files(tmpdir) diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py --- a/pypy/jit/backend/model.py +++ b/pypy/jit/backend/model.py @@ -23,18 +23,22 @@ self.fail_descr_list = [] self.fail_descr_free_list = [] + def reserve_some_free_fail_descr_number(self): + lst = self.fail_descr_list + if len(self.fail_descr_free_list) > 0: + n = self.fail_descr_free_list.pop() + assert lst[n] is None + else: + n = len(lst) + lst.append(None) + return n + def get_fail_descr_number(self, descr): assert isinstance(descr, history.AbstractFailDescr) n = descr.index if n < 0: - lst = self.fail_descr_list - if len(self.fail_descr_free_list) > 0: - n = self.fail_descr_free_list.pop() - assert lst[n] is None - lst[n] = descr - else: - n = len(lst) - lst.append(descr) + n = self.reserve_some_free_fail_descr_number() + self.fail_descr_list[n] = descr descr.index = n return n @@ -294,6 +298,13 @@ def record_faildescr_index(self, n): self.faildescr_indices.append(n) + def reserve_and_record_some_faildescr_index(self): + # like record_faildescr_index(), but invent and return a new, + # unused faildescr index + n = self.cpu.reserve_some_free_fail_descr_number() + self.record_faildescr_index(n) + return n + def compiling_a_bridge(self): self.cpu.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/pypy/translator/backendopt/inline.py b/pypy/translator/backendopt/inline.py --- a/pypy/translator/backendopt/inline.py +++ b/pypy/translator/backendopt/inline.py @@ -5,7 +5,7 @@ from pypy.objspace.flow.model import Variable, Constant, Block, Link from pypy.objspace.flow.model import SpaceOperation, c_last_exception from pypy.objspace.flow.model import FunctionGraph -from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph +from pypy.objspace.flow.model import mkentrymap, checkgraph from pypy.annotation import model as annmodel from pypy.rpython.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr from pypy.rpython.lltypesystem.lltype import 
normalizeptr @@ -13,7 +13,7 @@ from pypy.rpython import rmodel from pypy.tool.algo import sparsemat from pypy.translator.backendopt import removenoops -from pypy.translator.backendopt.support import log, split_block_with_keepalive +from pypy.translator.backendopt.support import log from pypy.translator.unsimplify import split_block from pypy.translator.backendopt.support import find_backedges, find_loop_blocks from pypy.translator.backendopt.canraise import RaiseAnalyzer @@ -280,13 +280,6 @@ self.varmap[var] = copyvar(None, var) return self.varmap[var] - def generate_keepalive(self, *args): - from pypy.translator.backendopt.support import generate_keepalive - if self.translator.rtyper.type_system.name == 'lltypesystem': - return generate_keepalive(*args) - else: - return [] - def passon_vars(self, cache_key): if cache_key in self._passon_vars: return self._passon_vars[cache_key] @@ -397,7 +390,6 @@ for exceptionlink in afterblock.exits[1:]: if exc_match(vtable, exceptionlink.llexitcase): passon_vars = self.passon_vars(link.prevblock) - copiedblock.operations += self.generate_keepalive(passon_vars) copiedlink.target = exceptionlink.target linkargs = self.find_args_in_exceptional_case( exceptionlink, link.prevblock, var_etype, var_evalue, afterblock, passon_vars) @@ -445,7 +437,6 @@ del blocks[-1].exits[0].llexitcase linkargs = copiedexceptblock.inputargs copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) - copiedexceptblock.operations += self.generate_keepalive(linkargs) def do_inline(self, block, index_operation): splitlink = split_block(None, block, index_operation) @@ -457,11 +448,8 @@ # this copy is created with the method passon_vars self.original_passon_vars = [arg for arg in block.exits[0].args if isinstance(arg, Variable)] - n = 0 - while afterblock.operations[n].opname == 'keepalive': - n += 1 - assert afterblock.operations[n].opname == self.op.opname - self.op = afterblock.operations.pop(n) + assert afterblock.operations[0].opname == self.op.opname 
+ self.op = afterblock.operations.pop(0) #vars that need to be passed through the blocks of the inlined function linktoinlined = splitlink copiedstartblock = self.copy_block(self.graph_to_inline.startblock) @@ -551,7 +539,6 @@ OP_WEIGHTS = {'same_as': 0, 'cast_pointer': 0, - 'keepalive': 0, 'malloc': 2, 'yield_current_frame_to_caller': sys.maxint, # XXX bit extreme 'resume_point': sys.maxint, # XXX bit extreme @@ -784,5 +771,4 @@ call_count_pred=call_count_pred) log.inlining('inlined %d callsites.'% (count,)) for graph in graphs: - removenoops.remove_superfluous_keep_alive(graph) removenoops.remove_duplicate_casts(graph, translator) diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -0,0 +1,66 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class AppTestFrameObject(AppTestCpythonExtensionBase): + + def test_forge_frame(self): + module = self.import_extension('foo', [ + ("raise_exception", "METH_NOARGS", + """ + PyObject *py_srcfile = PyString_FromString("filename"); + PyObject *py_funcname = PyString_FromString("funcname"); + PyObject *py_globals = PyDict_New(); + PyObject *empty_string = PyString_FromString(""); + PyObject *empty_tuple = PyTuple_New(0); + PyCodeObject *py_code; + PyFrameObject *py_frame; + + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + empty_tuple, /*PyObject *consts,*/ + empty_tuple, /*PyObject *names,*/ + empty_tuple, /*PyObject *varnames,*/ + empty_tuple, /*PyObject *freevars,*/ + empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + 42, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + + if (!py_code) goto bad; + py_frame = 
PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = 48; /* Does not work with CPython */ + PyErr_SetString(PyExc_ValueError, "error message"); + PyTraceBack_Here(py_frame); + bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(empty_tuple); + Py_XDECREF(py_globals); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); + return NULL; + """), + ]) + exc = raises(ValueError, module.raise_exception) + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" + + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -1,16 +1,20 @@ +from __future__ import with_statement + import re from pypy.rpython.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import generic_cpy_call, cpython_api, PyObject +from pypy.module.cpyext.api import ( + cpython_api, generic_cpy_call, PyObject, Py_ssize_t) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, - hashfunc, descrgetfunc, descrsetfunc, objobjproc) + cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, readbufferproc) from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.buffer import Buffer as W_Buffer from pypy.interpreter.argument import Arguments 
from pypy.rlib.unroll import unrolling_iterable from pypy.rlib.objectmodel import specialize @@ -193,18 +197,59 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) +class CPyBuffer(W_Buffer): + # Similar to Py_buffer + + def __init__(self, ptr, size, w_obj): + self.ptr = ptr + self.size = size + self.w_obj = w_obj # kept alive + + def getlength(self): + return self.size + + def getitem(self, index): + return self.ptr[index] + +def wrap_getreadbuffer(space, w_self, w_args, func): + func_target = rffi.cast(readbufferproc, func) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: + index = rffi.cast(Py_ssize_t, 0) + size = generic_cpy_call(space, func_target, w_self, index, ptr) + if size < 0: + space.fromcache(State).check_and_raise_exception(always=True) + return space.wrap(CPyBuffer(ptr[0], size, w_self)) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) check_num_args(space, w_args, 1) - args_w = space.fixedview(w_args) - other_w = args_w[0] + w_other, = space.fixedview(w_args) return generic_cpy_call(space, func_target, - w_self, other_w, rffi.cast(rffi.INT_real, OP_CONST)) + w_self, w_other, rffi.cast(rffi.INT_real, OP_CONST)) return inner richcmp_eq = get_richcmp_func(Py_EQ) richcmp_ne = get_richcmp_func(Py_NE) +richcmp_lt = get_richcmp_func(Py_LT) +richcmp_le = get_richcmp_func(Py_LE) +richcmp_gt = get_richcmp_func(Py_GT) +richcmp_ge = get_richcmp_func(Py_GE) + +def wrap_cmpfunc(space, w_self, w_args, func): + func_target = rffi.cast(cmpfunc, func) + check_num_args(space, w_args, 1) + w_other, = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(w_self), + space.type(w_other))): + raise OperationError(space.w_TypeError, space.wrap( + "%s.__cmp__(x,y) requires y to be a '%s', not a '%s'" % + (space.type(w_self).getname(space), + space.type(w_self).getname(space), + space.type(w_other).getname(space)))) + + return 
space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): @@ -466,7 +511,7 @@ "oct(x)"), UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc, "hex(x)"), - NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, + NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "x[y:z] <==> x[y.__index__():z.__index__()]"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+"), @@ -571,12 +616,19 @@ for regex, repl in slotdef_replacements: slotdefs_str = re.sub(regex, repl, slotdefs_str) +slotdefs = eval(slotdefs_str) +# PyPy addition +slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), +) + slotdefs_for_tp_slots = unrolling_iterable( [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in eval(slotdefs_str)]) + for x in slotdefs]) + slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) - for x in eval(slotdefs_str)]) + for x in slotdefs]) if __name__ == "__main__": print slotdefs_str diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -154,6 +154,24 @@ self.emit_operation(op) + def optimize_INT_LSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + + def optimize_INT_RSHIFT(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_CALL_PURE(self, op): arg_consts = [] for i in range(op.numargs()): diff --git 
a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py --- a/pypy/objspace/std/fake.py +++ b/pypy/objspace/std/fake.py @@ -151,9 +151,9 @@ class CPythonFakeFrame(eval.Frame): - def __init__(self, space, code, w_globals=None, numlocals=-1): + def __init__(self, space, code, w_globals=None): self.fakecode = code - eval.Frame.__init__(self, space, w_globals, numlocals) + eval.Frame.__init__(self, space, w_globals) def getcode(self): return self.fakecode diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -101,7 +101,7 @@ # first annotate, rtype, and backendoptimize PyPy try: - interp, graph = get_interpreter(entry_point, [], backendopt=True, + interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -8,9 +8,8 @@ from pypy.rpython.lltypesystem.lloperation import llop from pypy.rpython.annlowlevel import llhelper from pypy.jit.backend.model import CompiledLoopToken -from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager, - X86XMMRegisterManager, get_ebp_ofs, - _get_scale) +from pypy.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, + _get_scale, gpr_reg_mgr_cls) from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64) @@ -78,8 +77,8 @@ self.loop_run_counters = [] self.float_const_neg_addr = 0 self.float_const_abs_addr = 0 - self.malloc_fixedsize_slowpath1 = 0 - self.malloc_fixedsize_slowpath2 = 0 + self.malloc_slowpath1 = 0 + self.malloc_slowpath2 = 0 self.memcpy_addr = 0 self.setup_failure_recovery() self._debug = False @@ -124,8 +123,8 @@ self._build_failure_recovery(True, withfloats=True) support.ensure_sse2_floats() self._build_float_constants() - if 
hasattr(gc_ll_descr, 'get_malloc_fixedsize_slowpath_addr'): - self._build_malloc_fixedsize_slowpath() + if gc_ll_descr.get_malloc_slowpath_addr is not None: + self._build_malloc_slowpath() self._build_stack_check_slowpath() debug_start('jit-backend-counts') self.set_debug(have_debug_prints()) @@ -133,6 +132,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" + self.current_clt = looptoken.compiled_loop_token self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -145,6 +145,7 @@ self.mc = None self.looppos = -1 self.currently_compiling_loop = None + self.current_clt = None def finish_once(self): if self._debug: @@ -170,26 +171,47 @@ self.float_const_neg_addr = float_constants self.float_const_abs_addr = float_constants + 16 - def _build_malloc_fixedsize_slowpath(self): + def _build_malloc_slowpath(self): + # With asmgcc, we need two helpers, so that we can write two CALL + # instructions in assembler, with a mark_gc_roots in between. + # With shadowstack, this is not needed, so we produce a single helper. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # # ---------- first helper for the slow path of malloc ---------- mc = codebuf.MachineCodeBlockWrapper() if self.cpu.supports_floats: # save the XMM registers in for i in range(self.cpu.NUM_REGS):# the *caller* frame, from esp+8 mc.MOVSD_sx((WORD*2)+8*i, i) mc.SUB_rr(edx.value, eax.value) # compute the size we want - if IS_X86_32: - mc.MOV_sr(WORD, edx.value) # save it as the new argument - elif IS_X86_64: - # rdi can be clobbered: its content was forced to the stack - # by _fastpath_malloc(), like all other save_around_call_regs. 
- mc.MOV_rr(edi.value, edx.value) - - addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr() - mc.JMP(imm(addr)) # tail call to the real malloc - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath1 = rawstart - # ---------- second helper for the slow path of malloc ---------- - mc = codebuf.MachineCodeBlockWrapper() + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() + # + if gcrootmap is not None and gcrootmap.is_shadow_stack: + # ---- shadowstack ---- + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_br(ofs, reg.value) + mc.SUB_ri(esp.value, 16 - WORD) # stack alignment of 16 bytes + if IS_X86_32: + mc.MOV_sr(0, edx.value) # push argument + elif IS_X86_64: + mc.MOV_rr(edi.value, edx.value) + mc.CALL(imm(addr)) + mc.ADD_ri(esp.value, 16 - WORD) + for reg, ofs in gpr_reg_mgr_cls.REGLOC_TO_COPY_AREA_OFS.items(): + mc.MOV_rb(reg.value, ofs) + else: + # ---- asmgcc ---- + if IS_X86_32: + mc.MOV_sr(WORD, edx.value) # save it as the new argument + elif IS_X86_64: + # rdi can be clobbered: its content was forced to the stack + # by _fastpath_malloc(), like all other save_around_call_regs. 
+ mc.MOV_rr(edi.value, edx.value) + mc.JMP(imm(addr)) # tail call to the real malloc + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.malloc_slowpath1 = rawstart + # ---------- second helper for the slow path of malloc ---------- + mc = codebuf.MachineCodeBlockWrapper() + # if self.cpu.supports_floats: # restore the XMM registers for i in range(self.cpu.NUM_REGS):# from where they were saved mc.MOVSD_xs(i, (WORD*2)+8*i) @@ -197,7 +219,7 @@ mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) - self.malloc_fixedsize_slowpath2 = rawstart + self.malloc_slowpath2 = rawstart def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() @@ -544,7 +566,7 @@ def _get_offset_of_ebp_from_esp(self, allocated_depth): # Given that [EBP] is where we saved EBP, i.e. in the last word # of our fixed frame, then the 'words' value is: - words = (self.cpu.FRAME_FIXED_SIZE - 1) + allocated_depth + words = (FRAME_FIXED_SIZE - 1) + allocated_depth # align, e.g. for Mac OS X aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP return -WORD * aligned_words @@ -557,6 +579,10 @@ for regloc in self.cpu.CALLEE_SAVE_REGISTERS: self.mc.PUSH_r(regloc.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + def _call_header_with_stack_check(self): if self.stack_check_slowpath == 0: pass # no stack check (e.g. 
not translated) @@ -578,12 +604,32 @@ def _call_footer(self): self.mc.LEA_rb(esp.value, -len(self.cpu.CALLEE_SAVE_REGISTERS) * WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.POP_r(self.cpu.CALLEE_SAVE_REGISTERS[i].value) self.mc.POP_r(ebp.value) self.mc.RET() + def _call_header_shadowstack(self, gcrootmap): + # we need to put two words into the shadowstack: the MARKER + # and the address of the frame (ebp, actually) + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] + self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER + self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp + self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + + def _call_footer_shadowstack(self, gcrootmap): + rst = gcrootmap.get_root_stack_top_addr() + assert rx86.fits_in_32bits(rst) + self.mc.SUB_ji8(rst, 2*WORD) # SUB [rootstacktop], 2*WORD + def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth): if IS_X86_64: return self._assemble_bootstrap_direct_call_64(arglocs, jmppos, stackdepth) @@ -693,8 +739,8 @@ nonfloatlocs, floatlocs = arglocs self._call_header() stackadjustpos = self._patchable_stackadjust() - tmp = X86RegisterManager.all_regs[0] - xmmtmp = X86XMMRegisterManager.all_regs[0] + tmp = eax + xmmtmp = xmm0 self.mc.begin_reuse_scratch_register() for i in range(len(nonfloatlocs)): loc = nonfloatlocs[i] @@ -903,9 +949,9 @@ self.implement_guard(guard_token, checkfalsecond) return genop_cmp_guard_float - def _emit_call(self, x, arglocs, start=0, tmp=eax): + def _emit_call(self, force_index, x, arglocs, start=0, tmp=eax): if IS_X86_64: - return self._emit_call_64(x, arglocs, start) + return self._emit_call_64(force_index, 
x, arglocs, start) p = 0 n = len(arglocs) @@ -931,9 +977,9 @@ self._regalloc.reserve_param(p//WORD) # x is a location self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) - def _emit_call_64(self, x, arglocs, start=0): + def _emit_call_64(self, force_index, x, arglocs, start): src_locs = [] dst_locs = [] xmm_src_locs = [] @@ -991,12 +1037,27 @@ self._regalloc.reserve_param(len(pass_on_stack)) self.mc.CALL(x) - self.mark_gc_roots() + self.mark_gc_roots(force_index) def call(self, addr, args, res): - self._emit_call(imm(addr), args) + force_index = self.write_new_force_index() + self._emit_call(force_index, imm(addr), args) assert res is eax + def write_new_force_index(self): + # for shadowstack only: get a new, unused force_index number and + # write it to FORCE_INDEX_OFS. Used to record the call shape + # (i.e. where the GC pointers are in the stack) around a CALL + # instruction that doesn't already have a force_index. + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + clt = self.current_clt + force_index = clt.reserve_and_record_some_faildescr_index() + self.mc.MOV_bi(FORCE_INDEX_OFS, force_index) + return force_index + else: + return 0 + genop_int_neg = _unaryop("NEG") genop_int_invert = _unaryop("NOT") genop_int_add = _binaryop("ADD", True) @@ -1212,6 +1273,11 @@ assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable) + def set_new_array_length(self, loc, ofs_length, loc_num_elem): + assert isinstance(loc, RegLoc) + assert isinstance(loc_num_elem, ImmedLoc) + self.mc.MOV(mem(loc, ofs_length), loc_num_elem) + # XXX genop_new is abused for all varsized mallocs with Boehm, for now # (instead of genop_new_array, genop_newstr, genop_newunicode) def genop_new(self, op, arglocs, result_loc): @@ -1790,6 +1856,10 @@ self.pending_guard_tokens.append(guard_token) def genop_call(self, op, arglocs, resloc): + force_index = self.write_new_force_index() + 
self._genop_call(op, arglocs, resloc, force_index) + + def _genop_call(self, op, arglocs, resloc, force_index): sizeloc = arglocs[0] assert isinstance(sizeloc, ImmedLoc) size = sizeloc.value @@ -1803,8 +1873,8 @@ tmp = ecx else: tmp = eax - - self._emit_call(x, arglocs, 3, tmp=tmp) + + self._emit_call(force_index, x, arglocs, 3, tmp=tmp) if IS_X86_32 and isinstance(resloc, StackLoc) and resloc.width == 8: # a float or a long long return @@ -1835,7 +1905,7 @@ faildescr = guard_op.getdescr() fail_index = self.cpu.get_fail_descr_number(faildescr) self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index) - self.genop_call(op, arglocs, result_loc) + self._genop_call(op, arglocs, result_loc, fail_index) self.mc.CMP_bi(FORCE_INDEX_OFS, 0) self.implement_guard(guard_token, 'L') @@ -1849,8 +1919,8 @@ assert len(arglocs) - 2 == len(descr._x86_arglocs[0]) # # Write a call to the direct_bootstrap_code of the target assembler - self._emit_call(imm(descr._x86_direct_bootstrap_code), arglocs, 2, - tmp=eax) + self._emit_call(fail_index, imm(descr._x86_direct_bootstrap_code), + arglocs, 2, tmp=eax) if op.result is None: assert result_loc is None value = self.cpu.done_with_this_frame_void_v @@ -1875,7 +1945,7 @@ jd = descr.outermost_jitdriver_sd assert jd is not None asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr) - self._emit_call(imm(asm_helper_adr), [eax, arglocs[1]], 0, + self._emit_call(fail_index, imm(asm_helper_adr), [eax, arglocs[1]], 0, tmp=ecx) if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT: self.mc.FSTP_b(result_loc.value) @@ -1902,7 +1972,7 @@ # load the return value from fail_boxes_xxx[0] kind = op.result.type if kind == FLOAT: - xmmtmp = X86XMMRegisterManager.all_regs[0] + xmmtmp = xmm0 adr = self.fail_boxes_float.get_addr_for_num(0) self.mc.MOVSD(xmmtmp, heap(adr)) self.mc.MOVSD(result_loc, xmmtmp) @@ -1997,11 +2067,16 @@ not_implemented("not implemented operation (guard): %s" % op.getopname()) - def mark_gc_roots(self): + def 
mark_gc_roots(self, force_index, use_copy_area=False): + if force_index < 0: + return # not needed gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: - mark = self._regalloc.get_mark_gc_roots(gcrootmap) - self.mc.insert_gcroot_marker(mark) + mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area) + if gcrootmap.is_shadow_stack: + gcrootmap.write_callshape(mark, force_index) + else: + self.mc.insert_gcroot_marker(mark) def target_arglocs(self, loop_token): return loop_token._x86_arglocs @@ -2013,8 +2088,7 @@ else: self.mc.JMP(imm(loop_token._x86_loop_code)) - def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr, - size, tid): + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid): size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery) self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edx.value, (eax.value, size)) @@ -2022,7 +2096,7 @@ self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() - # See comments in _build_malloc_fixedsize_slowpath for the + # See comments in _build_malloc_slowpath for the # details of the two helper functions that we are calling below. # First, we need to call two of them and not just one because we # need to have a mark_gc_roots() in between. Then the calling @@ -2032,19 +2106,27 @@ # result in EAX; slowpath_addr2 additionally returns in EDX a # copy of heap(nursery_free_adr), so that the final MOV below is # a no-op. 
- slowpath_addr1 = self.malloc_fixedsize_slowpath1 + # reserve room for the argument to the real malloc and the # 8 saved XMM regs self._regalloc.reserve_param(1+16) - self.mc.CALL(imm(slowpath_addr1)) - self.mark_gc_roots() - slowpath_addr2 = self.malloc_fixedsize_slowpath2 + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack) + if not shadow_stack: + # there are two helpers to call only with asmgcc + slowpath_addr1 = self.malloc_slowpath1 + self.mc.CALL(imm(slowpath_addr1)) + self.mark_gc_roots(self.write_new_force_index(), + use_copy_area=shadow_stack) + slowpath_addr2 = self.malloc_slowpath2 self.mc.CALL(imm(slowpath_addr2)) offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) # on 64-bits, 'tid' is a value that fits in 31 bits + assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/traceback.h @@ -0,0 +1,12 @@ +#ifndef Py_TRACEBACK_H +#define Py_TRACEBACK_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef PyObject PyTracebackObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_TRACEBACK_H */ diff --git a/pypy/jit/metainterp/test/test_loop.py b/pypy/jit/metainterp/test/test_loop.py --- a/pypy/jit/metainterp/test/test_loop.py +++ b/pypy/jit/metainterp/test/test_loop.py @@ -2,7 +2,7 @@ from pypy.rlib.jit import JitDriver from pypy.rlib.objectmodel import compute_hash from pypy.jit.metainterp.warmspot import ll_meta_interp, get_stats -from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin +from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.metainterp.resoperation import rop from pypy.jit.metainterp import history From 
commits-noreply at bitbucket.org Mon Apr 18 15:12:00 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 18 Apr 2011 15:12:00 +0200 (CEST) Subject: [pypy-svn] pypy default: Cannot use edx here, it may be used to pass arguments! Message-ID: <20110418131200.10973282BF2@codespeak.net> Author: Armin Rigo Branch: Changeset: r43449:a3cdc39781bb Date: 2011-04-18 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/a3cdc39781bb/ Log: Cannot use edx here, it may be used to pass arguments! diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -619,11 +619,16 @@ # and the address of the frame (ebp, actually) rst = gcrootmap.get_root_stack_top_addr() assert rx86.fits_in_32bits(rst) + if IS_X86_64: + # cannot use rdx here, it's used to pass arguments! + tmp = X86_64_SCRATCH_REG + else: + tmp = edx self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] - self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.LEA_rm(tmp.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp - self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + self.mc.MOV_jr(rst, tmp.value) # MOV [rootstacktop], edx def _call_footer_shadowstack(self, gcrootmap): rst = gcrootmap.get_root_stack_top_addr() @@ -1634,10 +1639,6 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 - if kind == self.DESCR_FLOAT: - size = 2 - else: - size = 1 loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break From commits-noreply at bitbucket.org Mon Apr 18 15:12:00 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 18 Apr 2011 15:12:00 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Cannot use edx here, it may be used to pass arguments! 
Message-ID: <20110418131200.8F5AA282BF2@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43450:aed7186b5453 Date: 2011-04-18 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/aed7186b5453/ Log: Cannot use edx here, it may be used to pass arguments! diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -619,11 +619,16 @@ # and the address of the frame (ebp, actually) rst = gcrootmap.get_root_stack_top_addr() assert rx86.fits_in_32bits(rst) + if IS_X86_64: + # cannot use rdx here, it's used to pass arguments! + tmp = X86_64_SCRATCH_REG + else: + tmp = edx self.mc.MOV_rj(eax.value, rst) # MOV eax, [rootstacktop] - self.mc.LEA_rm(edx.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] + self.mc.LEA_rm(tmp.value, (eax.value, 2*WORD)) # LEA edx, [eax+2*WORD] self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER) # MOV [eax], MARKER self.mc.MOV_mr((eax.value, WORD), ebp.value) # MOV [eax+WORD], ebp - self.mc.MOV_jr(rst, edx.value) # MOV [rootstacktop], edx + self.mc.MOV_jr(rst, tmp.value) # MOV [rootstacktop], edx def _call_footer_shadowstack(self, gcrootmap): rst = gcrootmap.get_root_stack_top_addr() @@ -1640,10 +1645,6 @@ break kind = code & 3 code = (code - self.CODE_FROMSTACK) >> 2 - if kind == self.DESCR_FLOAT: - size = 2 - else: - size = 1 loc = X86FrameManager.frame_pos(code, descr_to_box_type[kind]) elif code == self.CODE_STOP: break From commits-noreply at bitbucket.org Mon Apr 18 15:39:52 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 18 Apr 2011 15:39:52 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Duh. Message-ID: <20110418133952.E6C73282BF7@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43451:b84928ba4390 Date: 2011-04-18 15:39 +0200 http://bitbucket.org/pypy/pypy/changeset/b84928ba4390/ Log: Duh. 
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py --- a/pypy/jit/backend/llsupport/descr.py +++ b/pypy/jit/backend/llsupport/descr.py @@ -411,7 +411,7 @@ def get_result_size(self, translate_support_code): return symbolic.get_size_of_ptr(translate_support_code) -class GcPtrHidden32CallDescr(BaseCallDescr): +class GcPtrHidden32CallDescr(GcPtrCallDescr): _clsname = 'GcPtrHidden32CallDescr' _return_type = 'H' def get_result_size(self, translate_support_code): From commits-noreply at bitbucket.org Mon Apr 18 15:48:43 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Mon, 18 Apr 2011 15:48:43 +0200 (CEST) Subject: [pypy-svn] pypy default: port test_xor to test_pypy_new; please review the XXX, I do not really understand what is happening Message-ID: <20110418134843.A48BF282BF7@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43452:3ea645eacf78 Date: 2011-04-18 15:48 +0200 http://bitbucket.org/pypy/pypy/changeset/3ea645eacf78/ Log: port test_xor to test_pypy_new; please review the XXX, I do not really understand what is happening diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1530,3 +1530,45 @@ ## assert call.getarg(0).value == pow_addr ## assert call.getarg(1).value == 2.0 ## assert call.getarg(2).value == 3.0 + + def test_xor(self): + def main(a, b): + i = sa = 0 + while i < 300: + if a > 0: # Specialises the loop + pass + if b > 10: + pass + if a^b >= 0: + sa += 1 # ID: add + i += 1 + return sa + + # if both are >=0, a^b is known to be >=0 + log = self.run(main, [3, 14], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i9 = int_lt(i6, 300) + guard_true(i9, descr=...) + i11 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) 
+ i13 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i13, i11, descr=) + """) + + # XXX: I don't understand why this assert passes, because the + # optimizer doesn't know that b >=0 + log = self.run(main, [3, 4], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i9 = int_lt(i6, 300) + guard_true(i9, descr=...) + i11 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i13 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i13, i11, descr=) + """) diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -260,30 +260,6 @@ assert call.getarg(0).value == pow_addr assert call.getarg(1).value == 2.0 assert call.getarg(2).value == 3.0 - - def test_xor(self): - values = (-4, -3, -2, -1, 0, 1, 2, 3, 4) - for a in values: - for b in values: - if a^b >= 0: - r = 2000 - else: - r = 0 - ops = 46 - - self.run_source(''' - def main(a, b): - i = sa = 0 - while i < 2000: - if a > 0: # Specialises the loop - pass - if b > 1: - pass - if a^b >= 0: - sa += 1 - i += 1 - return sa - ''', ops, ([a, b], r)) def test_shift(self): from sys import maxint From commits-noreply at bitbucket.org Mon Apr 18 16:32:04 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 16:32:04 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: a missing merge? Message-ID: <20110418143204.B0A60282C18@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43453:65472e284ecc Date: 2011-04-18 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/65472e284ecc/ Log: a missing merge? diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -134,6 +134,7 @@ def setup(self, looptoken): assert self.memcpy_addr != 0, "setup_once() not called?" 
self.current_clt = looptoken.compiled_loop_token + self.invalidate_positions = [] self.pending_guard_tokens = [] self.mc = codebuf.MachineCodeBlockWrapper() if self.datablockwrapper is None: @@ -142,6 +143,7 @@ allblocks) def teardown(self): + self.invalidate_positions = None self.pending_guard_tokens = None self.mc = None self.looppos = -1 @@ -450,9 +452,10 @@ mc.copy_to_raw_memory(addr) else: # guard not invalidate, patch where it jumps - pos, _ = clt.invalidate_positions[inv_counter] - clt.invalidate_positions[inv_counter] = (pos + rawstart, - relative_target) + pos, _ = self.invalidate_positions[inv_counter] + clt.invalidate_positions.append((pos + rawstart, + relative_target)) + inv_counter += 1 def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1458,9 +1461,9 @@ def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, locs, ign_2): - pos = self.mc.get_relative_pos() + 1 # after jmp + pos = self.mc.get_relative_pos() + 1 # after potential jmp guard_token.pos_jump_offset = pos - self.current_clt.invalidate_positions.append((pos, 0)) + self.invalidate_positions.append((pos, 0)) self.pending_guard_tokens.append(guard_token) def genop_guard_guard_exception(self, ign_1, guard_op, guard_token, diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -233,5 +233,34 @@ assert self.meta_interp(g, []) == g() + def test_invalidate_bridge(self): + jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) + + class Foo(object): + _immutable_fields_ = ['a?'] + + def f(foo): + i = 0 + total = 0 + while i < 10: + jitdriver.jit_merge_point(i=i, total=total, foo=foo) + if i > 5: + total += foo.a + else: + total += 2*foo.a + i += 1 + return total + + def main(): + foo = Foo() + foo.a = 1 + total = f(foo) + foo.a = 2 + total += f(foo) + return total + + res = self.meta_interp(main, 
[]) + assert res == main() + class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): pass diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py --- a/pypy/jit/metainterp/test/test_virtualizable.py +++ b/pypy/jit/metainterp/test/test_virtualizable.py @@ -2,6 +2,7 @@ from pypy.rpython.extregistry import ExtRegistryEntry from pypy.rpython.lltypesystem import lltype, lloperation, rclass, llmemory from pypy.rpython.annlowlevel import llhelper +from pypy.rpython.rclass import IR_IMMUTABLE, IR_ARRAY_IMMUTABLE from pypy.jit.codewriter.policy import StopAtXPolicy from pypy.jit.codewriter import heaptracker from pypy.rlib.jit import JitDriver, hint, dont_look_inside @@ -45,7 +46,7 @@ ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY._hints['virtualizable2_accessor'].initialize( - XY, {'inst_x' : "", 'inst_node' : ""}) + XY, {'inst_x' : IR_IMMUTABLE, 'inst_node' : IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY, xy_vtable, 'XY') @@ -210,7 +211,8 @@ ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), hints = {'virtualizable2_accessor': FieldListAccessor()}) XY2._hints['virtualizable2_accessor'].initialize( - XY2, {'inst_x' : "", 'inst_l1' : "[*]", 'inst_l2' : "[*]"}) + XY2, {'inst_x' : IR_IMMUTABLE, + 'inst_l1' : IR_ARRAY_IMMUTABLE, 'inst_l2' : IR_ARRAY_IMMUTABLE}) xy2_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) heaptracker.set_testing_vtable_for_gcstruct(XY2, xy2_vtable, 'XY2') diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py --- a/pypy/jit/metainterp/virtualizable.py +++ b/pypy/jit/metainterp/virtualizable.py @@ -1,6 +1,7 @@ from pypy.rpython.lltypesystem import lltype, llmemory from pypy.rpython.ootypesystem import ootype from pypy.rpython.annlowlevel import cast_base_ptr_to_instance +from pypy.rpython.rclass import 
IR_ARRAY_IMMUTABLE, IR_IMMUTABLE from pypy.rpython import rvirtualizable2 from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.unroll import unrolling_iterable @@ -10,7 +11,7 @@ from pypy.jit.metainterp.warmstate import wrap, unwrap from pypy.rlib.objectmodel import specialize -class VirtualizableInfo: +class VirtualizableInfo(object): TOKEN_NONE = 0 # must be 0 -- see also x86.call_assembler TOKEN_TRACING_RESCALL = -1 @@ -33,11 +34,13 @@ all_fields = accessor.fields static_fields = [] array_fields = [] - for name, suffix in all_fields.iteritems(): - if suffix == '[*]': + for name, tp in all_fields.iteritems(): + if tp == IR_ARRAY_IMMUTABLE: array_fields.append(name) + elif tp == IR_IMMUTABLE: + static_fields.append(name) else: - static_fields.append(name) + raise Exception("unknown type: %s" % tp) self.static_fields = static_fields self.array_fields = array_fields # From commits-noreply at bitbucket.org Mon Apr 18 18:27:12 2011 From: commits-noreply at bitbucket.org (hager) Date: Mon, 18 Apr 2011 18:27:12 +0200 (CEST) Subject: [pypy-svn] pypy default: (cfbolz, arigo, bivab, hager): fix is_true on unsigned long longs Message-ID: <20110418162712.6A6B7282B90@codespeak.net> Author: Sven Hager Branch: Changeset: r43454:4a39ab702a89 Date: 2011-04-18 18:24 +0200 http://bitbucket.org/pypy/pypy/changeset/4a39ab702a89/ Log: (cfbolz, arigo, bivab, hager): fix is_true on unsigned long longs diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py --- a/pypy/jit/codewriter/test/test_longlong.py +++ b/pypy/jit/codewriter/test/test_longlong.py @@ -82,6 +82,7 @@ assert list(oplist[0].args[4]) == [] v_x = oplist[0].result assert isinstance(v_x, Variable) + assert v_x.concretetype is T assert oplist[1].opname == 'residual_call_irf_i' assert oplist[1].args[0].value == 'llong_ne' assert oplist[1].args[1] == 'calldescr-76' diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py --- 
a/pypy/jit/codewriter/jtransform.py +++ b/pypy/jit/codewriter/jtransform.py @@ -857,7 +857,7 @@ self._normalize(self.rewrite_operation(op1))) def rewrite_op_llong_is_true(self, op): - v = varoftype(lltype.SignedLongLong) + v = varoftype(op.args[0].concretetype) op0 = SpaceOperation('cast_int_to_longlong', [Constant(0, lltype.Signed)], v) From commits-noreply at bitbucket.org Mon Apr 18 18:47:30 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Mon, 18 Apr 2011 18:47:30 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: Refactor existing PyArg_ParseTuple tests into several methods; add a new one for 's*' Message-ID: <20110418164730.2F086282B90@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43455:2a0b1ca24dbb Date: 2011-04-18 12:25 -0400 http://bitbucket.org/pypy/pypy/changeset/2a0b1ca24dbb/ Log: Refactor existing PyArg_ParseTuple tests into several methods; add a new one for 's*' diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -3,66 +3,124 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class AppTestGetargs(AppTestCpythonExtensionBase): - def test_pyarg_parse(self): - mod = self.import_extension('foo', [ - ('oneargint', 'METH_VARARGS', - ''' - int l; - if (!PyArg_ParseTuple(args, "i", &l)) { - return NULL; - } - return PyInt_FromLong(l); - ''' - ), - ('oneargandform', 'METH_VARARGS', - ''' - int l; - if (!PyArg_ParseTuple(args, "i:oneargandstuff", &l)) { - return NULL; - } - return PyInt_FromLong(l); - '''), - ('oneargobject', 'METH_VARARGS', - ''' - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - Py_INCREF(obj); - return obj; - '''), - ('oneargobjectandlisttype', 'METH_VARARGS', - ''' - PyObject *obj; - if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &obj)) { - return NULL; - } - Py_INCREF(obj); - return obj; - 
'''), - ('twoopt', 'METH_VARARGS', - ''' - PyObject *a; - PyObject *b = NULL; - if (!PyArg_ParseTuple(args, "O|O", &a, &b)) { - return NULL; - } - if (b) - Py_INCREF(b); - else - b = PyInt_FromLong(42); - /* return an owned reference */ - return b; - ''')]) - assert mod.oneargint(1) == 1 - raises(TypeError, mod.oneargint, None) - raises(TypeError, mod.oneargint) - assert mod.oneargandform(1) == 1 + def setup_method(self, func): + super(AppTestGetargs, self).setup_method(func) + self.w_import_parser = self.space.wrap(self.import_parser) + + def import_parser(self, implementation, argstyle='METH_VARARGS'): + mod = self.import_extension( + 'modname', [('funcname', argstyle, implementation)]) + return self.space.getattr(mod, self.space.wrap("funcname")) + + + def test_pyarg_parse_int(self): + """ + The `i` format specifier can be used to parse an integer. + """ + oneargint = self.import_parser( + ''' + int l; + if (!PyArg_ParseTuple(args, "i", &l)) { + return NULL; + } + return PyInt_FromLong(l); + ''') + assert oneargint(1) == 1 + raises(TypeError, oneargint, None) + raises(TypeError, oneargint) + + + def test_pyarg_parse_fromname(self): + """ + The name of the function parsing the arguments can be given after a `:` + in the argument format string. + """ + oneargandform = self.import_parser( + ''' + int l; + if (!PyArg_ParseTuple(args, "i:oneargandstuff", &l)) { + return NULL; + } + return PyInt_FromLong(l); + ''') + assert oneargandform(1) == 1 + + + def test_pyarg_parse_object(self): + """ + The `O` format specifier can be used to parse an arbitrary object. 
+ """ + oneargobject = self.import_parser( + ''' + PyObject *obj; + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + Py_INCREF(obj); + return obj; + ''') sentinel = object() - res = mod.oneargobject(sentinel) - raises(TypeError, "mod.oneargobjectandlisttype(sentinel)") + res = oneargobject(sentinel) assert res is sentinel - assert mod.twoopt(1) == 42 - assert mod.twoopt(1, 2) == 2 - raises(TypeError, mod.twoopt, 1, 2, 3) + + def test_pyarg_parse_restricted_object_type(self): + """ + The `O!` format specifier can be used to parse an object of a particular + type. + """ + oneargobjectandlisttype = self.import_parser( + ''' + PyObject *obj; + if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &obj)) { + return NULL; + } + Py_INCREF(obj); + return obj; + ''') + sentinel = object() + raises(TypeError, "oneargobjectandlisttype(sentinel)") + sentinel = [] + res = oneargobjectandlisttype(sentinel) + assert res is sentinel + + + def test_pyarg_parse_one_optional(self): + """ + An object corresponding to a format specifier after a `|` in the + argument format string is optional and may be passed or not. + """ + twoopt = self.import_parser( + ''' + PyObject *a; + PyObject *b = NULL; + if (!PyArg_ParseTuple(args, "O|O", &a, &b)) { + return NULL; + } + if (b) + Py_INCREF(b); + else + b = PyInt_FromLong(42); + /* return an owned reference */ + return b; + ''') + assert twoopt(1) == 42 + assert twoopt(1, 2) == 2 + raises(TypeError, twoopt, 1, 2, 3) + + + def test_pyarg_parse_string_py_buffer(self): + """ + The `s*` format specifier can be used to parse a str into a Py_buffer + structure containing a pointer to the string data and the length of the + string data. 
+ """ + pybuffer = self.import_parser( + ''' + Py_buffer buf; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + return PyString_FromStringAndSize(buf.buf, buf.len); + ''') + assert pybuffer('foo\0bar\0baz') == 'foo\0bar\0baz' From commits-noreply at bitbucket.org Mon Apr 18 18:47:32 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Mon, 18 Apr 2011 18:47:32 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: Very basic, incomplete implementation of some Py_buffer APIs Message-ID: <20110418164732.B84CD282C1B@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43456:db5434774665 Date: 2011-04-18 12:46 -0400 http://bitbucket.org/pypy/pypy/changeset/db5434774665/ Log: Very basic, incomplete implementation of some Py_buffer APIs diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -16,9 +16,6 @@ */ #define staticforward static -typedef void* Py_buffer; - - #define PyObject_HEAD \ long ob_refcnt; \ struct _typeobject *ob_type; @@ -130,6 +127,25 @@ typedef int (*visitproc)(PyObject *, void *); typedef int (*traverseproc)(PyObject *, visitproc, void *); +/* Py3k buffer interface */ +typedef struct bufferinfo { + void *buf; + PyObject *obj; /* owned reference */ + Py_ssize_t len; + Py_ssize_t itemsize; /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + int readonly; + int ndim; + char *format; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; + Py_ssize_t smalltable[2]; /* static store for shape and strides of + mono-dimensional buffers. 
*/ + void *internal; +} Py_buffer; + + typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -1,7 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, - PyVarObject, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, + PyVarObject, Py_buffer, + Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE, CONST_STRING, FILEP, fwrite, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, @@ -466,3 +467,15 @@ w_filename = space.wrap(rffi.charp2str(filename)) w_mode = space.wrap(rffi.charp2str(mode)) return space.call_method(space.builtin, 'file', w_filename, w_mode) + + + at cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, + lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL) +def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): + view.c_buf = buf + view.c_len = length + + + at cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) +def PyBuffer_Release(space, view): + pass diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -135,13 +135,11 @@ PyMem_FREE(ptr); } -#if 0 static void cleanup_buffer(void *ptr) { PyBuffer_Release((Py_buffer *) ptr); } -#endif static int addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) @@ -775,16 +773,22 @@ break; } case 's': {/* string */ + printf("Hello, string\n"); if (*format == '*') { - Py_FatalError("* format unsupported for strings in PyArg_*\n"); -#if 0 + printf("hello, buffer\n"); Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if 
(PyString_Check(arg)) { PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } + } else { + PyErr_SetString( + PyExc_NotImplementedError, + "s* not implemented for non-string values"); + return NULL; + } +#if 0 #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { uarg = UNICODE_DEFAULT_ENCODING(arg); @@ -801,13 +805,13 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } +#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", arg, msgbuf, bufsize); } format++; -#endif } else if (*format == '#') { void **p = (void **)va_arg(*p_va, char **); FETCH_SIZE; @@ -1616,7 +1620,7 @@ int match = 0; char *ks; if (!PyString_Check(key)) { - PyErr_SetString(PyExc_TypeError, + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); return cleanreturn(0, freelist); } diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -415,6 +415,23 @@ PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) +Py_buffer = cpython_struct( + "Py_buffer", ( + ('buf', rffi.VOIDP), + # ('obj', PyObject), + ('len', Py_ssize_t), + # ('itemsize', Py_ssize_t), + + # ('readonly', lltype.Signed), + # ('ndim', lltype.Signed), + # ('format', rffi.CCHARP), + # ('shape', Py_ssize_tP), + # ('strides', Py_ssize_tP), + # ('suboffets', Py_ssize_tP), + # ('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), + # ('internal', rffi.VOIDP) + )) + @specialize.memo() def is_PyObject(TYPE): if not isinstance(TYPE, lltype.Ptr): From commits-noreply at bitbucket.org Mon Apr 18 19:08:20 2011 From: commits-noreply at bitbucket.org (fijal) Date: Mon, 18 Apr 2011 19:08:20 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: Remove extra guard_not_invalidated ops Message-ID: <20110418170820.B0B8E282B90@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: 
r43457:5858ed5ac5f3 Date: 2011-04-18 19:08 +0200 http://bitbucket.org/pypy/pypy/changeset/5858ed5ac5f3/ Log: Remove extra guard_not_invalidated ops diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5747,6 +5747,37 @@ """ self.optimize_loop(ops, expected, expected) + def test_remove_extra_guards_not_invalidated(self): + ops = """ + [] + guard_not_invalidated() [] + guard_not_invalidated() [] + jump() + """ + expected = """ + [] + guard_not_invalidated() [] + jump() + """ + self.optimize_loop(ops, expected) + + def test_call_may_force_invalidated_guards(self): + ops = """ + [i0] + guard_not_invalidated() [] + call_may_force(i0, descr=mayforcevirtdescr) + guard_not_invalidated() [] + jump(i0) + """ + expected = """ + [i0] + guard_not_invalidated() [] + call_may_force(i0, descr=mayforcevirtdescr) + guard_not_invalidated() [] + jump(i0) + """ + self.optimize_loop(ops, expected) + ##class TestOOtype(OptimizeOptTest, OOtypeMixin): ## def test_instanceof(self): diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -120,6 +120,7 @@ # cached array items: {descr: CachedArrayItems} self.cached_arrayitems = {} self._remove_guard_not_invalidated = False + self._seen_guard_not_invalidated = False def reconstruct_for_next_iteration(self, optimizer, valuemap): new = OptHeap() @@ -239,6 +240,9 @@ effectinfo = None else: effectinfo = op.getdescr().get_extra_info() + if (effectinfo is None or effectinfo.extraeffect >= + effectinfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE): + self._seen_guard_not_invalidated = False if effectinfo is not None: # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -414,6 +418,9 @@ if self._remove_guard_not_invalidated: return 
self._remove_guard_not_invalidated = False + if self._seen_guard_not_invalidated: + return + self._seen_guard_not_invalidated = True self.emit_operation(op) def propagate_forward(self, op): From commits-noreply at bitbucket.org Mon Apr 18 22:03:14 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 18 Apr 2011 22:03:14 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Add Dario Bertini Message-ID: <20110418200314.E2D3F282B90@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3522:2254be372f4a Date: 2011-04-18 22:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/2254be372f4a/ Log: Add Dario Bertini diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -18,5 +18,7 @@ Armin Rigo 23-02 SGS Veckobostader Hakan Ardo 24-27 ??? Romain Guillebert 23-03 ??? +Dario Bertini 23-02 ??? no alcohol, cheese + sliced meat ==================== ============== ===================== ================== From commits-noreply at bitbucket.org Mon Apr 18 22:16:18 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Mon, 18 Apr 2011 22:16:18 +0200 (CEST) Subject: [pypy-svn] pypy default: It's now safe to inline ll_math. Message-ID: <20110418201618.A1972282B90@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43458:17f24da392ce Date: 2011-04-18 20:16 +0000 http://bitbucket.org/pypy/pypy/changeset/17f24da392ce/ Log: It's now safe to inline ll_math. diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py --- a/pypy/jit/codewriter/policy.py +++ b/pypy/jit/codewriter/policy.py @@ -42,9 +42,6 @@ mod = func.__module__ or '?' if mod.startswith('pypy.rpython.module.'): return True - if mod == 'pypy.rpython.lltypesystem.module.ll_math': - # XXX temporary, contains force_cast - return True if mod.startswith('pypy.translator.'): # XXX wtf? 
return True # string builder interface From commits-noreply at bitbucket.org Mon Apr 18 23:54:18 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Mon, 18 Apr 2011 23:54:18 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: save the input object and incref it Message-ID: <20110418215418.D3084282B90@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43459:a9e9d816a5cf Date: 2011-04-18 17:29 -0400 http://bitbucket.org/pypy/pypy/changeset/a9e9d816a5cf/ Log: save the input object and incref it diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import Py_LT, Py_LE, Py_NE, Py_EQ,\ - Py_GE, Py_GT, fopen, fclose, fwrite + Py_GE, Py_GT, Py_buffer, fopen, fclose, fwrite from pypy.tool.udir import udir class TestObject(BaseApiTest): @@ -267,3 +267,55 @@ """)]) assert module.dump(self.tmpname, None) assert open(self.tmpname).read() == 'None' + + + +class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase): + """ + PyBuffer_FillInfo populates the fields of a Py_buffer from its arguments. + """ + def test_nullObject(self): + """ + PyBuffer_FillInfo populates the C{buf}, C{length}, and C{obj} fields of + the Py_buffer passed to it. + """ + module = self.import_extension('foo', [ + ("fillinfo", "METH_VARARGS", + """ + Py_buffer buf; + PyObject *str = PyString_FromString("hello, world."); + PyObject *result; + + if (PyBuffer_FillInfo(&buf, str, PyString_AsString(str), 13, 0, 0)) { + return NULL; + } + + /* Get rid of our own reference to the object, but the Py_buffer should + * still have a reference. + */ + Py_DECREF(str); + + /* Give back a new string to the caller, constructed from data in the + * Py_buffer. It better still be valid. 
+ */ + if (!(result = PyString_FromStringAndSize(buf.buf, buf.len))) { + return NULL; + } + + /* Now the data in the Py_buffer is really no longer needed, get rid of it + *(could use PyBuffer_Release here, but that would drag in more code than + * necessary). + */ + Py_DECREF(buf.obj); + + /* Py_DECREF can't directly signal error to us, but if it makes a reference + * count go negative, it will set an error. + */ + if (PyErr_Occurred()) { + return NULL; + } + + return result; + """)]) + result = module.fillinfo() + assert "hello, world." == result diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -474,6 +474,9 @@ def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): view.c_buf = buf view.c_len = length + view.c_obj = obj + Py_IncRef(space, obj) + return 0 @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -418,7 +418,7 @@ Py_buffer = cpython_struct( "Py_buffer", ( ('buf', rffi.VOIDP), - # ('obj', PyObject), + ('obj', PyObject), ('len', Py_ssize_t), # ('itemsize', Py_ssize_t), diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -123,4 +123,4 @@ } return PyString_FromStringAndSize(buf.buf, buf.len); ''') - assert pybuffer('foo\0bar\0baz') == 'foo\0bar\0baz' + assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') From commits-noreply at bitbucket.org Mon Apr 18 23:54:19 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Mon, 18 Apr 2011 23:54:19 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: A test for when the object passed to FillInfo is NULL - and it passes already, I suppose because Py_IncRef is very forgiving. 
Message-ID: <20110418215419.D70C7282B90@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43460:d7ac4c4ac5f3 Date: 2011-04-18 17:53 -0400 http://bitbucket.org/pypy/pypy/changeset/d7ac4c4ac5f3/ Log: A test for when the object passed to FillInfo is NULL - and it passes already, I suppose because Py_IncRef is very forgiving. diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -274,10 +274,62 @@ """ PyBuffer_FillInfo populates the fields of a Py_buffer from its arguments. """ - def test_nullObject(self): + def test_fillWithoutObject(self): + """ + PyBuffer_FillInfo populates the C{buf} and C{length}fields of the + Py_buffer passed to it. + """ + module = self.import_extension('foo', [ + ("fillinfo", "METH_VARARGS", + """ + Py_buffer buf; + PyObject *str = PyString_FromString("hello, world."); + PyObject *result; + + if (PyBuffer_FillInfo(&buf, NULL, PyString_AsString(str), 13, 0, 0)) { + return NULL; + } + + /* Check a few things we want to have happened. + */ + if (buf.buf != PyString_AsString(str)) { + PyErr_SetString(PyExc_ValueError, "buf field not initialized"); + return NULL; + } + + if (buf.len != 13) { + PyErr_SetString(PyExc_ValueError, "len field not initialized"); + return NULL; + } + + if (buf.obj != NULL) { + PyErr_SetString(PyExc_ValueError, "obj field not initialized"); + return NULL; + } + + /* Give back a new string to the caller, constructed from data in the + * Py_buffer. + */ + if (!(result = PyString_FromStringAndSize(buf.buf, buf.len))) { + return NULL; + } + + /* Free that string we allocated above. result does not share storage with + * it. + */ + Py_DECREF(str); + + return result; + """)]) + result = module.fillinfo() + assert "hello, world." 
== result + + + def test_fillWithObject(self): """ PyBuffer_FillInfo populates the C{buf}, C{length}, and C{obj} fields of - the Py_buffer passed to it. + the Py_buffer passed to it and increments the reference count of the + object. """ module = self.import_extension('foo', [ ("fillinfo", "METH_VARARGS", From commits-noreply at bitbucket.org Tue Apr 19 08:40:10 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 19 Apr 2011 08:40:10 +0200 (CEST) Subject: [pypy-svn] pypy use-out-of-line-guards: A new branch for trying using out of line guards in places Message-ID: <20110419064010.6A188282C19@codespeak.net> Author: Maciej Fijalkowski Branch: use-out-of-line-guards Changeset: r43461:ff681a0136c1 Date: 2011-04-18 21:36 +0200 http://bitbucket.org/pypy/pypy/changeset/ff681a0136c1/ Log: A new branch for trying using out of line guards in places From commits-noreply at bitbucket.org Tue Apr 19 08:40:11 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 19 Apr 2011 08:40:11 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: add a missing file Message-ID: <20110419064011.78544282C19@codespeak.net> Author: Maciej Fijalkowski Branch: jit-lsprofile Changeset: r43462:2e501f94bdfd Date: 2011-04-19 08:39 +0200 http://bitbucket.org/pypy/pypy/changeset/2e501f94bdfd/ Log: add a missing file diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/debug_print.c @@ -0,0 +1,150 @@ + +#include +#include +#include + +#include +#include +#include "src/profiling.h" +#include "src/debug_print.h" + +long pypy_have_debug_prints = -1; +FILE *pypy_debug_file = NULL; +static unsigned char debug_ready = 0; +static unsigned char debug_profile = 0; +static char *debug_start_colors_1 = ""; +static char *debug_start_colors_2 = ""; +static char *debug_stop_colors = ""; +static char *debug_prefix = NULL; + +static void pypy_debug_open(void) +{ + char *filename = getenv("PYPYLOG"); + if 
(filename) +#ifndef MS_WINDOWS + unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ +#else + putenv("PYPYLOG="); /* don't pass it to subprocesses */ +#endif + if (filename && filename[0]) + { + char *colon = strchr(filename, ':'); + if (!colon) + { + /* PYPYLOG=filename --- profiling version */ + debug_profile = 1; + pypy_setup_profiling(); + } + else + { + /* PYPYLOG=prefix:filename --- conditional logging */ + int n = colon - filename; + debug_prefix = malloc(n + 1); + memcpy(debug_prefix, filename, n); + debug_prefix[n] = '\0'; + filename = colon + 1; + } + if (strcmp(filename, "-") != 0) + pypy_debug_file = fopen(filename, "w"); + } + if (!pypy_debug_file) + { + pypy_debug_file = stderr; + if (isatty(2)) + { + debug_start_colors_1 = "\033[1m\033[31m"; + debug_start_colors_2 = "\033[31m"; + debug_stop_colors = "\033[0m"; + } + } + debug_ready = 1; +} + +void pypy_debug_ensure_opened(void) +{ + if (!debug_ready) + pypy_debug_open(); +} + + +#ifndef _WIN32 + + static long long pypy_read_timestamp(void) + { +# ifdef CLOCK_THREAD_CPUTIME_ID + struct timespec tspec; + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec); + return ((long long)tspec.tv_sec) * 1000000000LL + tspec.tv_nsec; +# else + /* argh, we don't seem to have clock_gettime(). Bad OS. 
*/ + struct timeval tv; + gettimeofday(&tv, NULL); + return ((long long)tv.tv_sec) * 1000000LL + tv.tv_usec; +# endif + } +#endif + + +static unsigned char startswithoneof(const char *str, const char *substr) +{ + const char *p = str; + for (; *substr; substr++) + { + if (*substr != ',') + { + if (p && *p++ != *substr) + p = NULL; /* mismatch */ + } + else if (p != NULL) + return 1; /* match */ + else + p = str; /* mismatched, retry with the next */ + } + return p != NULL; +} + +#if defined(_MSC_VER) || defined(__MINGW32__) +#define PYPY_LONG_LONG_PRINTF_FORMAT "I64" +#else +#define PYPY_LONG_LONG_PRINTF_FORMAT "ll" +#endif + +static void display_startstop(const char *prefix, const char *postfix, + const char *category, const char *colors) +{ + long long timestamp; + READ_TIMESTAMP(timestamp); + fprintf(pypy_debug_file, "%s[%"PYPY_LONG_LONG_PRINTF_FORMAT"x] %s%s%s\n%s", + colors, + timestamp, prefix, category, postfix, + debug_stop_colors); +} + +void pypy_debug_start(const char *category) +{ + pypy_debug_ensure_opened(); + /* Enter a nesting level. Nested debug_prints are disabled by default + because the following left shift introduces a 0 in the last bit. + Note that this logic assumes that we are never going to nest + debug_starts more than 31 levels (63 on 64-bits). 
*/ + pypy_have_debug_prints <<= 1; + if (!debug_profile) + { + /* non-profiling version */ + if (!debug_prefix || !startswithoneof(category, debug_prefix)) + { + /* wrong section name, or no PYPYLOG at all, skip it */ + return; + } + /* else make this subsection active */ + pypy_have_debug_prints |= 1; + } + display_startstop("{", "", category, debug_start_colors_1); +} + +void pypy_debug_stop(const char *category) +{ + if (debug_profile | (pypy_have_debug_prints & 1)) + display_startstop("", "}", category, debug_start_colors_2); + pypy_have_debug_prints >>= 1; +} From commits-noreply at bitbucket.org Tue Apr 19 09:33:06 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 19 Apr 2011 09:33:06 +0200 (CEST) Subject: [pypy-svn] pypy default: fix this test; fromint is now a purefunction, no need for the guard_no_exception Message-ID: <20110419073306.679E2282C19@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43463:5f1b8a8a3d4c Date: 2011-04-19 09:32 +0200 http://bitbucket.org/pypy/pypy/changeset/5f1b8a8a3d4c/ Log: fix this test; fromint is now a purefunction, no need for the guard_no_exception diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -68,7 +68,6 @@ i7 = int_gt(i4, 1) guard_true(i7, descr=...) p9 = call(ConstClass(fromint), i4, descr=...) - guard_no_exception(descr=...) p11 = call(ConstClass(rbigint.mul), p5, p9, descr=...) guard_no_exception(descr=...) 
i13 = int_sub(i4, 1) From commits-noreply at bitbucket.org Tue Apr 19 10:03:56 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 19 Apr 2011 10:03:56 +0200 (CEST) Subject: [pypy-svn] pypy default: add lib-tk to the initial sys.path Message-ID: <20110419080356.F280D282C19@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43464:999cc606b472 Date: 2011-04-19 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/999cc606b472/ Log: add lib-tk to the initial sys.path diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -54,6 +54,11 @@ importlist.append(python_std_lib_modified) importlist.append(python_std_lib) # + lib_tk_modified = os.path.join(python_std_lib_modified, 'lib-tk') + lib_tk = os.path.join(python_std_lib, 'lib-tk') + importlist.append(lib_tk_modified) + importlist.append(lib_tk) + # # List here the extra platform-specific paths. if platform != 'win32': importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) diff --git a/pypy/module/sys/test/test_initialpath.py b/pypy/module/sys/test/test_initialpath.py --- a/pypy/module/sys/test/test_initialpath.py +++ b/pypy/module/sys/test/test_initialpath.py @@ -17,3 +17,12 @@ path = getinitialpath(str(tmpdir)) # we get at least 'dirs', and maybe more (e.g. 
plat-linux2) assert path[:len(dirs)] == map(str, dirs) + +def test_include_libtk(tmpdir): + lib_pypy, lib_python_modified, lib_python = build_hierarchy(tmpdir) + lib_tk_modified = lib_python_modified.join('lib-tk') + lib_tk = lib_python.join('lib-tk') + path = getinitialpath(str(tmpdir)) + i = path.index(str(lib_tk_modified)) + j = path.index(str(lib_tk)) + assert i < j From commits-noreply at bitbucket.org Tue Apr 19 11:23:07 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 11:23:07 +0200 (CEST) Subject: [pypy-svn] pypy arm-backend-2: fix import Message-ID: <20110419092307.8FCA3282C1A@codespeak.net> Author: David Schneider Branch: arm-backend-2 Changeset: r43466:a40fddd9e634 Date: 2011-04-14 16:59 +0200 http://bitbucket.org/pypy/pypy/changeset/a40fddd9e634/ Log: fix import diff --git a/pypy/jit/backend/arm/test/support.py b/pypy/jit/backend/arm/test/support.py --- a/pypy/jit/backend/arm/test/support.py +++ b/pypy/jit/backend/arm/test/support.py @@ -3,10 +3,10 @@ from pypy.rpython.lltypesystem import lltype, rffi from pypy.jit.backend.detect_cpu import getcpuclass -from pypy.jit.metainterp.test import test_basic +from pypy.jit.metainterp.test import support from pypy.rlib.jit import JitDriver -class JitARMMixin(test_basic.LLJitMixin): +class JitARMMixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() From commits-noreply at bitbucket.org Tue Apr 19 11:23:09 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 11:23:09 +0200 (CEST) Subject: [pypy-svn] pypy arm-backend-2: merge default Message-ID: <20110419092309.851DB282C1A@codespeak.net> Author: David Schneider Branch: arm-backend-2 Changeset: r43467:e011973843d7 Date: 2011-04-14 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e011973843d7/ Log: merge default diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1212,8 +1212,6 @@ assert loop.match(""" i10 = int_lt(i8, i9) guard_true(i10, descr=...) - # XXX: why do we need ovf check here? If we put a literal "300" - # instead of "n", it disappears i12 = int_add_ovf(i8, 5) guard_no_overflow(descr=...) i14 = int_add_ovf(i7, 1) @@ -1224,3 +1222,160 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=) """) + + def test_intbound_addmul_ge(self): + def main(n): + i, a, b = 0, 0, 0 + while i < 300: + if i + 5 >= 5: + a += 1 + if 2 * i >= 0: + b += 1 + i += 1 + return (a, b) + # + log = self.run(main, [300], threshold=200) + assert log.result == (300, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_add(i8, 5) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i16 = int_lshift(i8, 1) + i18 = int_add_ovf(i6, 1) + guard_no_overflow(descr=...) + i21 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=) + """) + + def test_intbound_eq(self): + def main(a, n): + i, s = 0, 0 + while i < 300: + if a == 7: + s += a + 1 + elif i == 10: + s += i + else: + s += 1 + i += 1 + return s + # + log = self.run(main, [7, 300], threshold=200) + assert log.result == main(7, 300) + log = self.run(main, [10, 300], threshold=200) + assert log.result == main(10, 300) + log = self.run(main, [42, 300], threshold=200) + assert log.result == main(42, 300) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i10 = int_lt(i8, 300) + guard_true(i10, descr=...) + i12 = int_eq(i8, 10) + guard_false(i12, descr=...) + i14 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) 
+ i16 = int_add(i8, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=) + """) + + def test_intbound_mul(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert i >= 0 + if 2 * i < 30000: + s += 1 + else: + s += a + i += 1 + return s + # + log = self.run(main, [7], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_lshift(i6, 1) + i12 = int_add_ovf(i5, 1) + guard_no_overflow(descr=...) + i14 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i12, i14, descr=) + """) + + def test_assert(self): + def main(a): + i, s = 0, 0 + while i < 300: + assert a == 7 + s += a + 1 + i += 1 + return s + log = self.run(main, [7], threshold=200) + assert log.result == 300*8 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = int_lt(i6, 300) + guard_true(i8, descr=...) + i10 = int_add_ovf(i5, 8) + guard_no_overflow(descr=...) + i12 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, i10, i12, descr=) + """) + + def test_zeropadded(self): + def main(): + from array import array + class ZeroPadded(array): + def __new__(cls, l): + self = array.__new__(cls, 'd', range(l)) + return self + + def __getitem__(self, i): + if i < 0 or i >= self.__len__(): + return 0 + return array.__getitem__(self, i) # ID: get + # + buf = ZeroPadded(2000) + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + + log = self.run(main, [], threshold=200) + assert log.result == 9895050.0 + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? 
+ + def test_circular(self): + def main(): + from array import array + class Circular(array): + def __new__(cls): + self = array.__new__(cls, 'd', range(256)) + return self + def __getitem__(self, i): + # assert self.__len__() == 256 (FIXME: does not improve) + return array.__getitem__(self, i & 255) + # + buf = Circular() + i = 10 + sa = 0 + while i < 2000 - 10: + sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] + i += 1 + return sa + # + log = self.run(main, [], threshold=200) + assert log.result == 1239690.0 + loop, = log.loops_by_filename(self.filepath) + # XXX: what do we want to check here? diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,106 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test_intbound_addmul_ge(self): - self.run_source(''' - def main(): - i, a, b = 0, 0, 0 - while i < 2000: - if i + 5 >= 5: - a += 1 - if 2 * i >= 0: - b += 1 - i += 1 - return (a, b) - ''', 53, ([], (2000, 2000))) - - def test_intbound_eq(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - if a == 7: - s += a + 1 - elif i == 10: - s += i - else: - s += 1 - i += 1 - return s - ''', 69, ([7], 12000), ([42], 1509), ([10], 1509)) - - def test_intbound_mul(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert i >= 0 - if 2 * i < 30000: - s += 1 - else: - s += a - i += 1 - return s - ''', 43, ([7], 1500)) - - def test_assert(self): - self.run_source(''' - def main(a): - i, s = 0, 0 - while i < 1500: - assert a == 7 - s += a + 1 - i += 1 - return s - ''', 38, ([7], 8*1500)) - - def test_zeropadded(self): - self.run_source(''' - from array import array - class ZeroPadded(array): - def __new__(cls, l): - self = array.__new__(cls, 'd', range(l)) - return self - - def __getitem__(self, i): - if i < 0 or i >= self.__len__(): - return 0 - return 
array.__getitem__(self, i) - - - def main(): - buf = ZeroPadded(2000) - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 232, ([], 9895050.0)) - - def test_circular(self): - self.run_source(''' - from array import array - class Circular(array): - def __new__(cls): - self = array.__new__(cls, 'd', range(256)) - return self - def __getitem__(self, i): - # assert self.__len__() == 256 (FIXME: does not improve) - return array.__getitem__(self, i & 255) - - def main(): - buf = Circular() - i = 10 - sa = 0 - while i < 2000 - 10: - sa += buf[i-2] + buf[i-1] + buf[i] + buf[i+1] + buf[i+2] - i += 1 - return sa - - ''', 170, ([], 1239690.0)) - def test_min_max(self): self.run_source(''' def main(): From commits-noreply at bitbucket.org Tue Apr 19 12:01:02 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 12:01:02 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: in progress: support frame remapping for floats and int/ref Message-ID: <20110419100102.B4F95282C1A@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43469:fd3bb7058b08 Date: 2011-04-15 15:22 +0200 http://bitbucket.org/pypy/pypy/changeset/fd3bb7058b08/ Log: in progress: support frame remapping for floats and int/ref diff --git a/pypy/jit/backend/arm/jump.py b/pypy/jit/backend/arm/jump.py --- a/pypy/jit/backend/arm/jump.py +++ b/pypy/jit/backend/arm/jump.py @@ -8,7 +8,9 @@ srccount = {} # maps dst_locations to how many times the same # location appears in src_locations for dst in dst_locations: - srccount[dst.as_key()] = 0 + key = dst.as_key() + assert key not in srccount, "duplicate value in dst_locations!" 
+ srccount[key] = 0 for i in range(len(dst_locations)): src = src_locations[i] if src.is_imm(): @@ -68,3 +70,41 @@ assembler.regalloc_mov(src, tmpreg) src = tmpreg assembler.regalloc_mov(src, dst) + +def remap_frame_layout_mixed(assembler, + src_locations1, dst_locations1, tmpreg1, + src_locations2, dst_locations2, tmpreg2): + # find and push the xmm stack locations from src_locations2 that + # are going to be overwritten by dst_locations1 + from pypy.jit.backend.arm.arch import WORD + extrapushes = [] + dst_keys = {} + for loc in dst_locations1: + dst_keys[loc.as_key()] = None + src_locations2red = [] + dst_locations2red = [] + for i in range(len(src_locations2)): + loc = src_locations2[i] + dstloc = dst_locations2[i] + if loc.is_stack(): + key = loc.as_key() + if (key in dst_keys or (loc.width > WORD and + (key + WORD) in dst_keys)): + assembler.regalloc_push(loc) + extrapushes.append(dstloc) + continue + src_locations2red.append(loc) + dst_locations2red.append(dstloc) + src_locations2 = src_locations2red + dst_locations2 = dst_locations2red + # + # remap the integer and pointer registers and stack locations + remap_frame_layout(assembler, src_locations1, dst_locations1, tmpreg1) + # + # remap the vfp registers and stack locations + remap_frame_layout(assembler, src_locations2, dst_locations2, tmpreg2) + # + # finally, pop the extra xmm stack locations + while len(extrapushes) > 0: + loc = extrapushes.pop() + assembler.regalloc_pop(loc) diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -164,7 +164,9 @@ if group == self.INT_TYPE: self.fail_boxes_int.setitem(fail_index, value) elif group == self.REF_TYPE: - self.fail_boxes_ptr.setitem(fail_index, rffi.cast(llmemory.GCREF, value)) + tgt = self.fail_boxes_ptr.get_addr_for_num(fail_index) + rffi.cast(rffi.LONGP, tgt)[0] = value + #self.fail_boxes_ptr.setitem(fail_index, value)# rffi.cast(llmemory.GCREF, 
value)) elif group == self.FLOAT_TYPE: self.fail_boxes_float.setitem(fail_index, value) else: @@ -345,41 +347,73 @@ self.mc.SUB_ri(r.sp.value, r.sp.value, WORD) self.mc.MOV_rr(r.fp.value, r.sp.value) - def gen_bootstrap_code(self, inputargs, regalloc, looptoken): - for i in range(len(inputargs)): - loc = inputargs[i] - reg = regalloc.force_allocate_reg(loc) - if loc.type != FLOAT: - if loc.type == REF: - addr = self.fail_boxes_ptr.get_addr_for_num(i) - elif loc.type == INT: - addr = self.fail_boxes_int.get_addr_for_num(i) - else: - assert 0 - self.mc.gen_load_int(reg.value, addr) - self.mc.LDR_ri(reg.value, reg.value) - elif loc.type == FLOAT: - addr = self.fail_boxes_float.get_addr_for_num(i) - self.mc.gen_load_int(r.ip.value, addr) - self.mc.VLDR(reg.value, r.ip.value) + def gen_bootstrap_code(self, nonfloatlocs, floatlocs, inputargs): + for i in range(len(nonfloatlocs)): + loc = nonfloatlocs[i] + if loc is None: + continue + arg = inputargs[i] + assert arg.type != FLOAT + if arg.type == REF: + addr = self.fail_boxes_ptr.get_addr_for_num(i) + elif arg.type == INT: + addr = self.fail_boxes_int.get_addr_for_num(i) else: assert 0 - regalloc.possibly_free_var(loc) - arglocs = [regalloc.loc(arg) for arg in inputargs] - looptoken._arm_arglocs = arglocs - return arglocs + if loc.is_reg(): + reg = loc + else: + reg = r.ip + self.mc.gen_load_int(reg.value, addr) + self.mc.LDR_ri(reg.value, reg.value) + if loc.is_stack(): + self.mov_loc_loc(r.ip, loc) + for i in range(len(floatlocs)): + loc = floatlocs[i] + if loc is None: + continue + arg = inputargs[i] + assert arg.type == FLOAT + addr = self.fail_boxes_float.get_addr_for_num(i) + self.mc.gen_load_int(r.ip.value, addr) + if loc.is_vfp_reg(): + self.mc.VLDR(loc.value, r.ip.value) + else: + tmpreg = r.d0 + with saved_registers(self.mc, [], [tmpreg]): + self.mc.VLDR(tmpreg.value, r.ip.value) + self.mov_loc_loc(tmpreg, loc) - def gen_direct_bootstrap_code(self, arglocs, loop_head, looptoken): + def _count_reg_args(self, 
args): + reg_args = 0 + words = 0 + for x in range(min(len(args), 4)): + if args[x].type == FLOAT: + words += 2 + else: + words += 1 + reg_args += 1 + if words > 4: + reg_args = x + break + return reg_args + + def gen_direct_bootstrap_code(self, loop_head, looptoken, inputargs): self.gen_func_prolog() - #import pdb; pdb.set_trace() - reg_args = self._count_reg_args(arglocs) + nonfloatlocs, floatlocs = looptoken._arm_arglocs - stack_locs = len(arglocs) - reg_args + reg_args = self._count_reg_args(inputargs) + + stack_locs = len(inputargs) - reg_args selected_reg = 0 for i in range(reg_args): - loc = arglocs[i] + arg = inputargs[i] + if arg.type == FLOAT: + loc = floatlocs[i] + else: + loc = nonfloatlocs[i] self.mov_loc_loc(r.all_regs[selected_reg], loc) - if arglocs[i].type == FLOAT: + if inputargs[i].type == FLOAT: selected_reg += 2 else: selected_reg += 1 @@ -387,8 +421,12 @@ stack_position = len(r.callee_saved_registers)*WORD + \ len(r.callee_saved_vfp_registers)*2*WORD + \ WORD # for the FAIL INDEX - for i in range(reg_args, len(arglocs)): - loc = arglocs[i] + for i in range(reg_args, len(inputargs)): + arg = inputargs[i] + if arg.type == FLOAT: + loc = floatlocs[i] + else: + loc = nonfloatlocs[i] if loc.is_reg(): self.mc.LDR_ri(loc.value, r.fp.value, stack_position) elif loc.is_vfp_reg(): @@ -434,9 +472,9 @@ self.align() self.gen_func_prolog() sp_patch_location = self._prepare_sp_patch_position() - arglocs = self.gen_bootstrap_code(inputargs, regalloc, looptoken) - #for x in range(5): - # self.mc.NOP() + nonfloatlocs, floatlocs = regalloc.prepare_loop(inputargs, operations, looptoken) + self.gen_bootstrap_code(nonfloatlocs, floatlocs, inputargs) + looptoken._arm_arglocs = [nonfloatlocs, floatlocs] loop_head = self.mc.currpos() looptoken._arm_loop_code = loop_head @@ -450,7 +488,7 @@ self.align() direct_bootstrap_code = self.mc.currpos() - self.gen_direct_bootstrap_code(arglocs, loop_head, looptoken) + self.gen_direct_bootstrap_code(loop_head, looptoken, 
inputargs) loop_start = self.materialize_loop(looptoken) looptoken._arm_bootstrap_code = loop_start @@ -531,7 +569,7 @@ # manager if frame_depth == 1: return - n = (frame_depth-1)*WORD + n = (frame_depth)*WORD self._adjust_sp(n, cb, base_reg=r.fp) def _adjust_sp(self, n, cb=None, fcond=c.AL, base_reg=r.sp): diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -228,10 +228,8 @@ def emit_op_jump(self, op, arglocs, regalloc, fcond): descr = op.getdescr() assert isinstance(descr, LoopToken) - destlocs = descr._arm_arglocs assert fcond == c.AL - remap_frame_layout(self, arglocs, destlocs, r.ip) if descr._arm_bootstrap_code == 0: self.mc.B_offs(descr._arm_loop_code, fcond) else: @@ -260,24 +258,12 @@ self._ensure_result_bit_extension(loc, size, signed) return cond - def _count_reg_args(self, args): - reg_args = 0 - words = 0 - for x in range(min(len(args), 4)): - if args[x].type == FLOAT: - words += 2 - else: - words += 1 - reg_args += 1 - if words > 4: - reg_args = x - break - return reg_args # XXX improve this interface # emit_op_call_may_force # XXX improve freeing of stuff here def _emit_call(self, adr, args, regalloc, fcond=c.AL, result=None): n_args = len(args) + #XXX replace with _count_reg_args reg_args = 0 words = 0 for x in range(min(n_args, 4)): @@ -727,7 +713,8 @@ descr = op.getdescr() assert isinstance(descr, LoopToken) - assert op.numargs() == len(descr._arm_arglocs) + # XXX check this + assert op.numargs() == len(descr._arm_arglocs[0]) resbox = TempInt() self._emit_call(descr._arm_direct_bootstrap_code, op.getarglist(), regalloc, fcond, result=resbox) diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -1,6 +1,7 @@ from pypy.jit.metainterp.history import Const, Box, REF from pypy.rlib.objectmodel import 
we_are_translated +from pypy.jit.metainterp.resoperation import rop class TempBox(Box): def __init__(self): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -1,5 +1,5 @@ from pypy.jit.backend.llsupport.regalloc import FrameManager, \ - RegisterManager, compute_vars_longevity, TempBox + RegisterManager, compute_vars_longevity, TempBox, compute_loop_consts from pypy.jit.backend.arm import registers as r from pypy.jit.backend.arm import locations from pypy.jit.backend.arm.locations import imm @@ -9,6 +9,7 @@ prepare_cmp_op, prepare_float_op, _check_imm_arg) +from pypy.jit.backend.arm.jump import remap_frame_layout_mixed from pypy.jit.codewriter import longlong from pypy.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr, Box, BoxInt, BoxPtr, AbstractFailDescr, @@ -43,14 +44,17 @@ def __init__(self): FrameManager.__init__(self) self.frame_depth = 1 + @staticmethod + def frame_pos(loc, type): + num_words = ARMFrameManager.frame_size(type) + return locations.StackLocation(loc, num_words=num_words, type=type) @staticmethod - def frame_pos(loc, type): - if type == INT or type == REF: - num_words = 1 - else: + def frame_size(type): + num_words = 1 + if type == FLOAT: num_words = 2 - return locations.StackLocation(loc, num_words=num_words, type=type) + return num_words def void(self, op, fcond): return [] @@ -205,6 +209,27 @@ assert isinstance(value, ConstFloat) return self.vfprm.convert_to_imm(value) + def prepare_loop(self, inputargs, operations, looptoken): + loop_consts = compute_loop_consts(inputargs, operations[-1], looptoken) + floatlocs = [None] * len(inputargs) + nonfloatlocs = [None] * len(inputargs) + for i in range(len(inputargs)): + arg = inputargs[i] + assert not isinstance(arg, Const) + reg = None + loc = inputargs[i] + if arg not in loop_consts and self.longevity[arg][1] > -1: + reg = self.try_allocate_reg(loc) + + loc = 
self.loc(arg) + if arg.type == FLOAT: + floatlocs[i] = loc + else: + nonfloatlocs[i] = loc + self.possibly_free_vars(list(inputargs)) + + return nonfloatlocs, floatlocs + def update_bindings(self, locs, frame_depth, inputargs): used = {} i = 0 @@ -509,11 +534,33 @@ def prepare_op_jump(self, op, fcond): + assembler = self.assembler descr = op.getdescr() assert isinstance(descr, LoopToken) - locs = [self.loc(op.getarg(i)) for i in range(op.numargs())] - return locs + nonfloatlocs, floatlocs = descr._arm_arglocs + # get temporary locs + tmploc = r.ip + box = TempFloat() + # compute 'vfptmploc' to be all_regs[0] by spilling what is there + vfptmp = self.vfprm.all_regs[0] + vfptmploc = self.vfprm.force_allocate_reg(box, selected_reg=vfptmp) + + # Part about non-floats + # XXX we don't need a copy, we only just the original list + src_locations1 = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type != FLOAT] + assert tmploc not in nonfloatlocs + dst_locations1 = [loc for loc in nonfloatlocs if loc is not None] + # Part about floats + src_locations2 = [self.loc(op.getarg(i)) for i in range(op.numargs()) + if op.getarg(i).type == FLOAT] + dst_locations2 = [loc for loc in floatlocs if loc is not None] + remap_frame_layout_mixed(self.assembler, + src_locations1, dst_locations1, tmploc, + src_locations2, dst_locations2, vfptmploc) + self.possibly_free_var(box) + return [] def prepare_op_setfield_gc(self, op, fcond): boxes = list(op.getarglist()) From commits-noreply at bitbucket.org Tue Apr 19 12:01:06 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 12:01:06 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: small fixes Message-ID: <20110419100106.8F1E1282C1B@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43470:8de45ef0ec94 Date: 2011-04-15 16:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8de45ef0ec94/ Log: small fixes diff --git a/pypy/jit/backend/arm/assembler.py 
b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -569,7 +569,7 @@ # manager if frame_depth == 1: return - n = (frame_depth)*WORD + n = (frame_depth-1)*WORD self._adjust_sp(n, cb, base_reg=r.fp) def _adjust_sp(self, n, cb=None, fcond=c.AL, base_reg=r.sp): diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -103,9 +103,6 @@ self.width = num_words * WORD self.type = type - def frame_size(self): - return self.width // WORD - def __repr__(self): return 'FP+%d' % (self.position,) diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -51,10 +51,9 @@ @staticmethod def frame_size(type): - num_words = 1 if type == FLOAT: - num_words = 2 - return num_words + return 2 + return 1 def void(self, op, fcond): return [] From commits-noreply at bitbucket.org Tue Apr 19 12:01:08 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 12:01:08 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: correctly calculate stack locations for floats Message-ID: <20110419100108.AAA8B282C1A@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43471:77710502dfb3 Date: 2011-04-18 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/77710502dfb3/ Log: correctly calculate stack locations for floats diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -47,6 +47,8 @@ @staticmethod def frame_pos(loc, type): num_words = ARMFrameManager.frame_size(type) + if type == FLOAT: + return locations.StackLocation(loc+1, num_words=num_words, type=type) return locations.StackLocation(loc, num_words=num_words, type=type) @staticmethod From commits-noreply at bitbucket.org Tue 
Apr 19 12:01:11 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 12:01:11 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: insert stack checks when running non-translated after each operation and at some other places Message-ID: <20110419100111.C8692282C22@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43472:49abee244330 Date: 2011-04-19 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/49abee244330/ Log: insert stack checks when running non-translated after each operation and at some other places diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -123,10 +123,18 @@ the failboxes. Values for spilled vars and registers are stored on stack at frame_loc """ + #XXX check if units are correct here, when comparing words and bytes and stuff + # assert 0, 'check if units are correct here, when comparing words and bytes and stuff' + enc = rffi.cast(rffi.CCHARP, mem_loc) frame_depth = frame_loc - (regs_loc + len(r.all_regs)*WORD + len(r.all_vfp_regs)*2*WORD) + assert (frame_loc - frame_depth) % 4 == 0 stack = rffi.cast(rffi.CCHARP, frame_loc - frame_depth) + assert regs_loc % 4 == 0 vfp_regs = rffi.cast(rffi.CCHARP, regs_loc) + assert (regs_loc + len(r.all_vfp_regs)*2*WORD) % 4 == 0 + assert frame_depth >= 0 + regs = rffi.cast(rffi.CCHARP, regs_loc + len(r.all_vfp_regs)*2*WORD) i = -1 fail_index = -1 @@ -246,12 +254,13 @@ def _gen_exit_path(self): mc = ARMv7Builder() decode_registers_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func) - + + self._insert_checks(mc) with saved_registers(mc, r.all_regs, r.all_vfp_regs): mc.MOV_rr(r.r0.value, r.ip.value) # move mem block address, to r0 to pass as mc.MOV_rr(r.r1.value, r.fp.value) # pass the current frame pointer as second param mc.MOV_rr(r.r2.value, r.sp.value) # pass the current stack pointer as third param - + self._insert_checks(mc) 
mc.BL(rffi.cast(lltype.Signed, decode_registers_addr)) mc.MOV_rr(r.ip.value, r.r0.value) mc.MOV_rr(r.r0.value, r.ip.value) @@ -594,6 +603,7 @@ def _walk_operations(self, operations, regalloc): fcond=c.AL + self._insert_checks() while regalloc.position() < len(operations) - 1: regalloc.next_instruction() i = regalloc.position() @@ -614,6 +624,7 @@ regalloc.possibly_free_var(op.result) regalloc.possibly_free_vars_for_op(op) regalloc._check_invariants() + self._insert_checks() def can_merge_with_next_guard(self, op, i, operations): if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER: @@ -626,6 +637,13 @@ return False + def _insert_checks(self, mc=None): + if not we_are_translated(): + if mc is None: + mc = self.mc + mc.CMP_rr(r.fp.value, r.sp.value) + mc.MOV_rr(r.pc.value, r.pc.value, cond=c.GE) + mc.BKPT() def _ensure_result_bit_extension(self, resloc, size, signed): if size == 4: return diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -230,6 +230,7 @@ assert isinstance(descr, LoopToken) assert fcond == c.AL + self._insert_checks() if descr._arm_bootstrap_code == 0: self.mc.B_offs(descr._arm_loop_code, fcond) else: From commits-noreply at bitbucket.org Tue Apr 19 12:01:13 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 12:01:13 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: use d15 as vfp scratch register and use it instead of temporarily pushing some register Message-ID: <20110419100113.D5375282C1D@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43473:8c20c87901d7 Date: 2011-04-19 10:41 +0200 http://bitbucket.org/pypy/pypy/changeset/8c20c87901d7/ Log: use d15 as vfp scratch register and use it instead of temporarily pushing some register diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- 
a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -388,10 +388,8 @@ if loc.is_vfp_reg(): self.mc.VLDR(loc.value, r.ip.value) else: - tmpreg = r.d0 - with saved_registers(self.mc, [], [tmpreg]): - self.mc.VLDR(tmpreg.value, r.ip.value) - self.mov_loc_loc(tmpreg, loc) + self.mc.VLDR(r.vfp_ip.value, r.ip.value) + self.mov_loc_loc(r.vfp_ip, loc) def _count_reg_args(self, args): reg_args = 0 @@ -442,9 +440,8 @@ self.mc.VLDR(loc.value, r.fp.value, stack_position) elif loc.is_stack(): if loc.type == FLOAT: - with saved_registers(self.mc, [], [r.d0]): - self.mc.VLDR(r.d0.value, r.fp.value, stack_position) - self.mov_loc_loc(r.d0, loc) + self.mc.VLDR(r.vfp_ip.value, r.fp.value, stack_position) + self.mov_loc_loc(r.vfp_ip, loc) elif loc.type == INT or loc.type == REF: self.mc.LDR_ri(r.ip.value, r.fp.value, stack_position) self.mov_loc_loc(r.ip, loc) diff --git a/pypy/jit/backend/arm/registers.py b/pypy/jit/backend/arm/registers.py --- a/pypy/jit/backend/arm/registers.py +++ b/pypy/jit/backend/arm/registers.py @@ -13,9 +13,10 @@ sp = r13 lr = r14 pc = r15 +vfp_ip = d15 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] -all_vfp_regs = vfpregisters +all_vfp_regs = vfpregisters[:-1] caller_resp = [r0, r1, r2, r3] callee_resp = [r4, r5, r6, r7, r8, r9, r10, fp] From commits-noreply at bitbucket.org Tue Apr 19 12:01:15 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 12:01:15 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: fix an issue when pushing a vfp value to the stack that was stored in the spilling area of the stack Message-ID: <20110419100115.30B6F282C1D@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43474:f5141bd3ac45 Date: 2011-04-19 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/f5141bd3ac45/ Log: fix an issue when pushing a vfp value to the stack that was stored in the spilling area of the stack diff --git a/pypy/jit/backend/arm/assembler.py 
b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -757,8 +757,12 @@ def regalloc_push(self, loc): if loc.is_stack(): - self.regalloc_mov(loc, r.ip) - self.mc.PUSH([r.ip.value]) + if loc.type != FLOAT: + scratch_reg = r.ip + else: + scratch_reg = r.vfp_ip + self.regalloc_mov(loc, scratch_reg) + self.regalloc_push(scratch_reg) elif loc.is_reg(): self.mc.PUSH([loc.value]) elif loc.is_vfp_reg(): @@ -771,8 +775,12 @@ def regalloc_pop(self, loc): if loc.is_stack(): - self.mc.POP([r.ip.value]) - self.regalloc_mov(r.ip, loc) + if loc.type != FLOAT: + scratch_reg = r.ip + else: + scratch_reg = r.vfp_ip + self.regalloc_pop(scratch_reg) + self.regalloc_mov(scratch_reg, loc) elif loc.is_reg(): self.mc.POP([loc.value]) elif loc.is_vfp_reg(): From commits-noreply at bitbucket.org Tue Apr 19 13:10:14 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 19 Apr 2011 13:10:14 +0200 (CEST) Subject: [pypy-svn] pypy default: revert changes that were commited by mistake in d7b6bd1b8284 Message-ID: <20110419111014.22F92282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43476:c454f733a67b Date: 2011-04-19 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/c454f733a67b/ Log: revert changes that were commited by mistake in d7b6bd1b8284 diff --git a/lib-python/2.7.0/idlelib/ColorDelegator.py b/lib-python/2.7.0/idlelib/ColorDelegator.py --- a/lib-python/2.7.0/idlelib/ColorDelegator.py +++ b/lib-python/2.7.0/idlelib/ColorDelegator.py @@ -6,7 +6,7 @@ from idlelib.Delegator import Delegator from idlelib.configHandler import idleConf -DEBUG = True +DEBUG = False def any(name, alternates): "Return a named group pattern matching list of alternates." 
@@ -89,7 +89,6 @@ colorizing = False def notify_range(self, index1, index2=None): - print 'notify_range' self.tag_add("TODO", index1, index2) if self.after_id: if DEBUG: print "colorizing already scheduled" diff --git a/lib-python/2.7.0/idlelib/Delegator.py b/lib-python/2.7.0/idlelib/Delegator.py --- a/lib-python/2.7.0/idlelib/Delegator.py +++ b/lib-python/2.7.0/idlelib/Delegator.py @@ -6,9 +6,6 @@ self.delegate = delegate self.__cache = {} - def __nonzero__(self): - return True - def __getattr__(self, name): attr = getattr(self.delegate, name) # May raise AttributeError setattr(self, name, attr) From commits-noreply at bitbucket.org Tue Apr 19 13:21:03 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 19 Apr 2011 13:21:03 +0200 (CEST) Subject: [pypy-svn] pypy default: add a modifiable copy of idlelib Message-ID: <20110419112103.A82F9282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43477:4b91858622e9 Date: 2011-04-19 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/4b91858622e9/ Log: add a modifiable copy of idlelib diff --git a/lib-python/2.7.0/idlelib/SearchDialog.py b/lib-python/modified-2.7.0/idlelib/SearchDialog.py copy from lib-python/2.7.0/idlelib/SearchDialog.py copy to lib-python/modified-2.7.0/idlelib/SearchDialog.py diff --git a/lib-python/2.7.0/idlelib/CodeContext.py b/lib-python/modified-2.7.0/idlelib/CodeContext.py copy from lib-python/2.7.0/idlelib/CodeContext.py copy to lib-python/modified-2.7.0/idlelib/CodeContext.py diff --git a/lib-python/2.7.0/idlelib/PyParse.py b/lib-python/modified-2.7.0/idlelib/PyParse.py copy from lib-python/2.7.0/idlelib/PyParse.py copy to lib-python/modified-2.7.0/idlelib/PyParse.py diff --git a/lib-python/2.7.0/idlelib/Icons/tk.gif b/lib-python/modified-2.7.0/idlelib/Icons/tk.gif copy from lib-python/2.7.0/idlelib/Icons/tk.gif copy to lib-python/modified-2.7.0/idlelib/Icons/tk.gif diff --git a/lib-python/2.7.0/idlelib/ZoomHeight.py b/lib-python/modified-2.7.0/idlelib/ZoomHeight.py copy 
from lib-python/2.7.0/idlelib/ZoomHeight.py copy to lib-python/modified-2.7.0/idlelib/ZoomHeight.py diff --git a/lib-python/2.7.0/idlelib/ScriptBinding.py b/lib-python/modified-2.7.0/idlelib/ScriptBinding.py copy from lib-python/2.7.0/idlelib/ScriptBinding.py copy to lib-python/modified-2.7.0/idlelib/ScriptBinding.py diff --git a/lib-python/2.7.0/idlelib/rpc.py b/lib-python/modified-2.7.0/idlelib/rpc.py copy from lib-python/2.7.0/idlelib/rpc.py copy to lib-python/modified-2.7.0/idlelib/rpc.py diff --git a/lib-python/2.7.0/idlelib/IdleHistory.py b/lib-python/modified-2.7.0/idlelib/IdleHistory.py copy from lib-python/2.7.0/idlelib/IdleHistory.py copy to lib-python/modified-2.7.0/idlelib/IdleHistory.py diff --git a/lib-python/2.7.0/idlelib/ObjectBrowser.py b/lib-python/modified-2.7.0/idlelib/ObjectBrowser.py copy from lib-python/2.7.0/idlelib/ObjectBrowser.py copy to lib-python/modified-2.7.0/idlelib/ObjectBrowser.py diff --git a/lib-python/2.7.0/idlelib/RemoteObjectBrowser.py b/lib-python/modified-2.7.0/idlelib/RemoteObjectBrowser.py copy from lib-python/2.7.0/idlelib/RemoteObjectBrowser.py copy to lib-python/modified-2.7.0/idlelib/RemoteObjectBrowser.py diff --git a/lib-python/2.7.0/idlelib/AutoExpand.py b/lib-python/modified-2.7.0/idlelib/AutoExpand.py copy from lib-python/2.7.0/idlelib/AutoExpand.py copy to lib-python/modified-2.7.0/idlelib/AutoExpand.py diff --git a/lib-python/2.7.0/idlelib/FormatParagraph.py b/lib-python/modified-2.7.0/idlelib/FormatParagraph.py copy from lib-python/2.7.0/idlelib/FormatParagraph.py copy to lib-python/modified-2.7.0/idlelib/FormatParagraph.py diff --git a/lib-python/2.7.0/idlelib/config-keys.def b/lib-python/modified-2.7.0/idlelib/config-keys.def copy from lib-python/2.7.0/idlelib/config-keys.def copy to lib-python/modified-2.7.0/idlelib/config-keys.def diff --git a/lib-python/2.7.0/idlelib/run.py b/lib-python/modified-2.7.0/idlelib/run.py copy from lib-python/2.7.0/idlelib/run.py copy to lib-python/modified-2.7.0/idlelib/run.py 
diff --git a/lib-python/2.7.0/idlelib/idle.pyw b/lib-python/modified-2.7.0/idlelib/idle.pyw copy from lib-python/2.7.0/idlelib/idle.pyw copy to lib-python/modified-2.7.0/idlelib/idle.pyw diff --git a/lib-python/2.7.0/idlelib/README.txt b/lib-python/modified-2.7.0/idlelib/README.txt copy from lib-python/2.7.0/idlelib/README.txt copy to lib-python/modified-2.7.0/idlelib/README.txt diff --git a/lib-python/2.7.0/idlelib/TreeWidget.py b/lib-python/modified-2.7.0/idlelib/TreeWidget.py copy from lib-python/2.7.0/idlelib/TreeWidget.py copy to lib-python/modified-2.7.0/idlelib/TreeWidget.py diff --git a/lib-python/2.7.0/idlelib/Percolator.py b/lib-python/modified-2.7.0/idlelib/Percolator.py copy from lib-python/2.7.0/idlelib/Percolator.py copy to lib-python/modified-2.7.0/idlelib/Percolator.py diff --git a/lib-python/2.7.0/idlelib/ParenMatch.py b/lib-python/modified-2.7.0/idlelib/ParenMatch.py copy from lib-python/2.7.0/idlelib/ParenMatch.py copy to lib-python/modified-2.7.0/idlelib/ParenMatch.py diff --git a/lib-python/2.7.0/idlelib/HyperParser.py b/lib-python/modified-2.7.0/idlelib/HyperParser.py copy from lib-python/2.7.0/idlelib/HyperParser.py copy to lib-python/modified-2.7.0/idlelib/HyperParser.py diff --git a/lib-python/2.7.0/idlelib/Icons/plusnode.gif b/lib-python/modified-2.7.0/idlelib/Icons/plusnode.gif copy from lib-python/2.7.0/idlelib/Icons/plusnode.gif copy to lib-python/modified-2.7.0/idlelib/Icons/plusnode.gif diff --git a/lib-python/2.7.0/idlelib/EditorWindow.py b/lib-python/modified-2.7.0/idlelib/EditorWindow.py copy from lib-python/2.7.0/idlelib/EditorWindow.py copy to lib-python/modified-2.7.0/idlelib/EditorWindow.py diff --git a/lib-python/2.7.0/idlelib/Icons/openfolder.gif b/lib-python/modified-2.7.0/idlelib/Icons/openfolder.gif copy from lib-python/2.7.0/idlelib/Icons/openfolder.gif copy to lib-python/modified-2.7.0/idlelib/Icons/openfolder.gif diff --git a/lib-python/2.7.0/idlelib/PathBrowser.py b/lib-python/modified-2.7.0/idlelib/PathBrowser.py copy 
from lib-python/2.7.0/idlelib/PathBrowser.py copy to lib-python/modified-2.7.0/idlelib/PathBrowser.py diff --git a/lib-python/2.7.0/idlelib/testcode.py b/lib-python/modified-2.7.0/idlelib/testcode.py copy from lib-python/2.7.0/idlelib/testcode.py copy to lib-python/modified-2.7.0/idlelib/testcode.py diff --git a/lib-python/2.7.0/idlelib/Icons/folder.gif b/lib-python/modified-2.7.0/idlelib/Icons/folder.gif copy from lib-python/2.7.0/idlelib/Icons/folder.gif copy to lib-python/modified-2.7.0/idlelib/Icons/folder.gif diff --git a/lib-python/2.7.0/idlelib/RstripExtension.py b/lib-python/modified-2.7.0/idlelib/RstripExtension.py copy from lib-python/2.7.0/idlelib/RstripExtension.py copy to lib-python/modified-2.7.0/idlelib/RstripExtension.py diff --git a/lib-python/2.7.0/idlelib/ScrolledList.py b/lib-python/modified-2.7.0/idlelib/ScrolledList.py copy from lib-python/2.7.0/idlelib/ScrolledList.py copy to lib-python/modified-2.7.0/idlelib/ScrolledList.py diff --git a/lib-python/2.7.0/idlelib/ColorDelegator.py b/lib-python/modified-2.7.0/idlelib/ColorDelegator.py copy from lib-python/2.7.0/idlelib/ColorDelegator.py copy to lib-python/modified-2.7.0/idlelib/ColorDelegator.py diff --git a/lib-python/2.7.0/idlelib/CallTipWindow.py b/lib-python/modified-2.7.0/idlelib/CallTipWindow.py copy from lib-python/2.7.0/idlelib/CallTipWindow.py copy to lib-python/modified-2.7.0/idlelib/CallTipWindow.py diff --git a/lib-python/2.7.0/idlelib/Debugger.py b/lib-python/modified-2.7.0/idlelib/Debugger.py copy from lib-python/2.7.0/idlelib/Debugger.py copy to lib-python/modified-2.7.0/idlelib/Debugger.py diff --git a/lib-python/2.7.0/idlelib/UndoDelegator.py b/lib-python/modified-2.7.0/idlelib/UndoDelegator.py copy from lib-python/2.7.0/idlelib/UndoDelegator.py copy to lib-python/modified-2.7.0/idlelib/UndoDelegator.py diff --git a/lib-python/2.7.0/idlelib/Delegator.py b/lib-python/modified-2.7.0/idlelib/Delegator.py copy from lib-python/2.7.0/idlelib/Delegator.py copy to 
lib-python/modified-2.7.0/idlelib/Delegator.py diff --git a/lib-python/2.7.0/idlelib/configDialog.py b/lib-python/modified-2.7.0/idlelib/configDialog.py copy from lib-python/2.7.0/idlelib/configDialog.py copy to lib-python/modified-2.7.0/idlelib/configDialog.py diff --git a/lib-python/2.7.0/idlelib/RemoteDebugger.py b/lib-python/modified-2.7.0/idlelib/RemoteDebugger.py copy from lib-python/2.7.0/idlelib/RemoteDebugger.py copy to lib-python/modified-2.7.0/idlelib/RemoteDebugger.py diff --git a/lib-python/2.7.0/idlelib/AutoComplete.py b/lib-python/modified-2.7.0/idlelib/AutoComplete.py copy from lib-python/2.7.0/idlelib/AutoComplete.py copy to lib-python/modified-2.7.0/idlelib/AutoComplete.py diff --git a/lib-python/2.7.0/idlelib/WidgetRedirector.py b/lib-python/modified-2.7.0/idlelib/WidgetRedirector.py copy from lib-python/2.7.0/idlelib/WidgetRedirector.py copy to lib-python/modified-2.7.0/idlelib/WidgetRedirector.py diff --git a/lib-python/2.7.0/idlelib/configHelpSourceEdit.py b/lib-python/modified-2.7.0/idlelib/configHelpSourceEdit.py copy from lib-python/2.7.0/idlelib/configHelpSourceEdit.py copy to lib-python/modified-2.7.0/idlelib/configHelpSourceEdit.py diff --git a/lib-python/2.7.0/idlelib/ChangeLog b/lib-python/modified-2.7.0/idlelib/ChangeLog copy from lib-python/2.7.0/idlelib/ChangeLog copy to lib-python/modified-2.7.0/idlelib/ChangeLog diff --git a/lib-python/2.7.0/idlelib/AutoCompleteWindow.py b/lib-python/modified-2.7.0/idlelib/AutoCompleteWindow.py copy from lib-python/2.7.0/idlelib/AutoCompleteWindow.py copy to lib-python/modified-2.7.0/idlelib/AutoCompleteWindow.py diff --git a/lib-python/2.7.0/idlelib/NEWS.txt b/lib-python/modified-2.7.0/idlelib/NEWS.txt copy from lib-python/2.7.0/idlelib/NEWS.txt copy to lib-python/modified-2.7.0/idlelib/NEWS.txt diff --git a/lib-python/2.7.0/idlelib/config-highlight.def b/lib-python/modified-2.7.0/idlelib/config-highlight.def copy from lib-python/2.7.0/idlelib/config-highlight.def copy to 
lib-python/modified-2.7.0/idlelib/config-highlight.def diff --git a/lib-python/2.7.0/idlelib/configSectionNameDialog.py b/lib-python/modified-2.7.0/idlelib/configSectionNameDialog.py copy from lib-python/2.7.0/idlelib/configSectionNameDialog.py copy to lib-python/modified-2.7.0/idlelib/configSectionNameDialog.py diff --git a/lib-python/2.7.0/idlelib/SearchEngine.py b/lib-python/modified-2.7.0/idlelib/SearchEngine.py copy from lib-python/2.7.0/idlelib/SearchEngine.py copy to lib-python/modified-2.7.0/idlelib/SearchEngine.py diff --git a/lib-python/2.7.0/idlelib/MultiCall.py b/lib-python/modified-2.7.0/idlelib/MultiCall.py copy from lib-python/2.7.0/idlelib/MultiCall.py copy to lib-python/modified-2.7.0/idlelib/MultiCall.py diff --git a/lib-python/2.7.0/idlelib/keybindingDialog.py b/lib-python/modified-2.7.0/idlelib/keybindingDialog.py copy from lib-python/2.7.0/idlelib/keybindingDialog.py copy to lib-python/modified-2.7.0/idlelib/keybindingDialog.py diff --git a/lib-python/2.7.0/idlelib/idle.py b/lib-python/modified-2.7.0/idlelib/idle.py copy from lib-python/2.7.0/idlelib/idle.py copy to lib-python/modified-2.7.0/idlelib/idle.py diff --git a/lib-python/2.7.0/idlelib/Icons/idle.icns b/lib-python/modified-2.7.0/idlelib/Icons/idle.icns copy from lib-python/2.7.0/idlelib/Icons/idle.icns copy to lib-python/modified-2.7.0/idlelib/Icons/idle.icns diff --git a/lib-python/2.7.0/idlelib/tabbedpages.py b/lib-python/modified-2.7.0/idlelib/tabbedpages.py copy from lib-python/2.7.0/idlelib/tabbedpages.py copy to lib-python/modified-2.7.0/idlelib/tabbedpages.py diff --git a/lib-python/2.7.0/idlelib/IOBinding.py b/lib-python/modified-2.7.0/idlelib/IOBinding.py copy from lib-python/2.7.0/idlelib/IOBinding.py copy to lib-python/modified-2.7.0/idlelib/IOBinding.py diff --git a/lib-python/2.7.0/idlelib/Bindings.py b/lib-python/modified-2.7.0/idlelib/Bindings.py copy from lib-python/2.7.0/idlelib/Bindings.py copy to lib-python/modified-2.7.0/idlelib/Bindings.py diff --git 
a/lib-python/2.7.0/idlelib/Icons/python.gif b/lib-python/modified-2.7.0/idlelib/Icons/python.gif copy from lib-python/2.7.0/idlelib/Icons/python.gif copy to lib-python/modified-2.7.0/idlelib/Icons/python.gif diff --git a/lib-python/2.7.0/idlelib/__init__.py b/lib-python/modified-2.7.0/idlelib/__init__.py copy from lib-python/2.7.0/idlelib/__init__.py copy to lib-python/modified-2.7.0/idlelib/__init__.py diff --git a/lib-python/2.7.0/idlelib/FileList.py b/lib-python/modified-2.7.0/idlelib/FileList.py copy from lib-python/2.7.0/idlelib/FileList.py copy to lib-python/modified-2.7.0/idlelib/FileList.py diff --git a/lib-python/2.7.0/idlelib/TODO.txt b/lib-python/modified-2.7.0/idlelib/TODO.txt copy from lib-python/2.7.0/idlelib/TODO.txt copy to lib-python/modified-2.7.0/idlelib/TODO.txt diff --git a/lib-python/2.7.0/idlelib/WindowList.py b/lib-python/modified-2.7.0/idlelib/WindowList.py copy from lib-python/2.7.0/idlelib/WindowList.py copy to lib-python/modified-2.7.0/idlelib/WindowList.py diff --git a/lib-python/2.7.0/idlelib/Icons/minusnode.gif b/lib-python/modified-2.7.0/idlelib/Icons/minusnode.gif copy from lib-python/2.7.0/idlelib/Icons/minusnode.gif copy to lib-python/modified-2.7.0/idlelib/Icons/minusnode.gif diff --git a/lib-python/2.7.0/idlelib/CREDITS.txt b/lib-python/modified-2.7.0/idlelib/CREDITS.txt copy from lib-python/2.7.0/idlelib/CREDITS.txt copy to lib-python/modified-2.7.0/idlelib/CREDITS.txt diff --git a/lib-python/2.7.0/idlelib/aboutDialog.py b/lib-python/modified-2.7.0/idlelib/aboutDialog.py copy from lib-python/2.7.0/idlelib/aboutDialog.py copy to lib-python/modified-2.7.0/idlelib/aboutDialog.py diff --git a/lib-python/2.7.0/idlelib/dynOptionMenuWidget.py b/lib-python/modified-2.7.0/idlelib/dynOptionMenuWidget.py copy from lib-python/2.7.0/idlelib/dynOptionMenuWidget.py copy to lib-python/modified-2.7.0/idlelib/dynOptionMenuWidget.py diff --git a/lib-python/2.7.0/idlelib/configHandler.py b/lib-python/modified-2.7.0/idlelib/configHandler.py copy 
from lib-python/2.7.0/idlelib/configHandler.py copy to lib-python/modified-2.7.0/idlelib/configHandler.py diff --git a/lib-python/2.7.0/idlelib/MultiStatusBar.py b/lib-python/modified-2.7.0/idlelib/MultiStatusBar.py copy from lib-python/2.7.0/idlelib/MultiStatusBar.py copy to lib-python/modified-2.7.0/idlelib/MultiStatusBar.py diff --git a/lib-python/2.7.0/idlelib/help.txt b/lib-python/modified-2.7.0/idlelib/help.txt copy from lib-python/2.7.0/idlelib/help.txt copy to lib-python/modified-2.7.0/idlelib/help.txt diff --git a/lib-python/2.7.0/idlelib/textView.py b/lib-python/modified-2.7.0/idlelib/textView.py copy from lib-python/2.7.0/idlelib/textView.py copy to lib-python/modified-2.7.0/idlelib/textView.py diff --git a/lib-python/2.7.0/idlelib/StackViewer.py b/lib-python/modified-2.7.0/idlelib/StackViewer.py copy from lib-python/2.7.0/idlelib/StackViewer.py copy to lib-python/modified-2.7.0/idlelib/StackViewer.py diff --git a/lib-python/2.7.0/idlelib/ClassBrowser.py b/lib-python/modified-2.7.0/idlelib/ClassBrowser.py copy from lib-python/2.7.0/idlelib/ClassBrowser.py copy to lib-python/modified-2.7.0/idlelib/ClassBrowser.py diff --git a/lib-python/2.7.0/idlelib/GrepDialog.py b/lib-python/modified-2.7.0/idlelib/GrepDialog.py copy from lib-python/2.7.0/idlelib/GrepDialog.py copy to lib-python/modified-2.7.0/idlelib/GrepDialog.py diff --git a/lib-python/2.7.0/idlelib/HISTORY.txt b/lib-python/modified-2.7.0/idlelib/HISTORY.txt copy from lib-python/2.7.0/idlelib/HISTORY.txt copy to lib-python/modified-2.7.0/idlelib/HISTORY.txt diff --git a/lib-python/2.7.0/idlelib/macosxSupport.py b/lib-python/modified-2.7.0/idlelib/macosxSupport.py copy from lib-python/2.7.0/idlelib/macosxSupport.py copy to lib-python/modified-2.7.0/idlelib/macosxSupport.py diff --git a/lib-python/2.7.0/idlelib/SearchDialogBase.py b/lib-python/modified-2.7.0/idlelib/SearchDialogBase.py copy from lib-python/2.7.0/idlelib/SearchDialogBase.py copy to lib-python/modified-2.7.0/idlelib/SearchDialogBase.py 
diff --git a/lib-python/2.7.0/idlelib/ToolTip.py b/lib-python/modified-2.7.0/idlelib/ToolTip.py copy from lib-python/2.7.0/idlelib/ToolTip.py copy to lib-python/modified-2.7.0/idlelib/ToolTip.py diff --git a/lib-python/2.7.0/idlelib/PyShell.py b/lib-python/modified-2.7.0/idlelib/PyShell.py copy from lib-python/2.7.0/idlelib/PyShell.py copy to lib-python/modified-2.7.0/idlelib/PyShell.py diff --git a/lib-python/2.7.0/idlelib/ReplaceDialog.py b/lib-python/modified-2.7.0/idlelib/ReplaceDialog.py copy from lib-python/2.7.0/idlelib/ReplaceDialog.py copy to lib-python/modified-2.7.0/idlelib/ReplaceDialog.py diff --git a/lib-python/2.7.0/idlelib/config-main.def b/lib-python/modified-2.7.0/idlelib/config-main.def copy from lib-python/2.7.0/idlelib/config-main.def copy to lib-python/modified-2.7.0/idlelib/config-main.def diff --git a/lib-python/2.7.0/idlelib/CallTips.py b/lib-python/modified-2.7.0/idlelib/CallTips.py copy from lib-python/2.7.0/idlelib/CallTips.py copy to lib-python/modified-2.7.0/idlelib/CallTips.py diff --git a/lib-python/2.7.0/idlelib/idle.bat b/lib-python/modified-2.7.0/idlelib/idle.bat copy from lib-python/2.7.0/idlelib/idle.bat copy to lib-python/modified-2.7.0/idlelib/idle.bat diff --git a/lib-python/2.7.0/idlelib/extend.txt b/lib-python/modified-2.7.0/idlelib/extend.txt copy from lib-python/2.7.0/idlelib/extend.txt copy to lib-python/modified-2.7.0/idlelib/extend.txt diff --git a/lib-python/2.7.0/idlelib/OutputWindow.py b/lib-python/modified-2.7.0/idlelib/OutputWindow.py copy from lib-python/2.7.0/idlelib/OutputWindow.py copy to lib-python/modified-2.7.0/idlelib/OutputWindow.py diff --git a/lib-python/2.7.0/idlelib/idlever.py b/lib-python/modified-2.7.0/idlelib/idlever.py copy from lib-python/2.7.0/idlelib/idlever.py copy to lib-python/modified-2.7.0/idlelib/idlever.py diff --git a/lib-python/2.7.0/idlelib/config-extensions.def b/lib-python/modified-2.7.0/idlelib/config-extensions.def copy from lib-python/2.7.0/idlelib/config-extensions.def copy to 
lib-python/modified-2.7.0/idlelib/config-extensions.def From commits-noreply at bitbucket.org Tue Apr 19 13:21:04 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 19 Apr 2011 13:21:04 +0200 (CEST) Subject: [pypy-svn] pypy default: fix for pypy, look at the comment for the details; this makes syntax highlighting work inside IDLE Message-ID: <20110419112104.6FDC0282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43478:f8c673fee06d Date: 2011-04-19 13:19 +0200 http://bitbucket.org/pypy/pypy/changeset/f8c673fee06d/ Log: fix for pypy, look at the comment for the details; this makes syntax highlighting work inside IDLE diff --git a/lib-python/modified-2.7.0/idlelib/Delegator.py b/lib-python/modified-2.7.0/idlelib/Delegator.py --- a/lib-python/modified-2.7.0/idlelib/Delegator.py +++ b/lib-python/modified-2.7.0/idlelib/Delegator.py @@ -12,6 +12,14 @@ self.__cache[name] = attr return attr + def __nonzero__(self): + # this is needed for PyPy: else, if self.delegate is None, the + # __getattr__ above picks NoneType.__nonzero__, which returns + # False. Thus, bool(Delegator()) is False as well, but it's not what + # we want. 
On CPython, bool(Delegator()) is True because NoneType + # does not have __nonzero__ + return True + def resetcache(self): for key in self.__cache.keys(): try: From commits-noreply at bitbucket.org Tue Apr 19 14:40:58 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 14:40:58 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: Merge ovf ops with the following guard Message-ID: <20110419124058.EB4C2282C1A@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43479:ae8e7dd2a258 Date: 2011-04-19 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/ae8e7dd2a258/ Log: Merge ovf ops with the following guard diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -624,10 +624,11 @@ self._insert_checks() def can_merge_with_next_guard(self, op, i, operations): - if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER: + num = op.getopnum() + if num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER: assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True - if op.getopnum() == rop.INT_MUL_OVF: + if num == rop.INT_MUL_OVF or num == rop.INT_ADD_OVF or num == rop.INT_SUB_OVF: opnum = operations[i + 1].getopnum() assert opnum == rop.GUARD_OVERFLOW or opnum == rop.GUARD_NO_OVERFLOW return True diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -31,30 +31,38 @@ _mixin_ = True - def emit_op_int_add(self, op, arglocs, regalloc, fcond): + def emit_op_int_add(self, op, arglocs, regalloc, fcond, flags=False): l0, l1, res = arglocs + if flags: + s = 1 + else: + s = 0 if l0.is_imm(): - self.mc.ADD_ri(res.value, l1.value, imm=l0.value, s=1) + self.mc.ADD_ri(res.value, l1.value, imm=l0.value, s=s) elif l1.is_imm(): - self.mc.ADD_ri(res.value, l0.value, imm=l1.value, s=1) + 
self.mc.ADD_ri(res.value, l0.value, imm=l1.value, s=s) else: self.mc.ADD_rr(res.value, l0.value, l1.value, s=1) return fcond - def emit_op_int_sub(self, op, arglocs, regalloc, fcond): + def emit_op_int_sub(self, op, arglocs, regalloc, fcond, flags=False): l0, l1, res = arglocs + if flags: + s = 1 + else: + s = 0 if l0.is_imm(): value = l0.getint() assert value >= 0 # reverse substract ftw - self.mc.RSB_ri(res.value, l1.value, value, s=1) + self.mc.RSB_ri(res.value, l1.value, value, s=s) elif l1.is_imm(): value = l1.getint() assert value >= 0 - self.mc.SUB_ri(res.value, l0.value, value, s=1) + self.mc.SUB_ri(res.value, l0.value, value, s=s) else: - self.mc.SUB_rr(res.value, l0.value, l1.value, s=1) + self.mc.SUB_rr(res.value, l0.value, l1.value, s=s) return fcond @@ -80,6 +88,17 @@ assert 0 return fcond + def emit_guard_int_add_ovf(self, op, guard, arglocs, regalloc, fcond): + import pdb; pdb.set_trace() + self.emit_op_int_add(op, arglocs[0:3], regalloc, fcond, flags=True) + self._emit_guard_overflow(guard, arglocs[3:], fcond) + return fcond + + def emit_guard_int_sub_ovf(self, op, guard, arglocs, regalloc, fcond): + self.emit_op_int_sub(op, arglocs[0:3], regalloc, fcond, flags=True) + self._emit_guard_overflow(guard, arglocs[3:], fcond) + return fcond + emit_op_int_floordiv = gen_emit_op_by_helper_call('DIV') emit_op_int_mod = gen_emit_op_by_helper_call('MOD') emit_op_uint_floordiv = gen_emit_op_by_helper_call('UDIV') @@ -149,6 +168,15 @@ descr._failure_recovery_code = memaddr return c.AL + def _emit_guard_overflow(self, guard, failargs, fcond): + if guard.getopnum() == rop.GUARD_OVERFLOW: + fcond = self._emit_guard(guard, failargs, c.VS) + elif guard.getopnum() == rop.GUARD_NO_OVERFLOW: + fcond = self._emit_guard(guard, failargs, c.VC) + else: + assert 0 + return fcond + def emit_op_guard_true(self, op, arglocs, regalloc, fcond): l0 = arglocs[0] failargs = arglocs[1:] diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- 
a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -314,9 +314,7 @@ boxes.append(box) l1, box = self._ensure_value_is_boxed(a1, [box]) boxes.append(box) - self.possibly_free_vars(boxes) res = self.force_allocate_reg(op.result) - self.possibly_free_var(op.result) return [l0, l1, res] def prepare_op_int_sub(self, op, fcond): @@ -337,9 +335,7 @@ boxes.append(box) l1, box = self._ensure_value_is_boxed(a1, boxes) boxes.append(box) - self.possibly_free_vars(boxes) res = self.force_allocate_reg(op.result) - self.possibly_free_var(op.result) return [l0, l1, res] def prepare_op_int_mul(self, op, fcond): @@ -375,6 +371,21 @@ return args + def prepare_guard_int_add_ovf(self, op, guard, fcond): + import pdb; pdb.set_trace() + boxes = self.prepare_op_int_add(op, fcond) + locs = self._prepare_guard(guard, boxes) + self.possibly_free_vars_for_op(op) + self.possibly_free_vars(guard.getfailargs()) + return locs + + def prepare_guard_int_sub_ovf(self, op, guard, fcond): + boxes = self.prepare_op_int_sub(op, fcond) + locs = self._prepare_guard(guard, boxes) + self.possibly_free_vars_for_op(op) + self.possibly_free_vars(guard.getfailargs()) + return locs + prepare_op_int_floordiv = prepare_op_by_helper_call() prepare_op_int_mod = prepare_op_by_helper_call() prepare_op_uint_floordiv = prepare_op_by_helper_call() From commits-noreply at bitbucket.org Tue Apr 19 14:41:00 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 14:41:00 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: remove pdbs Message-ID: <20110419124100.45597282C1A@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43480:858c22e56a22 Date: 2011-04-19 12:22 +0200 http://bitbucket.org/pypy/pypy/changeset/858c22e56a22/ Log: remove pdbs diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py --- a/pypy/jit/backend/arm/opassembler.py +++ b/pypy/jit/backend/arm/opassembler.py @@ -89,7 +89,6 @@ return fcond def 
emit_guard_int_add_ovf(self, op, guard, arglocs, regalloc, fcond): - import pdb; pdb.set_trace() self.emit_op_int_add(op, arglocs[0:3], regalloc, fcond, flags=True) self._emit_guard_overflow(guard, arglocs[3:], fcond) return fcond diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -372,7 +372,6 @@ def prepare_guard_int_add_ovf(self, op, guard, fcond): - import pdb; pdb.set_trace() boxes = self.prepare_op_int_add(op, fcond) locs = self._prepare_guard(guard, boxes) self.possibly_free_vars_for_op(op) From commits-noreply at bitbucket.org Tue Apr 19 14:41:03 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 14:41:03 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: register allocation related fixes due to operation merges Message-ID: <20110419124103.015E6282C1E@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43481:cf45d0813fb5 Date: 2011-04-19 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/cf45d0813fb5/ Log: register allocation related fixes due to operation merges diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py --- a/pypy/jit/backend/arm/assembler.py +++ b/pypy/jit/backend/arm/assembler.py @@ -680,7 +680,6 @@ self.mc.gen_load_int(r.ip.value, value.getint()) self.mc.VLDR(loc.value, r.ip.value) - # XXX needs float support def regalloc_mov(self, prev_loc, loc, cond=c.AL): if prev_loc.is_imm(): if loc.is_reg(): diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py --- a/pypy/jit/backend/arm/regalloc.py +++ b/pypy/jit/backend/arm/regalloc.py @@ -296,7 +296,7 @@ else: self.rm._sync_var(v) - def prepare_op_int_add(self, op, fcond): + def _prepare_op_int_add(self, op, fcond): boxes = list(op.getarglist()) a0, a1 = boxes imm_a0 = _check_imm_arg(a0) @@ -314,10 +314,15 @@ boxes.append(box) l1, box = self._ensure_value_is_boxed(a1, [box]) 
boxes.append(box) + return [l0, l1], boxes + + def prepare_op_int_add(self, op, fcond): + locs, boxes = self._prepare_op_int_add(op, fcond) + self.possibly_free_vars(boxes) res = self.force_allocate_reg(op.result) - return [l0, l1, res] + return locs + [res] - def prepare_op_int_sub(self, op, fcond): + def _prepare_op_int_sub(self, op, fcond): boxes = list(op.getarglist()) a0, a1 = boxes imm_a0 = _check_imm_arg(a0) @@ -335,8 +340,13 @@ boxes.append(box) l1, box = self._ensure_value_is_boxed(a1, boxes) boxes.append(box) + return [l0, l1], boxes + + def prepare_op_int_sub(self, op, fcond): + locs, boxes = self._prepare_op_int_sub(op, fcond) + self.possibly_free_vars(boxes) res = self.force_allocate_reg(op.result) - return [l0, l1, res] + return locs + [res] def prepare_op_int_mul(self, op, fcond): boxes = list(op.getarglist()) @@ -372,15 +382,21 @@ def prepare_guard_int_add_ovf(self, op, guard, fcond): - boxes = self.prepare_op_int_add(op, fcond) - locs = self._prepare_guard(guard, boxes) + locs, boxes = self._prepare_op_int_add(op, fcond) + res = self.force_allocate_reg(op.result) + locs.append(res) + locs = self._prepare_guard(guard, locs) + self.possibly_free_vars(boxes) self.possibly_free_vars_for_op(op) self.possibly_free_vars(guard.getfailargs()) return locs def prepare_guard_int_sub_ovf(self, op, guard, fcond): - boxes = self.prepare_op_int_sub(op, fcond) - locs = self._prepare_guard(guard, boxes) + locs, boxes = self._prepare_op_int_sub(op, fcond) + res = self.force_allocate_reg(op.result) + locs.append(res) + locs = self._prepare_guard(guard, locs) + self.possibly_free_vars(boxes) self.possibly_free_vars_for_op(op) self.possibly_free_vars(guard.getfailargs()) return locs From commits-noreply at bitbucket.org Tue Apr 19 14:41:04 2011 From: commits-noreply at bitbucket.org (bivab) Date: Tue, 19 Apr 2011 14:41:04 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: import changes to compute_vars_longevity from default Message-ID: 
<20110419124104.39327282C1B@codespeak.net> Author: David Schneider Branch: arm-backed-float Changeset: r43482:3c3e3de9d860 Date: 2011-04-19 14:40 +0200 http://bitbucket.org/pypy/pypy/changeset/3c3e3de9d860/ Log: import changes to compute_vars_longevity from default diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py --- a/pypy/jit/backend/llsupport/regalloc.py +++ b/pypy/jit/backend/llsupport/regalloc.py @@ -380,6 +380,11 @@ last_used = {} for i in range(len(operations)-1, -1, -1): op = operations[i] + if op.result: + if op.result not in last_used and op.has_no_side_effect(): + continue + assert op.result not in produced + produced[op.result] = i for j in range(op.numargs()): arg = op.getarg(j) if isinstance(arg, Box) and arg not in last_used: @@ -391,12 +396,7 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - if op.result: - if op.result not in last_used and op.has_no_side_effect(): - continue - assert op.result not in produced - produced[op.result] = i - + longevity = {} for arg in produced: if arg in last_used: From commits-noreply at bitbucket.org Tue Apr 19 14:44:41 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Tue, 19 Apr 2011 14:44:41 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix JIT tests that broke because they were more optimized by inlining. Message-ID: <20110419124441.91F29282C1A@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43483:bc75e2dff401 Date: 2011-04-19 08:44 -0400 http://bitbucket.org/pypy/pypy/changeset/bc75e2dff401/ Log: Fix JIT tests that broke because they were more optimized by inlining. 
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -16,7 +16,7 @@ from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin -class BasicTests: +class BasicTests: def test_basic(self): def f(x, y): @@ -36,7 +36,7 @@ def test_uint_floordiv(self): from pypy.rlib.rarithmetic import r_uint - + def f(a, b): a = r_uint(a) b = r_uint(b) @@ -251,7 +251,7 @@ res = self.meta_interp(f, [6, 15], no_stats=True) finally: history.TreeLoop.__init__ = old_init - + assert res == f(6, 15) gc.collect() @@ -839,7 +839,7 @@ def test_bridge_from_interpreter_4(self): jitdriver = JitDriver(reds = ['n', 'k'], greens = []) - + def f(n, k): while n > 0: jitdriver.can_enter_jit(n=n, k=k) @@ -852,7 +852,7 @@ from pypy.rpython.test.test_llinterp import get_interpreter, clear_tcache from pypy.jit.metainterp.warmspot import WarmRunnerDesc - + interp, graph = get_interpreter(f, [0, 0], backendopt=False, inline_threshold=0, type_system=self.type_system) clear_tcache() @@ -1207,6 +1207,14 @@ def test_residual_external_call(self): import math myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) + + # When this test was written ll_math couldn't be inlined, now it can, + # instead of rewriting this test, just ensure that an external call is + # still generated by wrapping the function. 
+ @dont_look_inside + def modf(x): + return math.modf(x) + def f(x, y): x = float(x) res = 0.0 @@ -1214,7 +1222,7 @@ myjitdriver.can_enter_jit(x=x, y=y, res=res) myjitdriver.jit_merge_point(x=x, y=y, res=res) # this is an external call that the default policy ignores - rpart, ipart = math.modf(x) + rpart, ipart = modf(x) res += ipart y -= 1 return res @@ -1248,7 +1256,7 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) + self.check_loops(guard_class=0, guard_value=2) self.check_loops(guard_class=0, guard_value=5, everywhere=True) def test_merge_guardnonnull_guardclass(self): @@ -1542,9 +1550,9 @@ def test_raw_malloc_and_access(self): from pypy.rpython.lltypesystem import rffi - + TP = rffi.CArray(lltype.Signed) - + def f(n): a = lltype.malloc(TP, n, flavor='raw') a[0] = n @@ -1557,9 +1565,9 @@ def test_raw_malloc_and_access_float(self): from pypy.rpython.lltypesystem import rffi - + TP = rffi.CArray(lltype.Float) - + def f(n, f): a = lltype.malloc(TP, n, flavor='raw') a[0] = f @@ -1862,7 +1870,7 @@ def test_dont_trace_every_iteration(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) - + def main(a, b): i = sa = 0 #while i < 200: @@ -1958,7 +1966,7 @@ return n res = self.meta_interp(f, [sys.maxint>>10]) assert res == 11 - self.check_tree_loop_count(2) + self.check_tree_loop_count(2) def test_wrap_around_sub(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'n']) @@ -1974,7 +1982,7 @@ return n res = self.meta_interp(f, [10-sys.maxint]) assert res == 12 - self.check_tree_loop_count(2) + self.check_tree_loop_count(2) @@ -2052,7 +2060,7 @@ policy=StopAtXPolicy(getcls), enable_opts='') assert not res - + res = self.meta_interp(f, [0, 100], policy=StopAtXPolicy(getcls), enable_opts='') @@ -2072,7 +2080,7 @@ def test_oops_on_nongc(self): from pypy.rpython.lltypesystem import lltype - + TP = lltype.Struct('x') def f(i1, i2): p1 = prebuilt[i1] @@ -2144,7 +2152,7 @@ 
def f(): a = A(0) - + while a.i < 10: jitdriver.jit_merge_point(a=a) jitdriver.can_enter_jit(a=a) From commits-noreply at bitbucket.org Tue Apr 19 15:32:53 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Tue, 19 Apr 2011 15:32:53 +0200 (CEST) Subject: [pypy-svn] pypy default: Add a verbatim copy of bytesobject.h Message-ID: <20110419133253.4B33F282C1A@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43484:44721f35217a Date: 2011-04-19 09:25 +0200 http://bitbucket.org/pypy/pypy/changeset/44721f35217a/ Log: Add a verbatim copy of bytesobject.h diff --git a/pypy/module/cpyext/include/bytesobject.h b/pypy/module/cpyext/include/bytesobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/bytesobject.h @@ -0,0 +1,27 @@ +#define PyBytesObject PyStringObject +#define PyBytes_Type PyString_Type + +#define PyBytes_Check PyString_Check +#define PyBytes_CheckExact PyString_CheckExact +#define PyBytes_CHECK_INTERNED PyString_CHECK_INTERNED +#define PyBytes_AS_STRING PyString_AS_STRING +#define PyBytes_GET_SIZE PyString_GET_SIZE +#define Py_TPFLAGS_BYTES_SUBCLASS Py_TPFLAGS_STRING_SUBCLASS + +#define PyBytes_FromStringAndSize PyString_FromStringAndSize +#define PyBytes_FromString PyString_FromString +#define PyBytes_FromFormatV PyString_FromFormatV +#define PyBytes_FromFormat PyString_FromFormat +#define PyBytes_Size PyString_Size +#define PyBytes_AsString PyString_AsString +#define PyBytes_Repr PyString_Repr +#define PyBytes_Concat PyString_Concat +#define PyBytes_ConcatAndDel PyString_ConcatAndDel +#define _PyBytes_Resize _PyString_Resize +#define _PyBytes_Eq _PyString_Eq +#define PyBytes_Format PyString_Format +#define _PyBytes_FormatLong _PyString_FormatLong +#define PyBytes_DecodeEscape PyString_DecodeEscape +#define _PyBytes_Join _PyString_Join +#define PyBytes_AsStringAndSize PyString_AsStringAndSize +#define _PyBytes_InsertThousandsGrouping _PyString_InsertThousandsGrouping diff --git a/pypy/module/cpyext/include/Python.h 
b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -117,6 +117,7 @@ #include "pycobject.h" #include "pycapsule.h" #include "bufferobject.h" +#include "bytesobject.h" #include "sliceobject.h" #include "datetime.h" #include "pystate.h" From commits-noreply at bitbucket.org Tue Apr 19 15:32:54 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Tue, 19 Apr 2011 15:32:54 +0200 (CEST) Subject: [pypy-svn] pypy default: Add a minimal test; this module would not even import... Message-ID: <20110419133254.7C3DB282C1A@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43485:1615dfd7d8f1 Date: 2011-04-19 13:50 +0200 http://bitbucket.org/pypy/pypy/changeset/1615dfd7d8f1/ Log: Add a minimal test; this module would not even import... diff --git a/pypy/module/_demo/demo.py b/pypy/module/_demo/demo.py --- a/pypy/module/_demo/demo.py +++ b/pypy/module/_demo/demo.py @@ -10,7 +10,7 @@ time_t = rffi_platform.getsimpletype('time_t', '#include ', rffi.LONG) eci = ExternalCompilationInfo(includes=['time.h']) -time = rffi.llexternal('time', [int], time_t, +time = rffi.llexternal('time', [lltype.Signed], time_t, compilation_info=eci) def get(space, name): diff --git a/pypy/module/_demo/test/test_import.py b/pypy/module/_demo/test/test_import.py --- a/pypy/module/_demo/test/test_import.py +++ b/pypy/module/_demo/test/test_import.py @@ -26,3 +26,5 @@ w_demo = space.call(w_import, space.newlist([space.wrap('_demo')])) assert _demo.Module.demo_events == ['setup', 'startup'] + + assert space.getattr(w_demo, space.wrap('measuretime')) From commits-noreply at bitbucket.org Tue Apr 19 15:38:17 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 19 Apr 2011 15:38:17 +0200 (CEST) Subject: [pypy-svn] tkinter default: include tkinter.h Message-ID: <20110419133817.4D4F9282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r3:2c7ef9dafe0d Date: 2011-04-19 15:37 +0200 
http://bitbucket.org/pypy/tkinter/changeset/2c7ef9dafe0d/ Log: include tkinter.h diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, Extension setup(name="tkinter-pypy", - version="8.4", + version="0.1", description="Python interface to Tk GUI toolkit (for PyPy)", author="Python development team and PyPy development team", author_email="pypy-dev at codespeak.net", @@ -15,5 +15,5 @@ library_dirs=["/usr/X11R6/lib"], include_dirs=["/usr/include/tcl", "/usr/include/tk"], libraries=["tk8.4", "tcl8.4", "X11"], - )] + )], ) diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,1 @@ +include src/tkinter.h From commits-noreply at bitbucket.org Tue Apr 19 16:32:55 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Tue, 19 Apr 2011 16:32:55 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: Remove debug prints Message-ID: <20110419143255.A312F282C1A@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43486:35f61ebcb2cf Date: 2011-04-19 10:31 -0400 http://bitbucket.org/pypy/pypy/changeset/35f61ebcb2cf/ Log: Remove debug prints diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -773,9 +773,7 @@ break; } case 's': {/* string */ - printf("Hello, string\n"); if (*format == '*') { - printf("hello, buffer\n"); Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { From commits-noreply at bitbucket.org Tue Apr 19 17:59:53 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 19 Apr 2011 17:59:53 +0200 (CEST) Subject: [pypy-svn] pypy use-out-of-line-guards: Improve the test and a fix Message-ID: <20110419155953.AC36D282C1A@codespeak.net> Author: Maciej Fijalkowski Branch: use-out-of-line-guards Changeset: r43487:7ace34a3a062 Date: 2011-04-19 17:59 +0200 
http://bitbucket.org/pypy/pypy/changeset/7ace34a3a062/ Log: Improve the test and a fix diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -257,9 +257,12 @@ total = f(foo) foo.a = 2 total += f(foo) + foo.a = 1 + total += f(foo) return total res = self.meta_interp(main, []) + self.check_loop_count(7) assert res == main() class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ b/pypy/jit/backend/x86/runner.py @@ -152,6 +152,8 @@ mc = codebuf.MachineCodeBlockWrapper() mc.JMP_l(tgt) mc.copy_to_raw_memory(addr - 1) + # positions invalidated + looptoken.compiled_loop_token.invalidate_positions = [] class CPU386(AbstractX86CPU): WORD = 4 From commits-noreply at bitbucket.org Tue Apr 19 17:59:55 2011 From: commits-noreply at bitbucket.org (fijal) Date: Tue, 19 Apr 2011 17:59:55 +0200 (CEST) Subject: [pypy-svn] pypy out-of-line-guards-2: merge it here, this goes under review Message-ID: <20110419155955.F10CB282C1E@codespeak.net> Author: Maciej Fijalkowski Branch: out-of-line-guards-2 Changeset: r43488:91846e44e16c Date: 2011-04-19 18:00 +0200 http://bitbucket.org/pypy/pypy/changeset/91846e44e16c/ Log: merge it here, this goes under review diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py --- a/pypy/jit/metainterp/test/test_quasiimmut.py +++ b/pypy/jit/metainterp/test/test_quasiimmut.py @@ -257,9 +257,12 @@ total = f(foo) foo.a = 2 total += f(foo) + foo.a = 1 + total += f(foo) return total res = self.meta_interp(main, []) + self.check_loop_count(7) assert res == main() class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin): diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py --- a/pypy/jit/backend/x86/runner.py +++ 
b/pypy/jit/backend/x86/runner.py @@ -152,6 +152,8 @@ mc = codebuf.MachineCodeBlockWrapper() mc.JMP_l(tgt) mc.copy_to_raw_memory(addr - 1) + # positions invalidated + looptoken.compiled_loop_token.invalidate_positions = [] class CPU386(AbstractX86CPU): WORD = 4 From commits-noreply at bitbucket.org Tue Apr 19 22:00:56 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Tue, 19 Apr 2011 22:00:56 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: Comment out the fields of Py_buffer that aren't actually supported/implement yet Message-ID: <20110419200056.81502282C1A@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43489:74a9c91cbac7 Date: 2011-04-19 12:28 -0400 http://bitbucket.org/pypy/pypy/changeset/74a9c91cbac7/ Log: Comment out the fields of Py_buffer that aren't actually supported/implement yet diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -132,17 +132,21 @@ void *buf; PyObject *obj; /* owned reference */ Py_ssize_t len; - Py_ssize_t itemsize; /* This is Py_ssize_t so it can be - pointed to by strides in simple case.*/ - int readonly; - int ndim; - char *format; - Py_ssize_t *shape; - Py_ssize_t *strides; - Py_ssize_t *suboffsets; - Py_ssize_t smalltable[2]; /* static store for shape and strides of - mono-dimensional buffers. */ - void *internal; + + /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + /* Py_ssize_t itemsize; */ + /* int readonly; */ + /* int ndim; */ + /* char *format; */ + /* Py_ssize_t *shape; */ + /* Py_ssize_t *strides; */ + /* Py_ssize_t *suboffsets; */ + + /* static store for shape and strides of + mono-dimensional buffers. 
*/ + /* Py_ssize_t smalltable[2]; */ + /* void *internal; */ } Py_buffer; From commits-noreply at bitbucket.org Tue Apr 19 22:00:58 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Tue, 19 Apr 2011 22:00:58 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: A test for some parts of `t#` and part of an implementation Message-ID: <20110419200058.12A03282C1A@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer Changeset: r43490:6a2f0310d664 Date: 2011-04-19 12:59 -0400 http://bitbucket.org/pypy/pypy/changeset/6a2f0310d664/ Log: A test for some parts of `t#` and part of an implementation diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -1268,24 +1268,28 @@ } case 't': { /* 8-bit character buffer, read-only access */ - Py_FatalError("'t' unsupported"); -#if 0 char **p = va_arg(*p_va, char **); PyBufferProcs *pb = arg->ob_type->tp_as_buffer; Py_ssize_t count; - + printf("this far\n"); + +#if 0 if (*format++ != '#') return converterr( "invalid use of 't' format character", arg, msgbuf, bufsize); +#endif if (!PyType_HasFeature(arg->ob_type, - Py_TPFLAGS_HAVE_GETCHARBUFFER) || - pb == NULL || pb->bf_getcharbuffer == NULL || - pb->bf_getsegcount == NULL) + Py_TPFLAGS_HAVE_GETCHARBUFFER) +#if 0 + || pb == NULL || pb->bf_getcharbuffer == NULL || + pb->bf_getsegcount == NULL +#endif + ) return converterr( "string or read-only character buffer", arg, msgbuf, bufsize); - +#if 0 if (pb->bf_getsegcount(arg, NULL) != 1) return converterr( "string or single-segment read-only buffer", @@ -1295,16 +1299,23 @@ return converterr( "string or pinned buffer", arg, msgbuf, bufsize); - +#endif + printf("this far!\n"); + printf("%p\n", pb->bf_getcharbuffer); count = pb->bf_getcharbuffer(arg, 0, p); + printf("after\n"); +#if 0 if (count < 0) return converterr("(unspecified)", arg, msgbuf, bufsize); +#endif { + printf("fetch size\n"); FETCH_SIZE; + 
printf("did that\n"); STORE_SIZE(count); + printf("store size done\n"); } break; -#endif } default: return converterr("impossible", arg, msgbuf, bufsize); diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -124,3 +124,21 @@ return PyString_FromStringAndSize(buf.buf, buf.len); ''') assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + + + def test_pyarg_parse_charbuf_and_length(self): + """ + The `t#` format specifier can be used to parse a read-only 8-bit + character buffer into a char* and int giving its length in bytes. + """ + charbuf = self.import_parser( + ''' + char *buf; + int len; + if (!PyArg_ParseTuple(args, "t#", &buf, &len)) { + return NULL; + } + return PyString_FromStringAndSize(buf, len); + ''') + raises(TypeError, "charbuf(10)") + assert 'foo\0bar\0baz' == charbuf('foo\0bar\0baz') From commits-noreply at bitbucket.org Tue Apr 19 22:01:03 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Tue, 19 Apr 2011 22:01:03 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-new: Merge the first pyarg branch Message-ID: <20110419200103.F16EE282C1B@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer-new Changeset: r43491:d834b9e5b238 Date: 2011-04-19 16:00 -0400 http://bitbucket.org/pypy/pypy/changeset/d834b9e5b238/ Log: Merge the first pyarg branch diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -16,9 +16,6 @@ */ #define staticforward static -typedef void* Py_buffer; - - #define PyObject_HEAD \ long ob_refcnt; \ struct _typeobject *ob_type; @@ -130,6 +127,29 @@ typedef int (*visitproc)(PyObject *, void *); typedef int (*traverseproc)(PyObject *, visitproc, void *); +/* Py3k buffer interface */ +typedef struct bufferinfo { + void *buf; + PyObject *obj; /* owned reference 
*/ + Py_ssize_t len; + + /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + /* Py_ssize_t itemsize; */ + /* int readonly; */ + /* int ndim; */ + /* char *format; */ + /* Py_ssize_t *shape; */ + /* Py_ssize_t *strides; */ + /* Py_ssize_t *suboffsets; */ + + /* static store for shape and strides of + mono-dimensional buffers. */ + /* Py_ssize_t smalltable[2]; */ + /* void *internal; */ +} Py_buffer; + + typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -411,6 +411,23 @@ PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) +Py_buffer = cpython_struct( + "Py_buffer", ( + ('buf', rffi.VOIDP), + ('obj', PyObject), + ('len', Py_ssize_t), + # ('itemsize', Py_ssize_t), + + # ('readonly', lltype.Signed), + # ('ndim', lltype.Signed), + # ('format', rffi.CCHARP), + # ('shape', Py_ssize_tP), + # ('strides', Py_ssize_tP), + # ('suboffets', Py_ssize_tP), + # ('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), + # ('internal', rffi.VOIDP) + )) + @specialize.memo() def is_PyObject(TYPE): if not isinstance(TYPE, lltype.Ptr): diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -1,7 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, - PyVarObject, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, + PyVarObject, Py_buffer, + Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, @@ -428,3 +429,17 @@ rffi.free_nonmovingbuffer(data, buf) return 0 + + at 
cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, + lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL) +def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): + view.c_buf = buf + view.c_len = length + view.c_obj = obj + Py_IncRef(space, obj) + return 0 + + + at cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) +def PyBuffer_Release(space, view): + pass diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -135,13 +135,11 @@ PyMem_FREE(ptr); } -#if 0 static void cleanup_buffer(void *ptr) { PyBuffer_Release((Py_buffer *) ptr); } -#endif static int addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) @@ -776,15 +774,19 @@ } case 's': {/* string */ if (*format == '*') { - Py_FatalError("* format unsupported for strings in PyArg_*\n"); -#if 0 Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } + } else { + PyErr_SetString( + PyExc_NotImplementedError, + "s* not implemented for non-string values"); + return NULL; + } +#if 0 #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { uarg = UNICODE_DEFAULT_ENCODING(arg); @@ -801,13 +803,13 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } +#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", arg, msgbuf, bufsize); } format++; -#endif } else if (*format == '#') { void **p = (void **)va_arg(*p_va, char **); FETCH_SIZE; @@ -1266,24 +1268,28 @@ } case 't': { /* 8-bit character buffer, read-only access */ - Py_FatalError("'t' unsupported"); -#if 0 char **p = va_arg(*p_va, char **); PyBufferProcs *pb = arg->ob_type->tp_as_buffer; Py_ssize_t count; - + printf("this far\n"); + +#if 0 if (*format++ != '#') return converterr( "invalid use of 't' format character", 
arg, msgbuf, bufsize); +#endif if (!PyType_HasFeature(arg->ob_type, - Py_TPFLAGS_HAVE_GETCHARBUFFER) || - pb == NULL || pb->bf_getcharbuffer == NULL || - pb->bf_getsegcount == NULL) + Py_TPFLAGS_HAVE_GETCHARBUFFER) +#if 0 + || pb == NULL || pb->bf_getcharbuffer == NULL || + pb->bf_getsegcount == NULL +#endif + ) return converterr( "string or read-only character buffer", arg, msgbuf, bufsize); - +#if 0 if (pb->bf_getsegcount(arg, NULL) != 1) return converterr( "string or single-segment read-only buffer", @@ -1293,16 +1299,23 @@ return converterr( "string or pinned buffer", arg, msgbuf, bufsize); - +#endif + printf("this far!\n"); + printf("%p\n", pb->bf_getcharbuffer); count = pb->bf_getcharbuffer(arg, 0, p); + printf("after\n"); +#if 0 if (count < 0) return converterr("(unspecified)", arg, msgbuf, bufsize); +#endif { + printf("fetch size\n"); FETCH_SIZE; + printf("did that\n"); STORE_SIZE(count); + printf("store size done\n"); } break; -#endif } default: return converterr("impossible", arg, msgbuf, bufsize); @@ -1616,7 +1629,7 @@ int match = 0; char *ks; if (!PyString_Check(key)) { - PyErr_SetString(PyExc_TypeError, + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); return cleanreturn(0, freelist); } diff --git a/.hgsubstate b/.hgsubstate new file mode 100644 diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -3,66 +3,142 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class AppTestGetargs(AppTestCpythonExtensionBase): - def test_pyarg_parse(self): - mod = self.import_extension('foo', [ - ('oneargint', 'METH_VARARGS', - ''' - int l; - if (!PyArg_ParseTuple(args, "i", &l)) { - return NULL; - } - return PyInt_FromLong(l); - ''' - ), - ('oneargandform', 'METH_VARARGS', - ''' - int l; - if (!PyArg_ParseTuple(args, "i:oneargandstuff", &l)) { - return NULL; - } - return PyInt_FromLong(l); - 
'''), - ('oneargobject', 'METH_VARARGS', - ''' - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - Py_INCREF(obj); - return obj; - '''), - ('oneargobjectandlisttype', 'METH_VARARGS', - ''' - PyObject *obj; - if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &obj)) { - return NULL; - } - Py_INCREF(obj); - return obj; - '''), - ('twoopt', 'METH_VARARGS', - ''' - PyObject *a; - PyObject *b = NULL; - if (!PyArg_ParseTuple(args, "O|O", &a, &b)) { - return NULL; - } - if (b) - Py_INCREF(b); - else - b = PyInt_FromLong(42); - /* return an owned reference */ - return b; - ''')]) - assert mod.oneargint(1) == 1 - raises(TypeError, mod.oneargint, None) - raises(TypeError, mod.oneargint) - assert mod.oneargandform(1) == 1 + def setup_method(self, func): + super(AppTestGetargs, self).setup_method(func) + self.w_import_parser = self.space.wrap(self.import_parser) + + def import_parser(self, implementation, argstyle='METH_VARARGS'): + mod = self.import_extension( + 'modname', [('funcname', argstyle, implementation)]) + return self.space.getattr(mod, self.space.wrap("funcname")) + + + def test_pyarg_parse_int(self): + """ + The `i` format specifier can be used to parse an integer. + """ + oneargint = self.import_parser( + ''' + int l; + if (!PyArg_ParseTuple(args, "i", &l)) { + return NULL; + } + return PyInt_FromLong(l); + ''') + assert oneargint(1) == 1 + raises(TypeError, oneargint, None) + raises(TypeError, oneargint) + + + def test_pyarg_parse_fromname(self): + """ + The name of the function parsing the arguments can be given after a `:` + in the argument format string. + """ + oneargandform = self.import_parser( + ''' + int l; + if (!PyArg_ParseTuple(args, "i:oneargandstuff", &l)) { + return NULL; + } + return PyInt_FromLong(l); + ''') + assert oneargandform(1) == 1 + + + def test_pyarg_parse_object(self): + """ + The `O` format specifier can be used to parse an arbitrary object. 
+ """ + oneargobject = self.import_parser( + ''' + PyObject *obj; + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + Py_INCREF(obj); + return obj; + ''') sentinel = object() - res = mod.oneargobject(sentinel) - raises(TypeError, "mod.oneargobjectandlisttype(sentinel)") + res = oneargobject(sentinel) assert res is sentinel - assert mod.twoopt(1) == 42 - assert mod.twoopt(1, 2) == 2 - raises(TypeError, mod.twoopt, 1, 2, 3) + + def test_pyarg_parse_restricted_object_type(self): + """ + The `O!` format specifier can be used to parse an object of a particular + type. + """ + oneargobjectandlisttype = self.import_parser( + ''' + PyObject *obj; + if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &obj)) { + return NULL; + } + Py_INCREF(obj); + return obj; + ''') + sentinel = object() + raises(TypeError, "oneargobjectandlisttype(sentinel)") + sentinel = [] + res = oneargobjectandlisttype(sentinel) + assert res is sentinel + + + def test_pyarg_parse_one_optional(self): + """ + An object corresponding to a format specifier after a `|` in the + argument format string is optional and may be passed or not. + """ + twoopt = self.import_parser( + ''' + PyObject *a; + PyObject *b = NULL; + if (!PyArg_ParseTuple(args, "O|O", &a, &b)) { + return NULL; + } + if (b) + Py_INCREF(b); + else + b = PyInt_FromLong(42); + /* return an owned reference */ + return b; + ''') + assert twoopt(1) == 42 + assert twoopt(1, 2) == 2 + raises(TypeError, twoopt, 1, 2, 3) + + + def test_pyarg_parse_string_py_buffer(self): + """ + The `s*` format specifier can be used to parse a str into a Py_buffer + structure containing a pointer to the string data and the length of the + string data. 
+ """ + pybuffer = self.import_parser( + ''' + Py_buffer buf; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + return PyString_FromStringAndSize(buf.buf, buf.len); + ''') + assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + + + def test_pyarg_parse_charbuf_and_length(self): + """ + The `t#` format specifier can be used to parse a read-only 8-bit + character buffer into a char* and int giving its length in bytes. + """ + charbuf = self.import_parser( + ''' + char *buf; + int len; + if (!PyArg_ParseTuple(args, "t#", &buf, &len)) { + return NULL; + } + return PyString_FromStringAndSize(buf, len); + ''') + raises(TypeError, "charbuf(10)") + assert 'foo\0bar\0baz' == charbuf('foo\0bar\0baz') diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -231,3 +231,107 @@ """)]) assert module.dump(self.tmpname, None) assert open(self.tmpname).read() == 'None' + + + +class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase): + """ + PyBuffer_FillInfo populates the fields of a Py_buffer from its arguments. + """ + def test_fillWithoutObject(self): + """ + PyBuffer_FillInfo populates the C{buf} and C{length}fields of the + Py_buffer passed to it. + """ + module = self.import_extension('foo', [ + ("fillinfo", "METH_VARARGS", + """ + Py_buffer buf; + PyObject *str = PyString_FromString("hello, world."); + PyObject *result; + + if (PyBuffer_FillInfo(&buf, NULL, PyString_AsString(str), 13, 0, 0)) { + return NULL; + } + + /* Check a few things we want to have happened. 
+ */ + if (buf.buf != PyString_AsString(str)) { + PyErr_SetString(PyExc_ValueError, "buf field not initialized"); + return NULL; + } + + if (buf.len != 13) { + PyErr_SetString(PyExc_ValueError, "len field not initialized"); + return NULL; + } + + if (buf.obj != NULL) { + PyErr_SetString(PyExc_ValueError, "obj field not initialized"); + return NULL; + } + + /* Give back a new string to the caller, constructed from data in the + * Py_buffer. + */ + if (!(result = PyString_FromStringAndSize(buf.buf, buf.len))) { + return NULL; + } + + /* Free that string we allocated above. result does not share storage with + * it. + */ + Py_DECREF(str); + + return result; + """)]) + result = module.fillinfo() + assert "hello, world." == result + + + def test_fillWithObject(self): + """ + PyBuffer_FillInfo populates the C{buf}, C{length}, and C{obj} fields of + the Py_buffer passed to it and increments the reference count of the + object. + """ + module = self.import_extension('foo', [ + ("fillinfo", "METH_VARARGS", + """ + Py_buffer buf; + PyObject *str = PyString_FromString("hello, world."); + PyObject *result; + + if (PyBuffer_FillInfo(&buf, str, PyString_AsString(str), 13, 0, 0)) { + return NULL; + } + + /* Get rid of our own reference to the object, but the Py_buffer should + * still have a reference. + */ + Py_DECREF(str); + + /* Give back a new string to the caller, constructed from data in the + * Py_buffer. It better still be valid. + */ + if (!(result = PyString_FromStringAndSize(buf.buf, buf.len))) { + return NULL; + } + + /* Now the data in the Py_buffer is really no longer needed, get rid of it + *(could use PyBuffer_Release here, but that would drag in more code than + * necessary). + */ + Py_DECREF(buf.obj); + + /* Py_DECREF can't directly signal error to us, but if it makes a reference + * count go negative, it will set an error. + */ + if (PyErr_Occurred()) { + return NULL; + } + + return result; + """)]) + result = module.fillinfo() + assert "hello, world." 
== result From commits-noreply at bitbucket.org Wed Apr 20 11:20:57 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 20 Apr 2011 11:20:57 +0200 (CEST) Subject: [pypy-svn] tkinter default: typo Message-ID: <20110420092057.3C3E6282C1B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r4:8d4c012f5134 Date: 2011-04-20 11:20 +0200 http://bitbucket.org/pypy/tkinter/changeset/8d4c012f5134/ Log: typo diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ setup(name="tkinter-pypy", version="0.1", - description="Python interface to Tk GUI toolkit (for PyPy)", + description="Python interface to the Tk GUI toolkit (for PyPy)", author="Python development team and PyPy development team", author_email="pypy-dev at codespeak.net", url="http://bitbucket.org/pypy/tkinter/", From commits-noreply at bitbucket.org Wed Apr 20 11:41:38 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 20 Apr 2011 11:41:38 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: blog post about tkinter in pypy Message-ID: <20110420094138.8D5D1282C1B@codespeak.net> Author: Antonio Cuni Branch: extradoc Changeset: r3523:a8f22b6cb9c8 Date: 2011-04-20 11:41 +0200 http://bitbucket.org/pypy/extradoc/changeset/a8f22b6cb9c8/ Log: blog post about tkinter in pypy diff --git a/blog/draft/tkinter.rst b/blog/draft/tkinter.rst new file mode 100644 --- /dev/null +++ b/blog/draft/tkinter.rst @@ -0,0 +1,58 @@ +Using Tkinter and IDLE with PyPy +================================= + +We are pleased to announce that Tkinter, the GUI library based on TCL/TK, now +works with PyPy. + +Tkinter is composed of two parts: + + - ``_tkinter``, a module written in C which interfaces with the TCL world + + - ``Tkinter``, a pure Python package which wraps ``_tkinter`` to expose the + pythonic API we are used to + +The `PyPy version of _tkinter`_ reuses the C code of as found in CPython and +compile it through the PyPy C-API compatibility layer, ``cpyext``. 
To make it +working with PyPy, we had to modify it slightly, in order to remove the +dependency on some API which are not supported by PyPy. In particular, we +removed the dependency on the ``PyOS_InputHook`` variable, which allows a nice +integration of Tkinter and the Python interactive prompt: the result is that, +unlike CPython, in PyPy Tk windows created at the interactive prompt are not +shown until we manually call the ``mainloop`` method. Apart from this +inconvenience, all the rest works fine. + +At the moment, ``_tkinter`` is not distributed with PyPy because our build +system does not support automatic compilation of C extension. Instead, it is +necessary to install it manually, either directly from the source_ or by +easy_installing/pip installing `tkinter-pypy`_ from PyPI. + +For everything to work correctly, you need a recent build of PyPy: the +following is a step-by-step guide to install ``_tkinter`` in a PyPy nighlty +build for Linux 64 bit; for other architectures, look at the `nighlty build +page`_:: + + $ wget http://buildbot.pypy.org/nightly/trunk/pypy-c-jit-43485-1615dfd7d8f1-linux64.tar.bz2 + + $ tar xfv pypy-c-jit-43485-1615dfd7d8f1-linux64.tar.bz2 + + $ cd pypy-c-jit-43485-1615dfd7d8f1-linux64/ + + $ wget http://peak.telecommunity.com/dist/ez_setup.py + + $ ./bin/pypy ez_setup.py # install setuptools + + $ ./bin/easy_install tkinter-pypy + +Once you complete the steps above, you can start using ``Tkinter`` from your +python programs. In particular, you can use IDLE, the IDE which is part of +the Python standard library. To start IDLE, type:: + + $ ./bin/pypy -m idlelib.idle + +Have fun :-) + +.. _`PyPy version of _tkinter`: http://bitbucket.org/pypy/tkinter +.. _source: http://bitbucket.org/pypy/tkinter +.. 
_`tkinter-pypy`: http://pypi.python.org/pypi/tkinter-pypy/ + +http://buildbot.pypy.org/nightly/trunk/pypy-c-jit-43478-f8c673fee06d-linux.tar.bz2 diff --git a/blog/draft/idle.png b/blog/draft/idle.png new file mode 100644 index 0000000000000000000000000000000000000000..249f886ec7cb470b58a04d86c76637876679f82a GIT binary patch [cut] From commits-noreply at bitbucket.org Wed Apr 20 11:57:03 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 20 Apr 2011 11:57:03 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: some typos Message-ID: <20110420095703.C7842282C1B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3524:3014dcfe1882 Date: 2011-04-20 11:56 +0200 http://bitbucket.org/pypy/extradoc/changeset/3014dcfe1882/ Log: some typos diff --git a/blog/draft/tkinter.rst b/blog/draft/tkinter.rst --- a/blog/draft/tkinter.rst +++ b/blog/draft/tkinter.rst @@ -13,8 +13,8 @@ The `PyPy version of _tkinter`_ reuses the C code of as found in CPython and compile it through the PyPy C-API compatibility layer, ``cpyext``. To make it -working with PyPy, we had to modify it slightly, in order to remove the -dependency on some API which are not supported by PyPy. In particular, we +work rwith PyPy, we had to modify it slightly, in order to remove the +dependency on some API functions which are not supported by PyPy. In particular, we removed the dependency on the ``PyOS_InputHook`` variable, which allows a nice integration of Tkinter and the Python interactive prompt: the result is that, unlike CPython, in PyPy Tk windows created at the interactive prompt are not @@ -23,12 +23,12 @@ At the moment, ``_tkinter`` is not distributed with PyPy because our build system does not support automatic compilation of C extension. Instead, it is -necessary to install it manually, either directly from the source_ or by +necessary to install it manually, either directly from source_ or by easy_installing/pip installing `tkinter-pypy`_ from PyPI. 
For everything to work correctly, you need a recent build of PyPy: the -following is a step-by-step guide to install ``_tkinter`` in a PyPy nighlty -build for Linux 64 bit; for other architectures, look at the `nighlty build +following is a step-by-step guide to install ``_tkinter`` in a PyPy nightly +build for Linux 64 bit; for other architectures, look at the `nightly build page`_:: $ wget http://buildbot.pypy.org/nightly/trunk/pypy-c-jit-43485-1615dfd7d8f1-linux64.tar.bz2 From commits-noreply at bitbucket.org Wed Apr 20 12:05:34 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 20 Apr 2011 12:05:34 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: typo Message-ID: <20110420100534.1B9B0282C1B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3525:00697fb5a30c Date: 2011-04-20 12:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/00697fb5a30c/ Log: typo diff --git a/blog/draft/tkinter.rst b/blog/draft/tkinter.rst --- a/blog/draft/tkinter.rst +++ b/blog/draft/tkinter.rst @@ -13,7 +13,7 @@ The `PyPy version of _tkinter`_ reuses the C code of as found in CPython and compile it through the PyPy C-API compatibility layer, ``cpyext``. To make it -work rwith PyPy, we had to modify it slightly, in order to remove the +work with PyPy, we had to modify it slightly, in order to remove the dependency on some API functions which are not supported by PyPy. 
In particular, we removed the dependency on the ``PyOS_InputHook`` variable, which allows a nice integration of Tkinter and the Python interactive prompt: the result is that, From commits-noreply at bitbucket.org Wed Apr 20 15:53:15 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:15 +0200 (CEST) Subject: [pypy-svn] buildbot default: make the base code a flask app Message-ID: <20110420135315.1AC90282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r451:0ed8d402c339 Date: 2011-04-18 17:46 +0200 http://bitbucket.org/pypy/buildbot/changeset/0ed8d402c339/ Log: make the base code a flask app diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -8,56 +8,47 @@ """ import time -import BaseHTTPServer import json -import cgi import traceback import pprint import sys +import flask + +app = flask.Flask('bb-hook') + from hook import BitbucketHookHandler HOST_NAME = 'codespeak.net' PORT_NUMBER = 9237 -class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler): + at app.route('/', methods=['GET']) +def test_form(): + """Respond to a GET request.""" + return """ + +

This is the pypy bitbucket hook. Use the following form only for testing

+
+ payload:
+ submit: +
+ + """ - def do_GET(self): - """Respond to a GET request.""" - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - self.wfile.write(""" - -

This is the pypy bitbucket hook. Use the following form only for testing

-
- payload:
- submit: -
- - """) - def do_POST(self): - length = int(self.headers['Content-Length']) - query_string = self.rfile.read(length) - data = dict(cgi.parse_qsl(query_string)) - payload = json.loads(data['payload']) + + at app.route('/', methods=['POST']) +def handle_payload(): + payload = json.loads(flask.request.form['payload']) + try: handler = BitbucketHookHandler() - try: - handler.handle(payload) - except: - traceback.print_exc() - print >> sys.stderr, 'payload:' - pprint.pprint(payload, sys.stderr) - print >> sys.stderr + handler.handle(payload) + except: + traceback.print_exc() + print >> sys.stderr, 'payload:' + pprint.pprint(payload, sys.stderr) + print >> sys.stderr + raise if __name__ == '__main__': - server_class = BaseHTTPServer.HTTPServer - httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler) - print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER) - try: - httpd.serve_forever() - except KeyboardInterrupt: - pass - httpd.server_close() - print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER) + app.run(debug=True) From commits-noreply at bitbucket.org Wed Apr 20 15:53:15 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:15 +0200 (CEST) Subject: [pypy-svn] buildbot default: make the hg calls global functions Message-ID: <20110420135315.DF768282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r452:fc8c248f4680 Date: 2011-04-18 18:39 +0200 http://bitbucket.org/pypy/buildbot/changeset/fc8c248f4680/ Log: make the hg calls global functions diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -29,6 +29,21 @@ hgexe = str(py.path.local.sysfind('hg')) +def _hgexe(argv): + proc = Popen([hgexe] + list(argv), stdout=PIPE, stderr=PIPE) + stdout, stderr = proc.communicate() + ret = proc.wait() + return stdout, stderr, ret + +def hg(self, *argv): + argv = map(str, argv) + stdout, stderr, ret = _hgexe(argv) + if ret 
!= 0: + print >> sys.stderr, 'error: hg', ' '.join(argv) + print >> sys.stderr, stderr + raise Exception('error when executing hg') + return unicode(stdout, encoding='utf-8', errors='replace') + TEMPLATE = u"""\ Author: {author} Branch: {branches} @@ -91,21 +106,7 @@ self.seen_nodes.add(key) yield commit - def _hgexe(self, argv): - proc = self.Popen([hgexe] + list(argv), stdout=self.PIPE, - stderr=self.PIPE) - stdout, stderr = proc.communicate() - ret = proc.wait() - return stdout, stderr, ret - def hg(self, *argv): - argv = map(str, argv) - stdout, stderr, ret = self._hgexe(argv) - if ret != 0: - print >> sys.stderr, 'error: hg', ' '.join(argv) - print >> sys.stderr, stderr - raise Exception('error when executing hg') - return unicode(stdout, encoding='utf-8', errors='replace') SMTP = smtplib.SMTP def send(self, from_, to, subject, body, test=False): @@ -145,7 +146,7 @@ if not self.check_for_local_repo(self.local_repo): print >> sys.stderr, 'Ignoring unknown repo', path return - self.hg('pull', '-R', self.local_repo) + hg('pull', '-R', self.local_repo) self.handle_irc_message(test) self.handle_diff_email(test) @@ -201,7 +202,7 @@ url = self.remote_repo + 'changeset/' + commit['node'] + '/' template = TEMPLATE % {'url': url} subject = '%s %s: %s' % (reponame, commit['branch'], line0) - body = self.hg('-R', self.local_repo, 'log', '-r', hgid, + body = hg('-R', self.local_repo, 'log', '-r', hgid, '--template', template) diff = self.get_diff(hgid, commit['files']) body = body+diff @@ -213,7 +214,7 @@ files = [item['file'] for item in files] lines = [] for filename in files: - out = self.hg('-R', self.local_repo, 'diff', '--git', '-c', hgid, + out = hg('-R', self.local_repo, 'diff', '--git', '-c', hgid, self.local_repo.join(filename)) match = binary.search(out) if match: diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -1,13 +1,13 @@ # -*- encoding: utf-8 
-*- -from bitbucket_hook.hook import BitbucketHookHandler, getpaths +from bitbucket_hook import hook -class BaseHandler(BitbucketHookHandler): +class BaseHandler(hook.BitbucketHookHandler): USE_COLOR_CODES = False def __init__(self): - BitbucketHookHandler.__init__(self) + hook.BitbucketHookHandler.__init__(self) self.mails = [] self.messages = [] @@ -18,23 +18,20 @@ self.messages.append(message) -def test_non_ascii_encoding_guess_utf8(): - class MyHandler(BaseHandler): - def _hgexe(self, argv): - return u'späm'.encode('utf-8'), '', 0 - # - handler = MyHandler() - stdout = handler.hg('foobar') +def test_non_ascii_encoding_guess_utf8(monkeypatch): + def _hgexe(argv): + return u'späm'.encode('utf-8'), '', 0 + monkeypatch.setattr(hook, '_hgexe', _hgexe) + stdout = hook.hg('foobar') assert type(stdout) is unicode assert stdout == u'späm' -def test_non_ascii_encoding_invalid_utf8(): - class MyHandler(BaseHandler): - def _hgexe(self, argv): - return '\xe4aa', '', 0 # invalid utf-8 string +def test_non_ascii_encoding_invalid_utf8(monkeypatch): + def _hgexe(argv): + return '\xe4aa', '', 0 # invalid utf-8 string # - handler = MyHandler() - stdout = handler.hg('foobar') + monkeypatch.setattr(hook, '_hgexe', _hgexe) + stdout = hook.hg('foobar') assert type(stdout) is unicode assert stdout == u'\ufffdaa' @@ -113,7 +110,7 @@ ] for f, wanted in files_expected: - assert getpaths(f) == wanted + assert hook.getpaths(f) == wanted # (input, expected output) for listfiles=True files_expected = [([], nothing), @@ -140,7 +137,7 @@ ] for f, wanted in files_expected: - assert getpaths(f, listfiles=True) == wanted + assert hook.getpaths(f, listfiles=True) == wanted @@ -227,8 +224,8 @@ def wait(*args, **kwargs): return 0 sendmail = noop -def test_handle(): - handler = BitbucketHookHandler() +def test_handle(monkeypatch): + handler = hook.BitbucketHookHandler() commits, _ = irc_cases() test_payload = {u'repository': {u'absolute_url': '', u'name': u'test', @@ -239,7 +236,7 @@ 'commits': 
commits['commits']} handler.call_subprocess = noop - handler.Popen = mock + monkeypatch.setattr(hook, 'Popen', mock) handler.SMTP = mock handler.handle(test_payload) @@ -250,13 +247,14 @@ handler.handle(test_payload, test=True) -def test_ignore_duplicate_commits(): +def test_ignore_duplicate_commits(monkeypatch): + def hg(self, *args): + return '' % ' '.join(map(str, args)) + monkeypatch.setattr(hook, 'hg', hg) class MyHandler(BaseHandler): seen_nodes = set() # make sure we do not depend on what the other # tests did - def hg(self, *args): - return '' % ' '.join(map(str, args)) def check_for_local_repo(self, local_repo): return True From commits-noreply at bitbucket.org Wed Apr 20 15:53:16 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:16 +0200 (CEST) Subject: [pypy-svn] buildbot default: make seen_nodes a global Message-ID: <20110420135316.AAD0E282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r453:49c1057079e1 Date: 2011-04-18 18:59 +0200 http://bitbucket.org/pypy/buildbot/changeset/49c1057079e1/ Log: make seen_nodes a global diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -88,11 +88,11 @@ filenames = '' return common_prefix, filenames +seen_nodes = set() class BitbucketHookHandler(object): Popen, PIPE = Popen, PIPE - seen_nodes = set() def get_commits(self, service, payload): import operator @@ -101,9 +101,9 @@ for commit in commits: node = commit['raw_node'] key = service, node - if key in self.seen_nodes: + if key in seen_nodes: continue - self.seen_nodes.add(key) + seen_nodes.add(key) yield commit diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -251,10 +251,8 @@ def hg(self, *args): return '' % ' '.join(map(str, args)) monkeypatch.setattr(hook, 'hg', hg) + monkeypatch.setattr(hook, 'seen_nodes', set()) class 
MyHandler(BaseHandler): - seen_nodes = set() # make sure we do not depend on what the other - # tests did - def check_for_local_repo(self, local_repo): return True From commits-noreply at bitbucket.org Wed Apr 20 15:53:17 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:17 +0200 (CEST) Subject: [pypy-svn] buildbot default: make the monkeypatchable check_for_local_repo a global Message-ID: <20110420135317.CD82E282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r454:0e988306b84c Date: 2011-04-18 19:02 +0200 http://bitbucket.org/pypy/buildbot/changeset/0e988306b84c/ Log: make the monkeypatchable check_for_local_repo a global diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -90,6 +90,11 @@ seen_nodes = set() + +def check_for_local_repo(local_repo): + return local_repo.check(dir=True) + + class BitbucketHookHandler(object): Popen, PIPE = Popen, PIPE @@ -135,15 +140,12 @@ else: return self.call_subprocess([BOT, CHANNEL, message]) - def check_for_local_repo(self, local_repo): - return local_repo.check(dir=True) - def handle(self, payload, test=False): path = payload['repository']['absolute_url'] self.payload = payload self.local_repo = LOCAL_REPOS.join(path) self.remote_repo = REMOTE_BASE + path - if not self.check_for_local_repo(self.local_repo): + if not check_for_local_repo(self.local_repo): print >> sys.stderr, 'Ignoring unknown repo', path return hg('pull', '-R', self.local_repo) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -252,11 +252,9 @@ return '' % ' '.join(map(str, args)) monkeypatch.setattr(hook, 'hg', hg) monkeypatch.setattr(hook, 'seen_nodes', set()) - class MyHandler(BaseHandler): - def check_for_local_repo(self, local_repo): - return True + monkeypatch.setattr(hook, 'check_for_local_repo', lambda _:True) - 
handler = MyHandler() + handler = BaseHandler() commits, _ = irc_cases() payload = {u'repository': {u'absolute_url': '', u'name': u'test', From commits-noreply at bitbucket.org Wed Apr 20 15:53:19 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:19 +0200 (CEST) Subject: [pypy-svn] buildbot default: kill the call_subprocess hack Message-ID: <20110420135319.0BD99282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r455:54ab0de1f500 Date: 2011-04-18 19:06 +0200 http://bitbucket.org/pypy/buildbot/changeset/54ab0de1f500/ Log: kill the call_subprocess hack diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -132,13 +132,12 @@ smtp = self.SMTP(SMTP_SERVER, SMTP_PORT) smtp.sendmail(from_, [to], msg.as_string()) - call_subprocess = staticmethod(subprocess.call) def send_irc_message(self, message, test=False): if test: print message + '\n' else: - return self.call_subprocess([BOT, CHANNEL, message]) + return subprocess.call([BOT, CHANNEL, message]) def handle(self, payload, test=False): path = payload['repository']['absolute_url'] diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -235,8 +235,8 @@ u'user': u'antocuni', 'commits': commits['commits']} - handler.call_subprocess = noop monkeypatch.setattr(hook, 'Popen', mock) + monkeypatch.setattr(hook.subprocess, 'call', noop) handler.SMTP = mock handler.handle(test_payload) From commits-noreply at bitbucket.org Wed Apr 20 15:53:20 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:20 +0200 (CEST) Subject: [pypy-svn] buildbot default: turn get_commits into a global Message-ID: <20110420135320.0E837282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r456:847d5d247e9a Date: 2011-04-18 19:15 +0200 
http://bitbucket.org/pypy/buildbot/changeset/847d5d247e9a/ Log: turn get_commits into a global diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -95,23 +95,21 @@ return local_repo.check(dir=True) +def get_commits(service, payload): + #XXX: service is evil, get rid + import operator + commits = sorted(payload['commits'], + key=operator.itemgetter('revision')) + for commit in commits: + node = commit['raw_node'] + key = service, node + if key in seen_nodes: + continue + seen_nodes.add(key) + yield commit + + class BitbucketHookHandler(object): - Popen, PIPE = Popen, PIPE - - - def get_commits(self, service, payload): - import operator - commits = sorted(self.payload['commits'], - key=operator.itemgetter('revision')) - for commit in commits: - node = commit['raw_node'] - key = service, node - if key in seen_nodes: - continue - seen_nodes.add(key) - yield commit - - SMTP = smtplib.SMTP def send(self, from_, to, subject, body, test=False): @@ -154,7 +152,7 @@ USE_COLOR_CODES = True LISTFILES = False def handle_irc_message(self, test=False): - commits = self.get_commits('irc', self.payload) + commits = get_commits('irc', self.payload) if test: print "#" * 20 print "IRC messages:" @@ -189,7 +187,7 @@ self.send_irc_message(irc_msg, test) def handle_diff_email(self, test=False): - commits = self.get_commits('email', self.payload) + commits = get_commits('email', self.payload) for commit in commits: self.send_diff_for_commit(commit, test) From commits-noreply at bitbucket.org Wed Apr 20 15:53:20 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:20 +0200 (CEST) Subject: [pypy-svn] buildbot default: make SMTP a global Message-ID: <20110420135320.B2EA8282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r457:e4ea316be2c1 Date: 2011-04-18 19:17 +0200 http://bitbucket.org/pypy/buildbot/changeset/e4ea316be2c1/ Log: make SMTP a global diff --git 
a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -1,10 +1,10 @@ import os.path import py -import smtplib import socket import subprocess import sys import time +from smtplib import SMTP from subprocess import Popen, PIPE LOCAL_REPOS = py.path.local(__file__).dirpath('repos') @@ -111,7 +111,6 @@ class BitbucketHookHandler(object): - SMTP = smtplib.SMTP def send(self, from_, to, subject, body, test=False): from email.mime.text import MIMEText # Is this a valid workaround for unicode errors? @@ -127,7 +126,7 @@ print to print msg.get_payload(decode=True) else: - smtp = self.SMTP(SMTP_SERVER, SMTP_PORT) + smtp = SMTP(SMTP_SERVER, SMTP_PORT) smtp.sendmail(from_, [to], msg.as_string()) From commits-noreply at bitbucket.org Wed Apr 20 15:53:21 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:21 +0200 (CEST) Subject: [pypy-svn] buildbot default: fix up the hg calling code Message-ID: <20110420135321.A25D1282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r458:34c1fd12fcd0 Date: 2011-04-19 02:08 +0200 http://bitbucket.org/pypy/buildbot/changeset/34c1fd12fcd0/ Log: fix up the hg calling code diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -27,15 +27,14 @@ CHANNEL = '#pypy' BOT = '/svn/hooks/commit-bot/message' -hgexe = str(py.path.local.sysfind('hg')) def _hgexe(argv): - proc = Popen([hgexe] + list(argv), stdout=PIPE, stderr=PIPE) + proc = Popen(['hg'] + list(argv), stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() ret = proc.wait() return stdout, stderr, ret -def hg(self, *argv): +def hg(*argv): argv = map(str, argv) stdout, stderr, ret = _hgexe(argv) if ret != 0: diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- - 
+import py +import pytest from bitbucket_hook import hook class BaseHandler(hook.BitbucketHookHandler): @@ -248,7 +249,7 @@ def test_ignore_duplicate_commits(monkeypatch): - def hg(self, *args): + def hg( *args): return '' % ' '.join(map(str, args)) monkeypatch.setattr(hook, 'hg', hg) monkeypatch.setattr(hook, 'seen_nodes', set()) @@ -269,3 +270,14 @@ num_commits = len(commits['commits']) assert len(handler.mails) == num_commits assert len(handler.messages) == num_commits + + +def test_hg(): + if not py.path.local.sysfind('hg'): + pytest.skip('hg binary missing') + + #hook.hg('help') + with pytest.raises(Exception): + print hook.hg + hook.hg('uhmwrong') + From commits-noreply at bitbucket.org Wed Apr 20 15:53:23 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:23 +0200 (CEST) Subject: [pypy-svn] buildbot default: move getpaths to the irc module, more irc code to follow Message-ID: <20110420135323.6158C282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r459:bff6117f3431 Date: 2011-04-19 08:20 +0200 http://bitbucket.org/pypy/buildbot/changeset/bff6117f3431/ Log: move getpaths to the irc module, more irc code to follow diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -7,6 +7,8 @@ from smtplib import SMTP from subprocess import Popen, PIPE +from irc import getpaths + LOCAL_REPOS = py.path.local(__file__).dirpath('repos') REMOTE_BASE = 'http://bitbucket.org' @@ -54,38 +56,6 @@ """ -def getpaths(files, listfiles=False): - - # Handle empty input - if not files: - return '', '' - files = [f['file'] for f in files] - if not any(files): - return '', '' - - dirname = os.path.dirname - basename = os.path.basename - - common_prefix = [dirname(f) for f in files] - - # Single file, show its full path - if len(files) == 1: - common_prefix = files[0] - listfiles = False - - else: - common_prefix = [path.split(os.sep) for path in common_prefix] - 
common_prefix = os.sep.join(os.path.commonprefix(common_prefix)) - if common_prefix and not common_prefix.endswith('/'): - common_prefix += '/' - - if listfiles: - # XXX Maybe should return file paths relative to prefix? Or TMI? - filenames = [basename(f) for f in files if f and basename(f)] - filenames = ' M(%s)' % ', '.join(filenames) - else: - filenames = '' - return common_prefix, filenames seen_nodes = set() diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/irc.py @@ -0,0 +1,38 @@ +''' +utilities for interacting with the irc bot (via cli) +''' + +import os + +def getpaths(files, listfiles=False): + + # Handle empty input + if not files: + return '', '' + files = [f['file'] for f in files] + if not any(files): + return '', '' + + dirname = os.path.dirname + basename = os.path.basename + + common_prefix = [dirname(f) for f in files] + + # Single file, show its full path + if len(files) == 1: + common_prefix = files[0] + listfiles = False + + else: + common_prefix = [path.split(os.sep) for path in common_prefix] + common_prefix = os.sep.join(os.path.commonprefix(common_prefix)) + if common_prefix and not common_prefix.endswith('/'): + common_prefix += '/' + + if listfiles: + # XXX Maybe should return file paths relative to prefix? Or TMI? 
+ filenames = [basename(f) for f in files if f and basename(f)] + filenames = ' M(%s)' % ', '.join(filenames) + else: + filenames = '' + return common_prefix, filenames diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -52,95 +52,6 @@ handler.handle_diff_email() assert handler.sent_commits == ['first', 'second'] -def test_getpaths(): - d = dict - - barefile = [d(file='file')] - distinct = [d(file='path1/file1'), d(file='path2/file2'), - d(file='path3/file')] - shared = [d(file='path/file1'), d(file='path/file2'), - d(file='path/file')] - - deepfile = [d(file='a/long/path/to/deepfile.py')] - slashesfile = [d(file='/slashesfile/')] - slashleft = [d(file='/slashleft')] - slashright = [d(file='slashright/')] - - - nocommon = distinct + [d(file='path4/file')] - nocommonplusroot = distinct + barefile - - common = [d(file='some/path/to/file'), d(file='some/path/to/deeper/file'), - d(file='some/path/to/anotherfile'), d(file='some/path/to/afile')] - commonplusroot = shared + barefile - - empty = d(file='') - nocommonplusempty = distinct + [empty] - commonplusempty = shared + [empty] - nocommonplusslash = distinct + [d(file='path4/dir/')] - commonplusslash = shared + [d(file='path/dir/')] - - pypydoubleslash = [d(file='pypy/jit/metainterp/opt/u.py'), - d(file='pypy/jit/metainterp/test/test_c.py'), - d(file='pypy/jit/metainterp/test/test_o.py')] - - pypyempty = [d(file='pypy/rlib/rdtoa.py'), - d(file='pypy/rlib/test/test_rdtoa.py')] - - nothing = ('', '') - - # (input, expected output) for listfiles=False - files_expected = [([], nothing), - ([empty], nothing), - ([empty, empty], nothing), - (barefile, ('file', '')), - (deepfile, ('a/long/path/to/deepfile.py', '')), - (slashesfile, ('/slashesfile/', '')), - (slashleft, ('/slashleft', '')), - (slashright, ('slashright/', '')), - (nocommon, nothing), - (nocommonplusroot, nothing), - (nocommonplusempty, nothing), - 
(common, ('some/path/to/', '')), - (commonplusroot, nothing), - (commonplusempty, nothing), - (nocommonplusslash, nothing), - (commonplusslash, ('path/', '')), - (pypydoubleslash, ('pypy/jit/metainterp/', '')), - (pypyempty, ('pypy/rlib/', '')), - ] - - for f, wanted in files_expected: - assert hook.getpaths(f) == wanted - - # (input, expected output) for listfiles=True - files_expected = [([], nothing), - ([empty], nothing), - ([empty, empty], nothing), - (barefile, ('file', '')), - (deepfile, ('a/long/path/to/deepfile.py', '')), - (slashesfile, ('/slashesfile/', '')), - (slashleft, ('/slashleft', '')), - (slashright, ('slashright/', '')), - (nocommon, ('', ' M(file1, file2, file, file)')), - (nocommonplusroot, ('', ' M(file1, file2, file, file)')), - (nocommonplusempty, ('',' M(file1, file2, file)')), - (common, ('some/path/to/', - ' M(file, file, anotherfile, afile)')), - (commonplusroot, ('', ' M(file1, file2, file, file)')), - (commonplusempty, ('',' M(file1, file2, file)')), - (nocommonplusslash, ('',' M(file1, file2, file)')), - (commonplusslash, ('path/',' M(file1, file2, file)')), - (pypydoubleslash, ('pypy/jit/metainterp/', - ' M(u.py, test_c.py, test_o.py)')), - (pypyempty, ('pypy/rlib/', - ' M(rdtoa.py, test_rdtoa.py)')), - ] - - for f, wanted in files_expected: - assert hook.getpaths(f, listfiles=True) == wanted - - LONG_MESSAGE = u'This is a test with a long message: ' + 'x'*1000 LONG_CUT = LONG_MESSAGE[:160-29] diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/test_irc.py @@ -0,0 +1,90 @@ +from bitbucket_hook.irc import getpaths + +def fl(*paths): + return [{'file': x} for x in paths] + +def test_getpaths(): + d = dict + barefile = fl('file') + distinct = fl('path1/file1', 'path2/file2', 'path3/file') + shared = fl('path/file1', 'path/file2', 'path/file') + + deepfile = fl('a/long/path/to/deepfile.py') + slashesfile = fl('/slashesfile/') + slashleft = 
fl('/slashleft') + slashright = fl('slashright/') + + + nocommon = distinct + fl('path4/file') + nocommonplusroot = distinct + barefile + + common = fl('some/path/to/file', 'some/path/to/deeper/file', + 'some/path/to/anotherfile', 'some/path/to/afile') + commonplusroot = shared + barefile + + empty = d(file='') + nocommonplusempty = distinct + [empty] + commonplusempty = shared + [empty] + nocommonplusslash = distinct + fl('path4/dir/') + commonplusslash = shared + fl('path/dir/') + + pypydoubleslash = fl('pypy/jit/metainterp/opt/u.py', + 'pypy/jit/metainterp/test/test_c.py', + 'pypy/jit/metainterp/test/test_o.py') + + pypyempty = fl('pypy/rlib/rdtoa.py', 'pypy/rlib/test/test_rdtoa.py' + + nothing = ('', '') + + # (input, expected output) for listfiles=False + files_expected = [([], nothing), + ([empty], nothing), + ([empty, empty], nothing), + (barefile, ('file', '')), + (deepfile, ('a/long/path/to/deepfile.py', '')), + (slashesfile, ('/slashesfile/', '')), + (slashleft, ('/slashleft', '')), + (slashright, ('slashright/', '')), + (nocommon, nothing), + (nocommonplusroot, nothing), + (nocommonplusempty, nothing), + (common, ('some/path/to/', '')), + (commonplusroot, nothing), + (commonplusempty, nothing), + (nocommonplusslash, nothing), + (commonplusslash, ('path/', '')), + (pypydoubleslash, ('pypy/jit/metainterp/', '')), + (pypyempty, ('pypy/rlib/', '')), + ] + + for f, wanted in files_expected: + assert getpaths(f) == wanted + + # (input, expected output) for listfiles=True + files_expected = [([], nothing), + ([empty], nothing), + ([empty, empty], nothing), + (barefile, ('file', '')), + (deepfile, ('a/long/path/to/deepfile.py', '')), + (slashesfile, ('/slashesfile/', '')), + (slashleft, ('/slashleft', '')), + (slashright, ('slashright/', '')), + (nocommon, ('', ' M(file1, file2, file, file)')), + (nocommonplusroot, ('', ' M(file1, file2, file, file)')), + (nocommonplusempty, ('',' M(file1, file2, file)')), + (common, ('some/path/to/', + ' M(file, file, 
anotherfile, afile)')), + (commonplusroot, ('', ' M(file1, file2, file, file)')), + (commonplusempty, ('',' M(file1, file2, file)')), + (nocommonplusslash, ('',' M(file1, file2, file)')), + (commonplusslash, ('path/',' M(file1, file2, file)')), + (pypydoubleslash, ('pypy/jit/metainterp/', + ' M(u.py, test_c.py, test_o.py)')), + (pypyempty, ('pypy/rlib/', + ' M(rdtoa.py, test_rdtoa.py)')), + ] + + for f, wanted in files_expected: + assert getpaths(f, listfiles=True) == wanted + + From commits-noreply at bitbucket.org Wed Apr 20 15:53:24 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:24 +0200 (CEST) Subject: [pypy-svn] buildbot default: turn the getfiles tests over to generate_tests Message-ID: <20110420135324.26583282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r460:9ea95b60e126 Date: 2011-04-19 08:50 +0200 http://bitbucket.org/pypy/buildbot/changeset/9ea95b60e126/ Log: turn the getfiles tests over to generate_tests diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py --- a/bitbucket_hook/test/test_irc.py +++ b/bitbucket_hook/test/test_irc.py @@ -3,8 +3,8 @@ def fl(*paths): return [{'file': x} for x in paths] -def test_getpaths(): - d = dict +def pytest_generate_tests(metafunc): + barefile = fl('file') distinct = fl('path1/file1', 'path2/file2', 'path3/file') shared = fl('path/file1', 'path/file2', 'path/file') @@ -22,9 +22,9 @@ 'some/path/to/anotherfile', 'some/path/to/afile') commonplusroot = shared + barefile - empty = d(file='') - nocommonplusempty = distinct + [empty] - commonplusempty = shared + [empty] + empty = fl('') + nocommonplusempty = distinct + empty + commonplusempty = shared + empty nocommonplusslash = distinct + fl('path4/dir/') commonplusslash = shared + fl('path/dir/') @@ -32,59 +32,51 @@ 'pypy/jit/metainterp/test/test_c.py', 'pypy/jit/metainterp/test/test_o.py') - pypyempty = fl('pypy/rlib/rdtoa.py', 'pypy/rlib/test/test_rdtoa.py' + pypyempty = 
fl('pypy/rlib/rdtoa.py', 'pypy/rlib/test/test_rdtoa.py') + nothing = ('', '') + expectations = [ + ('null', [], nothing), + ('empty', empty, nothing), + ('empty*2', empty*2, nothing), + ('bare', barefile, ('file', '')), + ('deep', deepfile, ('a/long/path/to/deepfile.py', '')), + ('slashes', slashesfile, ('/slashesfile/', '')), + ('slashleft', slashleft, ('/slashleft', '')), + ('slashright', slashright, ('slashright/', '')), + ('nocommon', nocommon, ('', ' M(file1, file2, file, file)')), + ('nocommon+root', nocommonplusroot, + ('', ' M(file1, file2, file, file)')), + ('nocommon+empty', nocommonplusempty, ('',' M(file1, file2, file)')), + ('common', common, ('some/path/to/', + ' M(file, file, anotherfile, afile)')), + ('common+root', commonplusroot, ('', ' M(file1, file2, file, file)')), + ('common+empty', commonplusempty, ('',' M(file1, file2, file)')), + ('nocommon+slash', nocommonplusslash, ('',' M(file1, file2, file)')), + ('common+slash', commonplusslash, ('path/',' M(file1, file2, file)')), + ('pypydoubledash', pypydoubleslash, ('pypy/jit/metainterp/', + ' M(u.py, test_c.py, test_o.py)')), + ('pypyempty', pypyempty, ('pypy/rlib/', + ' M(rdtoa.py, test_rdtoa.py)')), + ] - # (input, expected output) for listfiles=False - files_expected = [([], nothing), - ([empty], nothing), - ([empty, empty], nothing), - (barefile, ('file', '')), - (deepfile, ('a/long/path/to/deepfile.py', '')), - (slashesfile, ('/slashesfile/', '')), - (slashleft, ('/slashleft', '')), - (slashright, ('slashright/', '')), - (nocommon, nothing), - (nocommonplusroot, nothing), - (nocommonplusempty, nothing), - (common, ('some/path/to/', '')), - (commonplusroot, nothing), - (commonplusempty, nothing), - (nocommonplusslash, nothing), - (commonplusslash, ('path/', '')), - (pypydoubleslash, ('pypy/jit/metainterp/', '')), - (pypyempty, ('pypy/rlib/', '')), - ] + for name, files, (common, listfiles) in expectations: + metafunc.addcall(id='list/'+name, funcargs={ + 'files': files, + 'expected_common': 
common, + 'expected_listfiles': listfiles, + }) + metafunc.addcall(id='nolist/'+name, funcargs={ + 'files': files, + 'expected_common': common, + 'expected_listfiles': listfiles, + }) - for f, wanted in files_expected: - assert getpaths(f) == wanted - # (input, expected output) for listfiles=True - files_expected = [([], nothing), - ([empty], nothing), - ([empty, empty], nothing), - (barefile, ('file', '')), - (deepfile, ('a/long/path/to/deepfile.py', '')), - (slashesfile, ('/slashesfile/', '')), - (slashleft, ('/slashleft', '')), - (slashright, ('slashright/', '')), - (nocommon, ('', ' M(file1, file2, file, file)')), - (nocommonplusroot, ('', ' M(file1, file2, file, file)')), - (nocommonplusempty, ('',' M(file1, file2, file)')), - (common, ('some/path/to/', - ' M(file, file, anotherfile, afile)')), - (commonplusroot, ('', ' M(file1, file2, file, file)')), - (commonplusempty, ('',' M(file1, file2, file)')), - (nocommonplusslash, ('',' M(file1, file2, file)')), - (commonplusslash, ('path/',' M(file1, file2, file)')), - (pypydoubleslash, ('pypy/jit/metainterp/', - ' M(u.py, test_c.py, test_o.py)')), - (pypyempty, ('pypy/rlib/', - ' M(rdtoa.py, test_rdtoa.py)')), - ] +def test_getpaths(files, expected_common, expected_listfiles): + common, files = getpaths(files, listfiles=bool(expected_listfiles)) + assert common == expected_common + assert files == expected_listfiles - for f, wanted in files_expected: - assert getpaths(f, listfiles=True) == wanted - From commits-noreply at bitbucket.org Wed Apr 20 15:53:24 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:24 +0200 (CEST) Subject: [pypy-svn] buildbot default: only parameterize test_getpaths Message-ID: <20110420135324.C5585282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r461:a503c944fc46 Date: 2011-04-19 09:00 +0200 http://bitbucket.org/pypy/buildbot/changeset/a503c944fc46/ Log: only parameterize test_getpaths diff --git a/bitbucket_hook/test/test_irc.py 
b/bitbucket_hook/test/test_irc.py --- a/bitbucket_hook/test/test_irc.py +++ b/bitbucket_hook/test/test_irc.py @@ -61,17 +61,18 @@ ' M(rdtoa.py, test_rdtoa.py)')), ] - for name, files, (common, listfiles) in expectations: - metafunc.addcall(id='list/'+name, funcargs={ - 'files': files, - 'expected_common': common, - 'expected_listfiles': listfiles, - }) - metafunc.addcall(id='nolist/'+name, funcargs={ - 'files': files, - 'expected_common': common, - 'expected_listfiles': listfiles, - }) + if metafunc.function.__name__=='test_getpaths': + for name, files, (common, listfiles) in expectations: + metafunc.addcall(id='list/'+name, funcargs={ + 'files': files, + 'expected_common': common, + 'expected_listfiles': listfiles, + }) + metafunc.addcall(id='nolist/'+name, funcargs={ + 'files': files, + 'expected_common': common, + 'expected_listfiles': listfiles, + }) def test_getpaths(files, expected_common, expected_listfiles): From commits-noreply at bitbucket.org Wed Apr 20 15:53:25 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:25 +0200 (CEST) Subject: [pypy-svn] buildbot default: use module import for the hook, so we can import app in hook Message-ID: <20110420135325.7A1FD282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r462:b2ffaeb67574 Date: 2011-04-19 09:40 +0200 http://bitbucket.org/pypy/buildbot/changeset/b2ffaeb67574/ Log: use module import for the hook, so we can import app in hook diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -16,8 +16,7 @@ app = flask.Flask('bb-hook') - -from hook import BitbucketHookHandler +import hook HOST_NAME = 'codespeak.net' PORT_NUMBER = 9237 @@ -41,7 +40,7 @@ def handle_payload(): payload = json.loads(flask.request.form['payload']) try: - handler = BitbucketHookHandler() + handler = hook.BitbucketHookHandler() handler.handle(payload) except: traceback.print_exc() From commits-noreply at bitbucket.org 
Wed Apr 20 15:53:26 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:26 +0200 (CEST) Subject: [pypy-svn] buildbot default: make email sending a global Message-ID: <20110420135326.6B77A282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r463:70c9e99b114d Date: 2011-04-19 12:17 +0200 http://bitbucket.org/pypy/buildbot/changeset/70c9e99b114d/ Log: make email sending a global diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -77,26 +77,26 @@ seen_nodes.add(key) yield commit +def send(from_, to, subject, body, test=False): + from email.mime.text import MIMEText + # Is this a valid workaround for unicode errors? + body = body.encode('ascii', 'xmlcharrefreplace') + msg = MIMEText(body, _charset='utf-8') + msg['From'] = from_ + msg['To'] = to + msg['Subject'] = subject + if test: + print '#' * 20 + print "Email contents:\n" + print from_ + print to + print msg.get_payload(decode=True) + else: + smtp = SMTP(SMTP_SERVER, SMTP_PORT) + smtp.sendmail(from_, [to], msg.as_string()) class BitbucketHookHandler(object): - def send(self, from_, to, subject, body, test=False): - from email.mime.text import MIMEText - # Is this a valid workaround for unicode errors? 
- body = body.encode('ascii', 'xmlcharrefreplace') - msg = MIMEText(body, _charset='utf-8') - msg['From'] = from_ - msg['To'] = to - msg['Subject'] = subject - if test: - print '#' * 20 - print "Email contents:\n" - print from_ - print to - print msg.get_payload(decode=True) - else: - smtp = SMTP(SMTP_SERVER, SMTP_PORT) - smtp.sendmail(from_, [to], msg.as_string()) def send_irc_message(self, message, test=False): @@ -173,7 +173,7 @@ '--template', template) diff = self.get_diff(hgid, commit['files']) body = body+diff - self.send(sender, ADDRESS, subject, body, test) + send(sender, ADDRESS, subject, body, test) def get_diff(self, hgid, files): import re diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -9,12 +9,8 @@ def __init__(self): hook.BitbucketHookHandler.__init__(self) - self.mails = [] self.messages = [] - def send(self, from_, to, subject, body, test=False): - self.mails.append((from_, to, subject, body)) - def send_irc_message(self, message, test=False): self.messages.append(message) @@ -159,7 +155,7 @@ handler.handle(test_payload, test=True) -def test_ignore_duplicate_commits(monkeypatch): +def test_ignore_duplicate_commits(monkeypatch, mails): def hg( *args): return '' % ' '.join(map(str, args)) monkeypatch.setattr(hook, 'hg', hg) @@ -179,7 +175,7 @@ handler.handle(payload) # num_commits = len(commits['commits']) - assert len(handler.mails) == num_commits + assert len(mails) == num_commits assert len(handler.messages) == num_commits From commits-noreply at bitbucket.org Wed Apr 20 15:53:27 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:27 +0200 (CEST) Subject: [pypy-svn] buildbot default: add missing conftest Message-ID: <20110420135327.1D365282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r464:9fe7b9130d3c Date: 2011-04-19 12:29 +0200 
http://bitbucket.org/pypy/buildbot/changeset/9fe7b9130d3c/ Log: add missing conftest diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/conftest.py @@ -0,0 +1,16 @@ +from bitbucket_hook import hook + +def pytest_funcarg__mails(request): + return [] + + +def pytest_funcarg__monkeypatch(request): + mp = request.getfuncargvalue('monkeypatch') + mails = request.getfuncargvalue('mails') + def send(from_, to, subject, body,test=False, mails=mails): + mails.append((from_, to, subject, body)) + mp.setattr(hook, 'send', send) + + + return mp + From commits-noreply at bitbucket.org Wed Apr 20 15:53:28 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:28 +0200 (CEST) Subject: [pypy-svn] buildbot default: use app.config for storing settings, use classes for having sets of settings Message-ID: <20110420135328.525AA282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r465:ed715cff4588 Date: 2011-04-19 12:35 +0200 http://bitbucket.org/pypy/buildbot/changeset/ed715cff4588/ Log: use app.config for storing settings, use classes for having sets of settings diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -8,26 +8,9 @@ from subprocess import Popen, PIPE from irc import getpaths +from main import app -LOCAL_REPOS = py.path.local(__file__).dirpath('repos') -REMOTE_BASE = 'http://bitbucket.org' -if socket.gethostname() == 'viper': - # for debugging, antocuni's settings - SMTP_SERVER = "out.alice.it" - SMTP_PORT = 25 - ADDRESS = 'anto.cuni at gmail.com' - # - CHANNEL = '#test' - BOT = '/tmp/commit-bot/message' -else: - # real settings, (they works on codespeak at least) - SMTP_SERVER = 'localhost' - SMTP_PORT = 25 - ADDRESS = 'pypy-svn at codespeak.net' - # - CHANNEL = '#pypy' - BOT = '/svn/hooks/commit-bot/message' def _hgexe(argv): @@ -103,13 +86,15 @@ if test: print 
message + '\n' else: - return subprocess.call([BOT, CHANNEL, message]) + return subprocess.call([ + app.config['BOT'], app.config['CHANNEL'], message + ]) def handle(self, payload, test=False): path = payload['repository']['absolute_url'] self.payload = payload - self.local_repo = LOCAL_REPOS.join(path) - self.remote_repo = REMOTE_BASE + path + self.local_repo = app.config['LOCAL_REPOS'].join(path) + self.remote_repo = app.config['REMOTE_BASE'] + path if not check_for_local_repo(self.local_repo): print >> sys.stderr, 'Ignoring unknown repo', path return @@ -117,8 +102,6 @@ self.handle_irc_message(test) self.handle_diff_email(test) - USE_COLOR_CODES = True - LISTFILES = False def handle_irc_message(self, test=False): commits = get_commits('irc', self.payload) if test: @@ -133,11 +116,11 @@ print '[%s] %s %s %s' % (time.strftime('%Y-%m-%d %H:%M'), node, timestamp, author) files = commit.get('files', []) - common_prefix, filenames = getpaths(files, self.LISTFILES) + common_prefix, filenames = getpaths(files, app.config['LISTFILES']) pathlen = len(common_prefix) + len(filenames) + 2 common_prefix = '/' + common_prefix - if self.USE_COLOR_CODES: + if app.config['USE_COLOR_CODES']: author = '\x0312%s\x0F' % author # in blue branch = '\x02%s\x0F' % branch # in bold node = '\x0311%s\x0F' % node # in azure @@ -173,7 +156,7 @@ '--template', template) diff = self.get_diff(hgid, commit['files']) body = body+diff - send(sender, ADDRESS, subject, body, test) + send(sender, app.config['ADDRESS'], subject, body, test) def get_diff(self, hgid, files): import re @@ -288,5 +271,5 @@ LOCAL_REPOS = py.path.local(repopath) hook = BitbucketHookHandler() - hook.USE_COLOR_CODES = False + app.config['USE_COLOR_CODES'] = False hook.handle(test_payload, test=True) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -3,10 +3,12 @@ import pytest from bitbucket_hook import hook +#XXX 
+hook.app.config['USE_COLOR_CODES'] = False + + class BaseHandler(hook.BitbucketHookHandler): - USE_COLOR_CODES = False - def __init__(self): hook.BitbucketHookHandler.__init__(self) self.messages = [] diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -13,6 +13,8 @@ import pprint import sys import flask +import py + app = flask.Flask('bb-hook') @@ -49,5 +51,36 @@ print >> sys.stderr raise + +class DefaultConfig(object): + LOCAL_REPOS = py.path.local(__file__).dirpath('repos') + REMOTE_BASE = 'http://bitbucket.org' + USE_COLOR_CODES = True + LISTFILES = False + +class CodeSpeakConfig(DefaultConfig): + SMTP_SERVER = 'localhost' + SMTP_PORT = 25 + ADDRESS = 'pypy-svn at codespeak.net' + # + CHANNEL = '#pypy' + BOT = '/svn/hooks/commit-bot/message' + +class ViperConfig(DefaultConfig): + SMTP_SERVER = "out.alice.it" + SMTP_PORT = 25 + ADDRESS = 'anto.cuni at gmail.com' + # + CHANNEL = '#test' + BOT = '/tmp/commit-bot/message' + + +if py.std.socket.gethostname() == 'viper': + # for debugging, antocuni's settings + app.config.from_object(ViperConfig) +else: + # real settings, (they works on codespeak at least) + app.config.from_object(CodeSpeakConfig) + if __name__ == '__main__': app.run(debug=True) From commits-noreply at bitbucket.org Wed Apr 20 15:53:30 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:30 +0200 (CEST) Subject: [pypy-svn] buildbot default: turn irc handling to globals Message-ID: <20110420135330.981DB282C31@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r466:2ddf5cbdec01 Date: 2011-04-19 19:56 +0200 http://bitbucket.org/pypy/buildbot/changeset/2ddf5cbdec01/ Log: turn irc handling to globals diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -78,18 +78,52 @@ smtp = SMTP(SMTP_SERVER, SMTP_PORT) smtp.sendmail(from_, [to], msg.as_string()) + +def 
send_irc_message(message, test=False): + if test: + print message + '\n' + else: + return subprocess.call([ + app.config['BOT'], app.config['CHANNEL'], message + ]) + +def handle_irc_message(payload, test=False): + commits = get_commits('irc', payload) + if test: + print "#" * 20 + print "IRC messages:" + + for commit in commits: + author = commit['author'] + branch = commit['branch'] + node = commit['node'] + timestamp = commit.get('timestamp') + print '[%s] %s %s %s' % (time.strftime('%Y-%m-%d %H:%M'), node, timestamp, author) + + files = commit.get('files', []) + common_prefix, filenames = getpaths(files, app.config['LISTFILES']) + pathlen = len(common_prefix) + len(filenames) + 2 + common_prefix = '/' + common_prefix + + if app.config['USE_COLOR_CODES']: + author = '\x0312%s\x0F' % author # in blue + branch = '\x02%s\x0F' % branch # in bold + node = '\x0311%s\x0F' % node # in azure + common_prefix = '\x0315%s\x0F' % common_prefix # in gray + + message = commit['message'].replace('\n', ' ') + fields = (author, branch, node, common_prefix, filenames) + part1 = '%s %s %s %s%s: ' % fields + totallen = 160 + pathlen + if len(message) + len(part1) <= totallen: + irc_msg = part1 + message + else: + maxlen = totallen - (len(part1) + 3) + irc_msg = part1 + message[:maxlen] + '...' 
+ send_irc_message(irc_msg, test) + class BitbucketHookHandler(object): - - - def send_irc_message(self, message, test=False): - if test: - print message + '\n' - else: - return subprocess.call([ - app.config['BOT'], app.config['CHANNEL'], message - ]) - def handle(self, payload, test=False): path = payload['repository']['absolute_url'] self.payload = payload @@ -99,43 +133,9 @@ print >> sys.stderr, 'Ignoring unknown repo', path return hg('pull', '-R', self.local_repo) - self.handle_irc_message(test) + handle_irc_message(payload, test) self.handle_diff_email(test) - def handle_irc_message(self, test=False): - commits = get_commits('irc', self.payload) - if test: - print "#" * 20 - print "IRC messages:" - - for commit in commits: - author = commit['author'] - branch = commit['branch'] - node = commit['node'] - timestamp = commit.get('timestamp') - print '[%s] %s %s %s' % (time.strftime('%Y-%m-%d %H:%M'), node, timestamp, author) - - files = commit.get('files', []) - common_prefix, filenames = getpaths(files, app.config['LISTFILES']) - pathlen = len(common_prefix) + len(filenames) + 2 - common_prefix = '/' + common_prefix - - if app.config['USE_COLOR_CODES']: - author = '\x0312%s\x0F' % author # in blue - branch = '\x02%s\x0F' % branch # in bold - node = '\x0311%s\x0F' % node # in azure - common_prefix = '\x0315%s\x0F' % common_prefix # in gray - - message = commit['message'].replace('\n', ' ') - fields = (author, branch, node, common_prefix, filenames) - part1 = '%s %s %s %s%s: ' % fields - totallen = 160 + pathlen - if len(message) + len(part1) <= totallen: - irc_msg = part1 + message - else: - maxlen = totallen - (len(part1) + 3) - irc_msg = part1 + message[:maxlen] + '...' 
- self.send_irc_message(irc_msg, test) def handle_diff_email(self, test=False): commits = get_commits('email', self.payload) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -11,10 +11,6 @@ def __init__(self): hook.BitbucketHookHandler.__init__(self) - self.messages = [] - - def send_irc_message(self, message, test=False): - self.messages.append(message) def test_non_ascii_encoding_guess_utf8(monkeypatch): @@ -96,9 +92,8 @@ return payload, expected -def test_irc_message(): - handler = BaseHandler() - handler.payload = { +def test_irc_message(monkeypatch, messages): + payload = { 'commits': [{'revision': 42, 'branch': u'default', 'author': u'antocuni', @@ -115,16 +110,16 @@ } ]} - handler.payload, expected = irc_cases(handler.payload) - handler.handle_irc_message() + payload, expected = irc_cases(payload) + hook.handle_irc_message(payload) - msg1, msg2 = handler.messages[:2] + msg1, msg2 = messages[:2] assert msg1 == 'antocuni default abcdef /: this is a test' x = 'antocuni mybranch xxxyyy /: %s...' 
% LONG_CUT assert msg2 == x - for got, wanted in zip(handler.messages[2:], expected): + for got, wanted in zip(messages[2:], expected): assert got == wanted def noop(*args, **kwargs): pass @@ -157,7 +152,7 @@ handler.handle(test_payload, test=True) -def test_ignore_duplicate_commits(monkeypatch, mails): +def test_ignore_duplicate_commits(monkeypatch, mails, messages): def hg( *args): return '' % ' '.join(map(str, args)) monkeypatch.setattr(hook, 'hg', hg) @@ -178,7 +173,7 @@ # num_commits = len(commits['commits']) assert len(mails) == num_commits - assert len(handler.messages) == num_commits + assert len(messages) == num_commits def test_hg(): diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py --- a/bitbucket_hook/test/conftest.py +++ b/bitbucket_hook/test/conftest.py @@ -3,6 +3,8 @@ def pytest_funcarg__mails(request): return [] +def pytest_funcarg__messages(request): + return [] def pytest_funcarg__monkeypatch(request): mp = request.getfuncargvalue('monkeypatch') @@ -11,6 +13,11 @@ mails.append((from_, to, subject, body)) mp.setattr(hook, 'send', send) + messages = request.getfuncargvalue('messages') + def send_irc_message(message, test=False): + messages.append(message) + mp.setattr(hook, 'send_irc_message', send_irc_message) + return mp From commits-noreply at bitbucket.org Wed Apr 20 15:53:33 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:33 +0200 (CEST) Subject: [pypy-svn] buildbot default: replace some leftover config vars Message-ID: <20110420135333.064D1282C27@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r467:d4d93fd244e9 Date: 2011-04-19 19:58 +0200 http://bitbucket.org/pypy/buildbot/changeset/d4d93fd244e9/ Log: replace some leftover config vars diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -75,7 +75,7 @@ print to print msg.get_payload(decode=True) else: - smtp = SMTP(SMTP_SERVER, SMTP_PORT) + 
smtp = SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT']) smtp.sendmail(from_, [to], msg.as_string()) From commits-noreply at bitbucket.org Wed Apr 20 15:53:36 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:36 +0200 (CEST) Subject: [pypy-svn] buildbot default: remove unused imports Message-ID: <20110420135336.A4466282C27@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r468:0fcb428afeb4 Date: 2011-04-19 19:59 +0200 http://bitbucket.org/pypy/buildbot/changeset/0fcb428afeb4/ Log: remove unused imports diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -1,6 +1,5 @@ import os.path import py -import socket import subprocess import sys import time diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -7,7 +7,6 @@ codespeak. """ -import time import json import traceback import pprint From commits-noreply at bitbucket.org Wed Apr 20 15:53:37 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:37 +0200 (CEST) Subject: [pypy-svn] buildbot default: some code cleanups Message-ID: <20110420135337.E6F8B282C27@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r469:997da8eeb7e0 Date: 2011-04-19 20:19 +0200 http://bitbucket.org/pypy/buildbot/changeset/997da8eeb7e0/ Log: some code cleanups diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -142,7 +142,7 @@ monkeypatch.setattr(hook, 'Popen', mock) monkeypatch.setattr(hook.subprocess, 'call', noop) - handler.SMTP = mock + monkeypatch.setattr(hook, 'SMTP', mock) handler.handle(test_payload) handler.handle(test_payload, test=True) From commits-noreply at bitbucket.org Wed Apr 20 15:53:39 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 
15:53:39 +0200 (CEST) Subject: [pypy-svn] buildbot default: move some hg utilities to a separate module Message-ID: <20110420135339.F2AD6282C27@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r470:866fb82ed294 Date: 2011-04-19 21:27 +0200 http://bitbucket.org/pypy/buildbot/changeset/866fb82ed294/ Log: move some hg utilities to a separate module diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -4,29 +4,14 @@ import sys import time from smtplib import SMTP -from subprocess import Popen, PIPE from irc import getpaths from main import app +from .scm import hg -def _hgexe(argv): - proc = Popen(['hg'] + list(argv), stdout=PIPE, stderr=PIPE) - stdout, stderr = proc.communicate() - ret = proc.wait() - return stdout, stderr, ret - -def hg(*argv): - argv = map(str, argv) - stdout, stderr, ret = _hgexe(argv) - if ret != 0: - print >> sys.stderr, 'error: hg', ' '.join(argv) - print >> sys.stderr, stderr - raise Exception('error when executing hg') - return unicode(stdout, encoding='utf-8', errors='replace') - TEMPLATE = u"""\ Author: {author} Branch: {branches} diff --git a/bitbucket_hook/scm.py b/bitbucket_hook/scm.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/scm.py @@ -0,0 +1,17 @@ +import sys +from subprocess import Popen, PIPE + +def _hgexe(argv): + proc = Popen(['hg'] + list(argv), stdout=PIPE, stderr=PIPE) + stdout, stderr = proc.communicate() + ret = proc.wait() + return stdout, stderr, ret + +def hg(*argv): + argv = map(str, argv) + stdout, stderr, ret = _hgexe(argv) + if ret != 0: + print >> sys.stderr, 'error: hg', ' '.join(argv) + print >> sys.stderr, stderr + raise Exception('error when executing hg') + return unicode(stdout, encoding='utf-8', errors='replace') diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -1,7 +1,7 @@ # -*- encoding: utf-8 -*- import 
py import pytest -from bitbucket_hook import hook +from bitbucket_hook import hook, scm #XXX hook.app.config['USE_COLOR_CODES'] = False @@ -13,23 +13,6 @@ hook.BitbucketHookHandler.__init__(self) -def test_non_ascii_encoding_guess_utf8(monkeypatch): - def _hgexe(argv): - return u'späm'.encode('utf-8'), '', 0 - monkeypatch.setattr(hook, '_hgexe', _hgexe) - stdout = hook.hg('foobar') - assert type(stdout) is unicode - assert stdout == u'späm' - -def test_non_ascii_encoding_invalid_utf8(monkeypatch): - def _hgexe(argv): - return '\xe4aa', '', 0 # invalid utf-8 string - # - monkeypatch.setattr(hook, '_hgexe', _hgexe) - stdout = hook.hg('foobar') - assert type(stdout) is unicode - assert stdout == u'\ufffdaa' - def test_sort_commits(): class MyHandler(BaseHandler): def __init__(self): @@ -140,7 +123,7 @@ u'user': u'antocuni', 'commits': commits['commits']} - monkeypatch.setattr(hook, 'Popen', mock) + monkeypatch.setattr(scm, 'Popen', mock) monkeypatch.setattr(hook.subprocess, 'call', noop) monkeypatch.setattr(hook, 'SMTP', mock) diff --git a/bitbucket_hook/test/test_scm.py b/bitbucket_hook/test/test_scm.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/test_scm.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +from bitbucket_hook import scm + + +def test_non_ascii_encoding_guess_utf8(monkeypatch): + def _hgexe(argv): + return u'späm'.encode('utf-8'), '', 0 + monkeypatch.setattr(scm, '_hgexe', _hgexe) + stdout = scm.hg('foobar') + assert type(stdout) is unicode + assert stdout == u'späm' + + +def test_non_ascii_encoding_invalid_utf8(monkeypatch): + def _hgexe(argv): + return '\xe4aa', '', 0 # invalid utf-8 string + monkeypatch.setattr(scm, '_hgexe', _hgexe) + stdout = scm.hg('foobar') + assert type(stdout) is unicode + assert stdout == u'\ufffdaa' + From commits-noreply at bitbucket.org Wed Apr 20 15:53:40 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:40 +0200 (CEST) Subject: [pypy-svn] buildbot default: use 
relative imports explicitly Message-ID: <20110420135340.E7665282C27@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r471:82d4ff2793a9 Date: 2011-04-19 21:29 +0200 http://bitbucket.org/pypy/buildbot/changeset/82d4ff2793a9/ Log: use relative imports explicitly diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -5,8 +5,8 @@ import time from smtplib import SMTP -from irc import getpaths -from main import app +from .irc import getpaths +from .main import app from .scm import hg From commits-noreply at bitbucket.org Wed Apr 20 15:53:42 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:42 +0200 (CEST) Subject: [pypy-svn] buildbot default: move more hg related code to the scm module Message-ID: <20110420135342.00646282C27@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r472:ad6611f26bf3 Date: 2011-04-19 21:58 +0200 http://bitbucket.org/pypy/buildbot/changeset/ad6611f26bf3/ Log: move more hg related code to the scm module diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -8,7 +8,7 @@ from .irc import getpaths from .main import app -from .scm import hg +from . 
import scm @@ -116,7 +116,7 @@ if not check_for_local_repo(self.local_repo): print >> sys.stderr, 'Ignoring unknown repo', path return - hg('pull', '-R', self.local_repo) + scm.hg('pull', '-R', self.local_repo) handle_irc_message(payload, test) self.handle_diff_email(test) @@ -136,27 +136,12 @@ url = self.remote_repo + 'changeset/' + commit['node'] + '/' template = TEMPLATE % {'url': url} subject = '%s %s: %s' % (reponame, commit['branch'], line0) - body = hg('-R', self.local_repo, 'log', '-r', hgid, + body = scm.hg('-R', self.local_repo, 'log', '-r', hgid, '--template', template) - diff = self.get_diff(hgid, commit['files']) + diff = scm.get_diff(self.local_repo, hgid, commit['files']) body = body+diff send(sender, app.config['ADDRESS'], subject, body, test) - def get_diff(self, hgid, files): - import re - binary = re.compile('^GIT binary patch$', re.MULTILINE) - files = [item['file'] for item in files] - lines = [] - for filename in files: - out = hg('-R', self.local_repo, 'diff', '--git', '-c', hgid, - self.local_repo.join(filename)) - match = binary.search(out) - if match: - # it's a binary patch, omit the content - out = out[:match.end()] - out += u'\n[cut]' - lines.append(out) - return u'\n'.join(lines) if __name__ == '__main__': diff --git a/bitbucket_hook/scm.py b/bitbucket_hook/scm.py --- a/bitbucket_hook/scm.py +++ b/bitbucket_hook/scm.py @@ -15,3 +15,20 @@ print >> sys.stderr, stderr raise Exception('error when executing hg') return unicode(stdout, encoding='utf-8', errors='replace') + + +def get_diff(local_repo, hgid, files): + import re + binary = re.compile('^GIT binary patch$', re.MULTILINE) + files = [item['file'] for item in files] + lines = [] + for filename in files: + out = hg('-R', local_repo, 'diff', '--git', '-c', hgid, + local_repo.join(filename)) + match = binary.search(out) + if match: + # it's a binary patch, omit the content + out = out[:match.end()] + out += u'\n[cut]' + lines.append(out) + return u'\n'.join(lines) diff --git 
a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -138,7 +138,7 @@ def test_ignore_duplicate_commits(monkeypatch, mails, messages): def hg( *args): return '' % ' '.join(map(str, args)) - monkeypatch.setattr(hook, 'hg', hg) + monkeypatch.setattr(scm, 'hg', hg) monkeypatch.setattr(hook, 'seen_nodes', set()) monkeypatch.setattr(hook, 'check_for_local_repo', lambda _:True) @@ -159,12 +159,3 @@ assert len(messages) == num_commits -def test_hg(): - if not py.path.local.sysfind('hg'): - pytest.skip('hg binary missing') - - #hook.hg('help') - with pytest.raises(Exception): - print hook.hg - hook.hg('uhmwrong') - diff --git a/bitbucket_hook/test/test_scm.py b/bitbucket_hook/test/test_scm.py --- a/bitbucket_hook/test/test_scm.py +++ b/bitbucket_hook/test/test_scm.py @@ -1,4 +1,7 @@ # -*- encoding: utf-8 -*- +import py +import pytest + from bitbucket_hook import scm @@ -19,3 +22,13 @@ assert type(stdout) is unicode assert stdout == u'\ufffdaa' + +def test_hg(): + if not py.path.local.sysfind('hg'): + pytest.skip('hg binary missing') + + scm.hg('help') + with pytest.raises(Exception): + print scm.hg + scm.hg('uhmwrong') + From commits-noreply at bitbucket.org Wed Apr 20 15:53:45 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:45 +0200 (CEST) Subject: [pypy-svn] buildbot default: get rid of Handlers Message-ID: <20110420135345.90E68282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r473:0b23291484ef Date: 2011-04-20 07:58 +0200 http://bitbucket.org/pypy/buildbot/changeset/0b23291484ef/ Log: get rid of Handlers diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -3,25 +3,15 @@ import subprocess import sys import time -from smtplib import SMTP -from .irc import getpaths +from . import irc from .main import app from . import scm +from . 
import mail -TEMPLATE = u"""\ -Author: {author} -Branch: {branches} -Changeset: r{rev}:{node|short} -Date: {date|isodate} -%(url)s - -Log:\t{desc|fill68|tabindent} - -""" seen_nodes = set() @@ -44,103 +34,19 @@ seen_nodes.add(key) yield commit -def send(from_, to, subject, body, test=False): - from email.mime.text import MIMEText - # Is this a valid workaround for unicode errors? - body = body.encode('ascii', 'xmlcharrefreplace') - msg = MIMEText(body, _charset='utf-8') - msg['From'] = from_ - msg['To'] = to - msg['Subject'] = subject - if test: - print '#' * 20 - print "Email contents:\n" - print from_ - print to - print msg.get_payload(decode=True) - else: - smtp = SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT']) - smtp.sendmail(from_, [to], msg.as_string()) -def send_irc_message(message, test=False): - if test: - print message + '\n' - else: - return subprocess.call([ - app.config['BOT'], app.config['CHANNEL'], message - ]) -def handle_irc_message(payload, test=False): - commits = get_commits('irc', payload) - if test: - print "#" * 20 - print "IRC messages:" - - for commit in commits: - author = commit['author'] - branch = commit['branch'] - node = commit['node'] - timestamp = commit.get('timestamp') - print '[%s] %s %s %s' % (time.strftime('%Y-%m-%d %H:%M'), node, timestamp, author) - - files = commit.get('files', []) - common_prefix, filenames = getpaths(files, app.config['LISTFILES']) - pathlen = len(common_prefix) + len(filenames) + 2 - common_prefix = '/' + common_prefix - - if app.config['USE_COLOR_CODES']: - author = '\x0312%s\x0F' % author # in blue - branch = '\x02%s\x0F' % branch # in bold - node = '\x0311%s\x0F' % node # in azure - common_prefix = '\x0315%s\x0F' % common_prefix # in gray - - message = commit['message'].replace('\n', ' ') - fields = (author, branch, node, common_prefix, filenames) - part1 = '%s %s %s %s%s: ' % fields - totallen = 160 + pathlen - if len(message) + len(part1) <= totallen: - irc_msg = part1 + message - else: - 
maxlen = totallen - (len(part1) + 3) - irc_msg = part1 + message[:maxlen] + '...' - send_irc_message(irc_msg, test) - -class BitbucketHookHandler(object): - - def handle(self, payload, test=False): - path = payload['repository']['absolute_url'] - self.payload = payload - self.local_repo = app.config['LOCAL_REPOS'].join(path) - self.remote_repo = app.config['REMOTE_BASE'] + path - if not check_for_local_repo(self.local_repo): - print >> sys.stderr, 'Ignoring unknown repo', path - return - scm.hg('pull', '-R', self.local_repo) - handle_irc_message(payload, test) - self.handle_diff_email(test) - - - def handle_diff_email(self, test=False): - commits = get_commits('email', self.payload) - for commit in commits: - self.send_diff_for_commit(commit, test) - - def send_diff_for_commit(self, commit, test=False): - hgid = commit['raw_node'] - sender = commit['author'] + ' ' - lines = commit['message'].splitlines() - line0 = lines and lines[0] or '' - reponame = self.payload['repository']['name'] - # TODO: maybe include the modified paths in the subject line? 
- url = self.remote_repo + 'changeset/' + commit['node'] + '/' - template = TEMPLATE % {'url': url} - subject = '%s %s: %s' % (reponame, commit['branch'], line0) - body = scm.hg('-R', self.local_repo, 'log', '-r', hgid, - '--template', template) - diff = scm.get_diff(self.local_repo, hgid, commit['files']) - body = body+diff - send(sender, app.config['ADDRESS'], subject, body, test) +def handle(payload, test=False): + path = payload['repository']['absolute_url'] + local_repo = app.config['LOCAL_REPOS'].join(path) + remote_repo = app.config['REMOTE_BASE'] + path + if not check_for_local_repo(local_repo): + print >> sys.stderr, 'Ignoring unknown repo', path + return + scm.hg('pull', '-R', local_repo) + irc.handle_message(payload, test) + mail.handle_diff_email(payload, test) diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py --- a/bitbucket_hook/irc.py +++ b/bitbucket_hook/irc.py @@ -3,6 +3,8 @@ ''' import os +import time +import subprocess def getpaths(files, listfiles=False): @@ -36,3 +38,55 @@ else: filenames = '' return common_prefix, filenames + + + +def send_message(message, test=False): + if test: + print message + '\n' + else: + from .main import app + return subprocess.call([ + app.config['BOT'], app.config['CHANNEL'], message + ]) + + +def handle_message(payload, test=False): + #XXX + from .hook import get_commits + from .main import app + commits = get_commits('irc', payload) + if test: + print "#" * 20 + print "IRC messages:" + + for commit in commits: + author = commit['author'] + branch = commit['branch'] + node = commit['node'] + timestamp = commit.get('timestamp') + print '[%s] %s %s %s' % (time.strftime('%Y-%m-%d %H:%M'), node, timestamp, author) + + files = commit.get('files', []) + common_prefix, filenames = getpaths(files, app.config['LISTFILES']) + pathlen = len(common_prefix) + len(filenames) + 2 + common_prefix = '/' + common_prefix + + if app.config['USE_COLOR_CODES']: + author = '\x0312%s\x0F' % author # in blue + branch = 
'\x02%s\x0F' % branch # in bold + node = '\x0311%s\x0F' % node # in azure + common_prefix = '\x0315%s\x0F' % common_prefix # in gray + + message = commit['message'].replace('\n', ' ') + fields = (author, branch, node, common_prefix, filenames) + part1 = '%s %s %s %s%s: ' % fields + totallen = 160 + pathlen + if len(message) + len(part1) <= totallen: + irc_msg = part1 + message + else: + maxlen = totallen - (len(part1) + 3) + irc_msg = part1 + message[:maxlen] + '...' + send_message(irc_msg, test) + + diff --git a/bitbucket_hook/mail.py b/bitbucket_hook/mail.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/mail.py @@ -0,0 +1,66 @@ +from . import scm +from smtplib import SMTP + + +TEMPLATE = u"""\ +Author: {author} +Branch: {branches} +Changeset: r{rev}:{node|short} +Date: {date|isodate} +%(url)s + +Log:\t{desc|fill68|tabindent} + +""" + + +def send_diff_for_commit(payload, commit, test=False): + from .main import app + + path = payload['repository']['absolute_url'] + local_repo = app.config['LOCAL_REPOS'].join(path) + remote_repo = app.config['REMOTE_BASE'] + path + + hgid = commit['raw_node'] + sender = commit['author'] + ' ' + lines = commit['message'].splitlines() + line0 = lines and lines[0] or '' + reponame = payload['repository']['name'] + # TODO: maybe include the modified paths in the subject line? + url = remote_repo + 'changeset/' + commit['node'] + '/' + template = TEMPLATE % {'url': url} + subject = '%s %s: %s' % (reponame, commit['branch'], line0) + body = scm.hg('-R', local_repo, 'log', '-r', hgid, + '--template', template) + diff = scm.get_diff(local_repo, hgid, commit['files']) + body = body+diff + send(sender, app.config['ADDRESS'], subject, body, test) + + +def send(from_, to, subject, body, test=False): + from email.mime.text import MIMEText + # Is this a valid workaround for unicode errors? 
+ body = body.encode('ascii', 'xmlcharrefreplace') + msg = MIMEText(body, _charset='utf-8') + msg['From'] = from_ + msg['To'] = to + msg['Subject'] = subject + if test: + print '#' * 20 + print "Email contents:\n" + print from_ + print to + print msg.get_payload(decode=True) + else: + smtp = SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT']) + smtp.sendmail(from_, [to], msg.as_string()) + + +def handle_diff_email(payload, test=False): + from . import hook + commits = hook.get_commits('email', payload) + for commit in commits: + send_diff_for_commit(payload, commit, test) + + + diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py --- a/bitbucket_hook/test/conftest.py +++ b/bitbucket_hook/test/conftest.py @@ -1,4 +1,4 @@ -from bitbucket_hook import hook +from bitbucket_hook import irc, mail, hook def pytest_funcarg__mails(request): return [] @@ -6,17 +6,20 @@ def pytest_funcarg__messages(request): return [] +def pytest_runtest_setup(item): + hook.seen_nodes.clear() + def pytest_funcarg__monkeypatch(request): mp = request.getfuncargvalue('monkeypatch') mails = request.getfuncargvalue('mails') def send(from_, to, subject, body,test=False, mails=mails): mails.append((from_, to, subject, body)) - mp.setattr(hook, 'send', send) + mp.setattr(mail, 'send', send) messages = request.getfuncargvalue('messages') def send_irc_message(message, test=False): messages.append(message) - mp.setattr(hook, 'send_irc_message', send_irc_message) + mp.setattr(irc, 'send_message', send_irc_message) return mp diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -1,33 +1,22 @@ # -*- encoding: utf-8 -*- import py import pytest -from bitbucket_hook import hook, scm +from bitbucket_hook import hook, scm, mail, irc #XXX hook.app.config['USE_COLOR_CODES'] = False -class BaseHandler(hook.BitbucketHookHandler): - - def __init__(self): - 
hook.BitbucketHookHandler.__init__(self) - def test_sort_commits(): - class MyHandler(BaseHandler): - def __init__(self): - BaseHandler.__init__(self) - self.sent_commits = [] - def send_diff_for_commit(self, commit, test=False): - self.sent_commits.append(commit['node']) # - handler = MyHandler() - handler.payload = { + commits = hook.get_commits('test_sort', { 'commits': [{'revision': 43, 'node': 'second', 'raw_node': 'first'}, {'revision': 42, 'node': 'first', 'raw_node': 'second'}] - } - handler.handle_diff_email() - assert handler.sent_commits == ['first', 'second'] + }) + commits = [x['node'] for x in commits] + + assert commits == ['first', 'second'] LONG_MESSAGE = u'This is a test with a long message: ' + 'x'*1000 @@ -94,7 +83,7 @@ ]} payload, expected = irc_cases(payload) - hook.handle_irc_message(payload) + irc.handle_message(payload) msg1, msg2 = messages[:2] @@ -113,7 +102,6 @@ sendmail = noop def test_handle(monkeypatch): - handler = hook.BitbucketHookHandler() commits, _ = irc_cases() test_payload = {u'repository': {u'absolute_url': '', u'name': u'test', @@ -124,15 +112,15 @@ 'commits': commits['commits']} monkeypatch.setattr(scm, 'Popen', mock) - monkeypatch.setattr(hook.subprocess, 'call', noop) - monkeypatch.setattr(hook, 'SMTP', mock) + monkeypatch.setattr(irc.subprocess, 'call', noop) + monkeypatch.setattr(mail, 'SMTP', mock) - handler.handle(test_payload) - handler.handle(test_payload, test=True) + hook.handle(test_payload) + hook.handle(test_payload, test=True) - handler.LISTFILES = True - handler.handle(test_payload) - handler.handle(test_payload, test=True) + hook.app.config['LISTFILES'] = True + hook.handle(test_payload) + hook.handle(test_payload, test=True) def test_ignore_duplicate_commits(monkeypatch, mails, messages): @@ -142,7 +130,6 @@ monkeypatch.setattr(hook, 'seen_nodes', set()) monkeypatch.setattr(hook, 'check_for_local_repo', lambda _:True) - handler = BaseHandler() commits, _ = irc_cases() payload = {u'repository': 
{u'absolute_url': '', u'name': u'test', @@ -151,8 +138,8 @@ u'website': u''}, u'user': u'antocuni', 'commits': commits['commits']} - handler.handle(payload) - handler.handle(payload) + hook.handle(payload) + hook.handle(payload) # num_commits = len(commits['commits']) assert len(mails) == num_commits From commits-noreply at bitbucket.org Wed Apr 20 15:53:46 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:46 +0200 (CEST) Subject: [pypy-svn] buildbot default: clean up some config leftovers Message-ID: <20110420135346.8CF6C282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r474:32964231e586 Date: 2011-04-20 08:00 +0200 http://bitbucket.org/pypy/buildbot/changeset/32964231e586/ Log: clean up some config leftovers diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -143,8 +143,7 @@ ## ## test_payload['commits'] = commits - LOCAL_REPOS = py.path.local(repopath) + app.config['LOCAL_REPOS'] = py.path.local(repopath) + app.config['USE_COLOR_CODES'] = False - hook = BitbucketHookHandler() - app.config['USE_COLOR_CODES'] = False hook.handle(test_payload, test=True) From commits-noreply at bitbucket.org Wed Apr 20 15:53:47 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:47 +0200 (CEST) Subject: [pypy-svn] buildbot default: remove missed usage of bitbuckethookhandler Message-ID: <20110420135347.6FF29282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r475:cf77a5a4f697 Date: 2011-04-20 08:02 +0200 http://bitbucket.org/pypy/buildbot/changeset/cf77a5a4f697/ Log: remove missed usage of bitbuckethookhandler diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -41,8 +41,7 @@ def handle_payload(): payload = json.loads(flask.request.form['payload']) try: - handler = hook.BitbucketHookHandler() - handler.handle(payload) + 
hook.handle(payload) except: traceback.print_exc() print >> sys.stderr, 'payload:' From commits-noreply at bitbucket.org Wed Apr 20 15:53:48 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:48 +0200 (CEST) Subject: [pypy-svn] buildbot default: minor missing variable fixes Message-ID: <20110420135348.96D2336C201@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r476:d7bba811c113 Date: 2011-04-20 08:12 +0200 http://bitbucket.org/pypy/buildbot/changeset/d7bba811c113/ Log: minor missing variable fixes diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -146,4 +146,4 @@ app.config['LOCAL_REPOS'] = py.path.local(repopath) app.config['USE_COLOR_CODES'] = False - hook.handle(test_payload, test=True) + handle(test_payload, test=True) diff --git a/bitbucket_hook/mail.py b/bitbucket_hook/mail.py --- a/bitbucket_hook/mail.py +++ b/bitbucket_hook/mail.py @@ -38,6 +38,7 @@ def send(from_, to, subject, body, test=False): + from .main import app from email.mime.text import MIMEText # Is this a valid workaround for unicode errors? body = body.encode('ascii', 'xmlcharrefreplace') From commits-noreply at bitbucket.org Wed Apr 20 15:53:52 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:52 +0200 (CEST) Subject: [pypy-svn] buildbot default: pep8 cleanups Message-ID: <20110420135352.6BB7536C201@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r477:cf60c5d1dea0 Date: 2011-04-20 08:42 +0200 http://bitbucket.org/pypy/buildbot/changeset/cf60c5d1dea0/ Log: pep8 cleanups diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -11,9 +11,6 @@ from . 
import mail - - - seen_nodes = set() @@ -35,8 +32,6 @@ yield commit - - def handle(payload, test=False): path = payload['repository']['absolute_url'] local_repo = app.config['LOCAL_REPOS'].join(path) @@ -49,7 +44,6 @@ mail.handle_diff_email(payload, test) - if __name__ == '__main__': import hook as hookfile repopath = os.path.dirname(os.path.dirname(hookfile.__file__)) @@ -75,7 +69,8 @@ {u'author': u'antocuni', u'branch': u'default', - u'files': [{u'file': u'bitbucket_hook/hook.py', u'type': u'modified'}], + u'files': [{u'file': u'bitbucket_hook/hook.py', + u'type': u'modified'}], u'message': u"don't send newlines to irc", u'node': u'e17583fbfa5c', u'parents': [u'69e9eac01cf6'], @@ -99,13 +94,16 @@ {u'author': u'antocuni', u'branch': u'default', - u'files': [{u'file': u'bitbucket_hook/hook.py', u'type': u'modified'}, - {u'file': u'bitbucket_hook/__init__.py', u'type': u'added'}, + u'files': [{u'file': u'bitbucket_hook/hook.py', + u'type': u'modified'}, + {u'file': u'bitbucket_hook/__init__.py', + u'type': u'added'}, {u'file': u'bitbucket_hook/test/__init__.py', u'type': u'added'}, {u'file': u'bitbucket_hook/test/test_hook.py', u'type': u'added'}], - u'message': u'partially refactor the hook to be more testable, and write a test for the fix in 12cc0caf054d', + u'message': u'partially refactor the hook to be more testable,' + u' and write a test for the fix in 12cc0caf054d', u'node': u'9c7bc068df88', u'parents': [u'12cc0caf054d'], u'raw_author': u'Antonio Cuni ', @@ -114,8 +112,7 @@ u'size': 753, u'timestamp': u'2010-12-19 14:45:44'}] - - test_payload[u'commits'] = commits + test_payload[u'commits'] = commits ## # To regenerate: ## try: diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py --- a/bitbucket_hook/irc.py +++ b/bitbucket_hook/irc.py @@ -6,6 +6,7 @@ import time import subprocess + def getpaths(files, listfiles=False): # Handle empty input @@ -40,14 +41,15 @@ return common_prefix, filenames - def send_message(message, test=False): if test: print 
message + '\n' else: from .main import app return subprocess.call([ - app.config['BOT'], app.config['CHANNEL'], message + app.config['BOT'], + app.config['CHANNEL'], + message, ]) @@ -65,7 +67,7 @@ branch = commit['branch'] node = commit['node'] timestamp = commit.get('timestamp') - print '[%s] %s %s %s' % (time.strftime('%Y-%m-%d %H:%M'), node, timestamp, author) + print time.strftime('[%Y-%m-%d %H:%M]'), node, timestamp, author files = commit.get('files', []) common_prefix, filenames = getpaths(files, app.config['LISTFILES']) @@ -74,9 +76,9 @@ if app.config['USE_COLOR_CODES']: author = '\x0312%s\x0F' % author # in blue - branch = '\x02%s\x0F' % branch # in bold - node = '\x0311%s\x0F' % node # in azure - common_prefix = '\x0315%s\x0F' % common_prefix # in gray + branch = '\x02%s\x0F' % branch # in bold + node = '\x0311%s\x0F' % node # in azure + common_prefix = '\x0315%s\x0F' % common_prefix # in gray message = commit['message'].replace('\n', ' ') fields = (author, branch, node, common_prefix, filenames) @@ -88,5 +90,3 @@ maxlen = totallen - (len(part1) + 3) irc_msg = part1 + message[:maxlen] + '...' 
send_message(irc_msg, test) - - diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py --- a/bitbucket_hook/test/conftest.py +++ b/bitbucket_hook/test/conftest.py @@ -1,26 +1,30 @@ from bitbucket_hook import irc, mail, hook + def pytest_funcarg__mails(request): return [] + def pytest_funcarg__messages(request): return [] + def pytest_runtest_setup(item): hook.seen_nodes.clear() + def pytest_funcarg__monkeypatch(request): - mp = request.getfuncargvalue('monkeypatch') + mp = request.getfuncargvalue('monkeypatch') mails = request.getfuncargvalue('mails') - def send(from_, to, subject, body,test=False, mails=mails): + + def send(from_, to, subject, body, test=False, mails=mails): mails.append((from_, to, subject, body)) mp.setattr(mail, 'send', send) messages = request.getfuncargvalue('messages') + def send_irc_message(message, test=False): messages.append(message) mp.setattr(irc, 'send_message', send_irc_message) - return mp - diff --git a/bitbucket_hook/test/test_scm.py b/bitbucket_hook/test/test_scm.py --- a/bitbucket_hook/test/test_scm.py +++ b/bitbucket_hook/test/test_scm.py @@ -6,6 +6,7 @@ def test_non_ascii_encoding_guess_utf8(monkeypatch): + def _hgexe(argv): return u'späm'.encode('utf-8'), '', 0 monkeypatch.setattr(scm, '_hgexe', _hgexe) @@ -15,8 +16,9 @@ def test_non_ascii_encoding_invalid_utf8(monkeypatch): + def _hgexe(argv): - return '\xe4aa', '', 0 # invalid utf-8 string + return '\xe4aa', '', 0 # invalid utf-8 string monkeypatch.setattr(scm, '_hgexe', _hgexe) stdout = scm.hg('foobar') assert type(stdout) is unicode @@ -31,4 +33,3 @@ with pytest.raises(Exception): print scm.hg scm.hg('uhmwrong') - diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -7,20 +7,22 @@ hook.app.config['USE_COLOR_CODES'] = False - def test_sort_commits(): # commits = hook.get_commits('test_sort', { - 'commits': [{'revision': 43, 'node': 'second', 
'raw_node': 'first'}, - {'revision': 42, 'node': 'first', 'raw_node': 'second'}] - }) + 'commits': [ + {'revision': 43, 'node': 'second', 'raw_node': 'first'}, + {'revision': 42, 'node': 'first', 'raw_node': 'second'}, + ], + }) commits = [x['node'] for x in commits] assert commits == ['first', 'second'] -LONG_MESSAGE = u'This is a test with a long message: ' + 'x'*1000 -LONG_CUT = LONG_MESSAGE[:160-29] +LONG_MESSAGE = u'This is a test with a long message: ' + 'x' * 1000 +LONG_CUT = LONG_MESSAGE[:160 - 29] + def irc_cases(payload=None): @@ -37,12 +39,12 @@ d(file='my/file3')] single_file_deep = [d(file='path/to/single')] - cases = [(no_file, ''), # No diff - (single_file,'single'), # Single file + cases = [(no_file, ''), # No diff + (single_file, 'single'), # Single file (multiple_files, ''), # No common prefix - (multiple_files_subdir, 'path/'), # Common prefix - (multiple_files_subdir_root, ''), # No common subdir, file in root - (single_file_deep,'path/to/single') # Single file in deep path + (multiple_files_subdir, 'path/'), # Common prefix + (multiple_files_subdir_root, ''), # No common subdir file in root + (single_file_deep, 'path/to/single'), # Single file in deep path ] author = u'antocuni' @@ -54,7 +56,7 @@ for i, (case, snippet) in enumerate(cases): rev = 44 + i - node = chr(97+i) + 'xxyyy' + node = chr(97 + i) + 'xxyyy' raw_node = node * 2 expected.append(expected_template % (node, snippet, LONG_CUT)) commits.append(d(revision=rev, files=case, author=author, @@ -66,21 +68,25 @@ def test_irc_message(monkeypatch, messages): payload = { - 'commits': [{'revision': 42, - 'branch': u'default', - 'author': u'antocuni', - 'message': u'this is a test', - 'node': 'abcdef', - 'raw_node': 'abcdef', - }, - {'revision': 43, - 'author': u'antocuni', - 'branch': u'mybranch', - 'message': LONG_MESSAGE, - 'node': 'xxxyyy', - 'raw_node': 'xxxyyy', - } - ]} + 'commits': [ + { + 'revision': 42, + 'branch': u'default', + 'author': u'antocuni', + 'message': u'this is a test', 
+ 'node': 'abcdef', + 'raw_node': 'abcdef', + }, + { + 'revision': 43, + 'author': u'antocuni', + 'branch': u'mybranch', + 'message': LONG_MESSAGE, + 'node': 'xxxyyy', + 'raw_node': 'xxxyyy', + }, + ] + } payload, expected = irc_cases(payload) irc.handle_message(payload) @@ -94,13 +100,23 @@ for got, wanted in zip(messages[2:], expected): assert got == wanted -def noop(*args, **kwargs): pass + +def noop(*args, **kwargs): + pass + + class mock: __init__ = noop - def communicate(*args, **kwargs): return '1', 2 - def wait(*args, **kwargs): return 0 + + def communicate(*args, **kwargs): + return '1', 2 + + def wait(*args, **kwargs): + return 0 + sendmail = noop + def test_handle(monkeypatch): commits, _ = irc_cases() test_payload = {u'repository': {u'absolute_url': '', @@ -124,11 +140,11 @@ def test_ignore_duplicate_commits(monkeypatch, mails, messages): - def hg( *args): + def hg(*args): return '' % ' '.join(map(str, args)) monkeypatch.setattr(scm, 'hg', hg) monkeypatch.setattr(hook, 'seen_nodes', set()) - monkeypatch.setattr(hook, 'check_for_local_repo', lambda _:True) + monkeypatch.setattr(hook, 'check_for_local_repo', lambda _: True) commits, _ = irc_cases() payload = {u'repository': {u'absolute_url': '', @@ -144,5 +160,3 @@ num_commits = len(commits['commits']) assert len(mails) == num_commits assert len(messages) == num_commits - - diff --git a/bitbucket_hook/mail.py b/bitbucket_hook/mail.py --- a/bitbucket_hook/mail.py +++ b/bitbucket_hook/mail.py @@ -33,7 +33,7 @@ body = scm.hg('-R', local_repo, 'log', '-r', hgid, '--template', template) diff = scm.get_diff(local_repo, hgid, commit['files']) - body = body+diff + body = body + diff send(sender, app.config['ADDRESS'], subject, body, test) @@ -62,6 +62,3 @@ commits = hook.get_commits('email', payload) for commit in commits: send_diff_for_commit(payload, commit, test) - - - diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py --- a/bitbucket_hook/test/test_irc.py +++ 
b/bitbucket_hook/test/test_irc.py @@ -1,8 +1,10 @@ from bitbucket_hook.irc import getpaths + def fl(*paths): return [{'file': x} for x in paths] + def pytest_generate_tests(metafunc): barefile = fl('file') @@ -14,7 +16,6 @@ slashleft = fl('/slashleft') slashright = fl('slashright/') - nocommon = distinct + fl('path4/file') nocommonplusroot = distinct + barefile @@ -34,41 +35,41 @@ pypyempty = fl('pypy/rlib/rdtoa.py', 'pypy/rlib/test/test_rdtoa.py') + nothing = ('', '') - nothing = ('', '') expectations = [ ('null', [], nothing), ('empty', empty, nothing), - ('empty*2', empty*2, nothing), + ('empty*2', empty * 2, nothing), ('bare', barefile, ('file', '')), ('deep', deepfile, ('a/long/path/to/deepfile.py', '')), ('slashes', slashesfile, ('/slashesfile/', '')), ('slashleft', slashleft, ('/slashleft', '')), ('slashright', slashright, ('slashright/', '')), ('nocommon', nocommon, ('', ' M(file1, file2, file, file)')), - ('nocommon+root', nocommonplusroot, + ('nocommon+root', nocommonplusroot, ('', ' M(file1, file2, file, file)')), - ('nocommon+empty', nocommonplusempty, ('',' M(file1, file2, file)')), + ('nocommon+empty', nocommonplusempty, ('', ' M(file1, file2, file)')), ('common', common, ('some/path/to/', ' M(file, file, anotherfile, afile)')), ('common+root', commonplusroot, ('', ' M(file1, file2, file, file)')), - ('common+empty', commonplusempty, ('',' M(file1, file2, file)')), - ('nocommon+slash', nocommonplusslash, ('',' M(file1, file2, file)')), - ('common+slash', commonplusslash, ('path/',' M(file1, file2, file)')), + ('common+empty', commonplusempty, ('', ' M(file1, file2, file)')), + ('nocommon+slash', nocommonplusslash, ('', ' M(file1, file2, file)')), + ('common+slash', commonplusslash, ('path/', ' M(file1, file2, file)')), ('pypydoubledash', pypydoubleslash, ('pypy/jit/metainterp/', ' M(u.py, test_c.py, test_o.py)')), ('pypyempty', pypyempty, ('pypy/rlib/', ' M(rdtoa.py, test_rdtoa.py)')), ] - if metafunc.function.__name__=='test_getpaths': + if 
metafunc.function.__name__ == 'test_getpaths': for name, files, (common, listfiles) in expectations: - metafunc.addcall(id='list/'+name, funcargs={ + metafunc.addcall(id='list/' + name, funcargs={ 'files': files, 'expected_common': common, 'expected_listfiles': listfiles, }) - metafunc.addcall(id='nolist/'+name, funcargs={ + metafunc.addcall(id='nolist/' + name, funcargs={ 'files': files, 'expected_common': common, 'expected_listfiles': listfiles, @@ -79,5 +80,3 @@ common, files = getpaths(files, listfiles=bool(expected_listfiles)) assert common == expected_common assert files == expected_listfiles - - diff --git a/bitbucket_hook/scm.py b/bitbucket_hook/scm.py --- a/bitbucket_hook/scm.py +++ b/bitbucket_hook/scm.py @@ -1,12 +1,14 @@ import sys from subprocess import Popen, PIPE + def _hgexe(argv): proc = Popen(['hg'] + list(argv), stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() ret = proc.wait() return stdout, stderr, ret + def hg(*argv): argv = map(str, argv) stdout, stderr, ret = _hgexe(argv) diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -22,12 +22,16 @@ HOST_NAME = 'codespeak.net' PORT_NUMBER = 9237 + @app.route('/', methods=['GET']) def test_form(): """Respond to a GET request.""" return """ -

This is the pypy bitbucket hook. Use the following form only for testing

+

+ This is the pypy bitbucket hook. + Use the following form only for testing +

payload:
submit: @@ -36,7 +40,6 @@ """ - @app.route('/', methods=['POST']) def handle_payload(): payload = json.loads(flask.request.form['payload']) @@ -56,6 +59,7 @@ USE_COLOR_CODES = True LISTFILES = False + class CodeSpeakConfig(DefaultConfig): SMTP_SERVER = 'localhost' SMTP_PORT = 25 @@ -64,6 +68,7 @@ CHANNEL = '#pypy' BOT = '/svn/hooks/commit-bot/message' + class ViperConfig(DefaultConfig): SMTP_SERVER = "out.alice.it" SMTP_PORT = 25 From commits-noreply at bitbucket.org Wed Apr 20 15:53:54 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:54 +0200 (CEST) Subject: [pypy-svn] buildbot default: more test coverage Message-ID: <20110420135354.3ADC136C201@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r478:e59068516083 Date: 2011-04-20 09:09 +0200 http://bitbucket.org/pypy/buildbot/changeset/e59068516083/ Log: more test coverage diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -45,8 +45,7 @@ if __name__ == '__main__': - import hook as hookfile - repopath = os.path.dirname(os.path.dirname(hookfile.__file__)) + repopath = os.path.dirname(os.path.dirname(__file__)) print 'Repository path:', repopath test_payload = {u'repository': {u'absolute_url': '', u'name': u'test', diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -139,6 +139,20 @@ hook.handle(test_payload, test=True) +def test_handle_unknown(monkeypatch): + def hgraise(*k): + raise Exception('this should never be called') + + py.test.raises(Exception, hgraise) + + monkeypatch.setattr(scm, 'hg', hgraise) + hook.handle({ + u'repository': { + u'absolute_url': 'uhm/missing/yeah', + }, + }) + + def test_ignore_duplicate_commits(monkeypatch, mails, messages): def hg(*args): return '' % ' '.join(map(str, args)) diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py 
--- a/bitbucket_hook/test/conftest.py +++ b/bitbucket_hook/test/conftest.py @@ -1,4 +1,4 @@ -from bitbucket_hook import irc, mail, hook +#XXX imports in conftest globals dont sow in coverage reports def pytest_funcarg__mails(request): @@ -10,10 +10,12 @@ def pytest_runtest_setup(item): + from bitbucket_hook import hook hook.seen_nodes.clear() def pytest_funcarg__monkeypatch(request): + from bitbucket_hook import irc, mail mp = request.getfuncargvalue('monkeypatch') mails = request.getfuncargvalue('mails') diff --git a/bitbucket_hook/test/test_scm.py b/bitbucket_hook/test/test_scm.py --- a/bitbucket_hook/test/test_scm.py +++ b/bitbucket_hook/test/test_scm.py @@ -25,10 +25,9 @@ assert stdout == u'\ufffdaa' + at pytest.mark.skip_if("not py.path.local.sysfind('hg')", + reason='hg binary missing') def test_hg(): - if not py.path.local.sysfind('hg'): - pytest.skip('hg binary missing') - scm.hg('help') with pytest.raises(Exception): print scm.hg From commits-noreply at bitbucket.org Wed Apr 20 15:53:55 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:55 +0200 (CEST) Subject: [pypy-svn] buildbot default: move the script test based code thats in hook to a module where its also a test of the testsuite Message-ID: <20110420135355.3911836C201@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r479:7d12ecd06454 Date: 2011-04-20 10:02 +0200 http://bitbucket.org/pypy/buildbot/changeset/7d12ecd06454/ Log: move the script test based code thats in hook to a module where its also a test of the testsuite diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -42,104 +42,3 @@ scm.hg('pull', '-R', local_repo) irc.handle_message(payload, test) mail.handle_diff_email(payload, test) - - -if __name__ == '__main__': - repopath = os.path.dirname(os.path.dirname(__file__)) - print 'Repository path:', repopath - test_payload = {u'repository': {u'absolute_url': '', - u'name': 
u'test', - u'owner': u'antocuni', - u'slug': u'test', - u'website': u''}, - u'user': u'antocuni'} - - commits = [{u'author': u'arigo', - u'branch': u'default', - u'files': [], - u'message': u'Merge heads.', - u'node': u'00ae063c6b8c', - u'parents': [u'278760e9c560', u'29f1ff96548d'], - u'raw_author': u'Armin Rigo ', - u'raw_node': u'00ae063c6b8c13d873d92afc5485671f6a944077', - u'revision': 403, - u'size': 0, - u'timestamp': u'2011-01-09 13:07:24'}, - - {u'author': u'antocuni', - u'branch': u'default', - u'files': [{u'file': u'bitbucket_hook/hook.py', - u'type': u'modified'}], - u'message': u"don't send newlines to irc", - u'node': u'e17583fbfa5c', - u'parents': [u'69e9eac01cf6'], - u'raw_author': u'Antonio Cuni ', - u'raw_node': u'e17583fbfa5c5636b5375a5fc81f3d388ce1b76e', - u'revision': 399, - u'size': 19, - u'timestamp': u'2011-01-07 17:42:13'}, - - {u'author': u'antocuni', - u'branch': u'default', - u'files': [{u'file': u'.hgignore', u'type': u'added'}], - u'message': u'ignore irrelevant files', - u'node': u'5cbd6e289c04', - u'parents': [u'3a7c89443fc8'], - u'raw_author': u'Antonio Cuni ', - u'raw_node': u'5cbd6e289c043c4dd9b6f55b5ec1c8d05711c6ad', - u'revision': 362, - u'size': 658, - u'timestamp': u'2010-11-04 16:34:31'}, - - {u'author': u'antocuni', - u'branch': u'default', - u'files': [{u'file': u'bitbucket_hook/hook.py', - u'type': u'modified'}, - {u'file': u'bitbucket_hook/__init__.py', - u'type': u'added'}, - {u'file': u'bitbucket_hook/test/__init__.py', - u'type': u'added'}, - {u'file': u'bitbucket_hook/test/test_hook.py', - u'type': u'added'}], - u'message': u'partially refactor the hook to be more testable,' - u' and write a test for the fix in 12cc0caf054d', - u'node': u'9c7bc068df88', - u'parents': [u'12cc0caf054d'], - u'raw_author': u'Antonio Cuni ', - u'raw_node': u'9c7bc068df8850f4102c610d2bee3cdef67b30e6', - u'revision': 391, - u'size': 753, - u'timestamp': u'2010-12-19 14:45:44'}] - - test_payload[u'commits'] = commits - -## # To regenerate: -## 
try: -## from json import loads # 2.6 -## except ImportError: -## from simplejson import loads -## -## from urllib2 import urlopen -## url = ("https://api.bitbucket.org/1.0/repositories/pypy/buildbot/" -## "changesets/%s/") -## -## # Representative changesets -## mergeheads = u'00ae063c6b8c' -## singlefilesub = u'e17583fbfa5c' -## root = u'5cbd6e289c04' -## multiadd = u'9c7bc068df88' -## test_nodes = mergeheads, singlefilesub, root, multiadd -## -## commits = [] -## for commit in test_nodes: -## req = urlopen(url % commit) -## payload = req.read() -## req.close() -## commits.append(loads(payload)) -## -## test_payload['commits'] = commits - - app.config['LOCAL_REPOS'] = py.path.local(repopath) - app.config['USE_COLOR_CODES'] = False - - handle(test_payload, test=True) diff --git a/bitbucket_hook/test_hook_testcall.py b/bitbucket_hook/test_hook_testcall.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test_hook_testcall.py @@ -0,0 +1,109 @@ +import os +from bitbucket_hook.hook import handle +from bitbucket_hook.main import app +import py + + +def test_handlecall(): + repopath = os.path.dirname(os.path.dirname(__file__)) + print 'Repository path:', repopath + test_payload = {u'repository': {u'absolute_url': '', + u'name': u'test', + u'owner': u'antocuni', + u'slug': u'test', + u'website': u''}, + u'user': u'antocuni'} + + commits = [{u'author': u'arigo', + u'branch': u'default', + u'files': [], + u'message': u'Merge heads.', + u'node': u'00ae063c6b8c', + u'parents': [u'278760e9c560', u'29f1ff96548d'], + u'raw_author': u'Armin Rigo ', + u'raw_node': u'00ae063c6b8c13d873d92afc5485671f6a944077', + u'revision': 403, + u'size': 0, + u'timestamp': u'2011-01-09 13:07:24'}, + + {u'author': u'antocuni', + u'branch': u'default', + u'files': [{u'file': u'bitbucket_hook/hook.py', + u'type': u'modified'}], + u'message': u"don't send newlines to irc", + u'node': u'e17583fbfa5c', + u'parents': [u'69e9eac01cf6'], + u'raw_author': u'Antonio Cuni ', + u'raw_node': 
u'e17583fbfa5c5636b5375a5fc81f3d388ce1b76e', + u'revision': 399, + u'size': 19, + u'timestamp': u'2011-01-07 17:42:13'}, + + {u'author': u'antocuni', + u'branch': u'default', + u'files': [{u'file': u'.hgignore', u'type': u'added'}], + u'message': u'ignore irrelevant files', + u'node': u'5cbd6e289c04', + u'parents': [u'3a7c89443fc8'], + u'raw_author': u'Antonio Cuni ', + u'raw_node': u'5cbd6e289c043c4dd9b6f55b5ec1c8d05711c6ad', + u'revision': 362, + u'size': 658, + u'timestamp': u'2010-11-04 16:34:31'}, + + {u'author': u'antocuni', + u'branch': u'default', + u'files': [{u'file': u'bitbucket_hook/hook.py', + u'type': u'modified'}, + {u'file': u'bitbucket_hook/__init__.py', + u'type': u'added'}, + {u'file': u'bitbucket_hook/test/__init__.py', + u'type': u'added'}, + {u'file': u'bitbucket_hook/test/test_hook.py', + u'type': u'added'}], + u'message': u'partially refactor the hook to be more testable,' + u' and write a test for the fix in 12cc0caf054d', + u'node': u'9c7bc068df88', + u'parents': [u'12cc0caf054d'], + u'raw_author': u'Antonio Cuni ', + u'raw_node': u'9c7bc068df8850f4102c610d2bee3cdef67b30e6', + u'revision': 391, + u'size': 753, + u'timestamp': u'2010-12-19 14:45:44'}] + + test_payload[u'commits'] = commits + +## # To regenerate: +## try: +## from json import loads # 2.6 +## except ImportError: +## from simplejson import loads +## +## from urllib2 import urlopen +## url = ("https://api.bitbucket.org/1.0/repositories/pypy/buildbot/" +## "changesets/%s/") +## +## # Representative changesets +## mergeheads = u'00ae063c6b8c' +## singlefilesub = u'e17583fbfa5c' +## root = u'5cbd6e289c04' +## multiadd = u'9c7bc068df88' +## test_nodes = mergeheads, singlefilesub, root, multiadd +## +## commits = [] +## for commit in test_nodes: +## req = urlopen(url % commit) +## payload = req.read() +## req.close() +## commits.append(loads(payload)) +## +## test_payload['commits'] = commits + + app.config['LOCAL_REPOS'] = py.path.local(repopath) + app.config['USE_COLOR_CODES'] = 
False + + handle(test_payload, test=True) + + +if __name__ == '__main__': + test_handlecall() From commits-noreply at bitbucket.org Wed Apr 20 15:53:56 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:56 +0200 (CEST) Subject: [pypy-svn] buildbot default: add some basic tests for the data pass-trough of the post handler Message-ID: <20110420135356.3D534282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r480:78de3faa59cc Date: 2011-04-20 10:19 +0200 http://bitbucket.org/pypy/buildbot/changeset/78de3faa59cc/ Log: add some basic tests for the data pass-trough of the post handler diff --git a/bitbucket_hook/test/test_main.py b/bitbucket_hook/test/test_main.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/test_main.py @@ -0,0 +1,32 @@ +from bitbucket_hook.main import app +from bitbucket_hook import hook + +def test_get(): + client = app.test_client() + response = client.get('/') + + +def test_post(monkeypatch): + client = app.test_client() + def handle(payload, test): + assert payload=={} + assert test==app.config['TESTING'] + monkeypatch.setattr(hook, 'handle', handle) + + app.config['TESTING'] = True + response = client.post('/', data={'payload':"{}"}) + + app.config['TESTING'] = False + response = client.post('/', data={'payload':"{}"}) + + assert response.status_code == 200 + +def test_post_error(monkeypatch): + client = app.test_client() + def handle(payload, test): + raise Exception('omg') + monkeypatch.setattr(hook, 'handle', handle) + response = client.post('/', data={'payload':"{}"}) + assert response.status_code == 500 + + diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -44,13 +44,14 @@ def handle_payload(): payload = json.loads(flask.request.form['payload']) try: - hook.handle(payload) + hook.handle(payload, test=app.testing) except: traceback.print_exc() print >> sys.stderr, 'payload:' pprint.pprint(payload, 
sys.stderr) print >> sys.stderr raise + return 'ok' class DefaultConfig(object): From commits-noreply at bitbucket.org Wed Apr 20 15:53:59 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:53:59 +0200 (CEST) Subject: [pypy-svn] buildbot default: ignore coverage tempfiles Message-ID: <20110420135359.21C06282C28@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r481:69dd0e8c474b Date: 2011-04-20 11:26 +0200 http://bitbucket.org/pypy/buildbot/changeset/69dd0e8c474b/ Log: ignore coverage tempfiles diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -2,6 +2,9 @@ *.pyc *~ +# test coverage files +.coverage + # master/slaveinfo.py contains the passwords, so it should never be tracked master/slaveinfo.py From commits-noreply at bitbucket.org Wed Apr 20 15:54:03 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:54:03 +0200 (CEST) Subject: [pypy-svn] buildbot default: test the behaviour of the irc message sender Message-ID: <20110420135403.0DFBA282C45@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r482:9e68e0dd7772 Date: 2011-04-20 11:33 +0200 http://bitbucket.org/pypy/buildbot/changeset/9e68e0dd7772/ Log: test the behaviour of the irc message sender diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py --- a/bitbucket_hook/test/test_irc.py +++ b/bitbucket_hook/test/test_irc.py @@ -1,5 +1,5 @@ -from bitbucket_hook.irc import getpaths - +from bitbucket_hook import irc +import subprocess def fl(*paths): return [{'file': x} for x in paths] @@ -77,6 +77,19 @@ def test_getpaths(files, expected_common, expected_listfiles): - common, files = getpaths(files, listfiles=bool(expected_listfiles)) + common, files = irc.getpaths(files, listfiles=bool(expected_listfiles)) assert common == expected_common assert files == expected_listfiles + +def test_send_message(monkeypatch): + monkeypatch.undo() # hack to get at the functions + + # gets 
called in normal mode + monkeypatch.setattr(subprocess, 'call', lambda *k, **kw: None) + irc.send_message('test') + + # doesnt get called in test mode + monkeypatch.setattr(subprocess, 'call', lambda: None) + irc.send_message('test', test=True) + + From commits-noreply at bitbucket.org Wed Apr 20 15:54:05 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:54:05 +0200 (CEST) Subject: [pypy-svn] buildbot default: simplify the duplicate avoidance tests Message-ID: <20110420135405.7A97D282C42@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r483:cc63f7889d1f Date: 2011-04-20 12:13 +0200 http://bitbucket.org/pypy/buildbot/changeset/cc63f7889d1f/ Log: simplify the duplicate avoidance tests diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -154,11 +154,7 @@ def test_ignore_duplicate_commits(monkeypatch, mails, messages): - def hg(*args): - return '' % ' '.join(map(str, args)) - monkeypatch.setattr(scm, 'hg', hg) monkeypatch.setattr(hook, 'seen_nodes', set()) - monkeypatch.setattr(hook, 'check_for_local_repo', lambda _: True) commits, _ = irc_cases() payload = {u'repository': {u'absolute_url': '', @@ -168,9 +164,9 @@ u'website': u''}, u'user': u'antocuni', 'commits': commits['commits']} - hook.handle(payload) - hook.handle(payload) - # + commits_listed = list(hook.get_commits('test', payload)) + commits_again = list(hook.get_commits('test', payload)) num_commits = len(commits['commits']) - assert len(mails) == num_commits - assert len(messages) == num_commits + assert len(commits_listed) == num_commits + assert not commits_again + From commits-noreply at bitbucket.org Wed Apr 20 15:54:06 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 15:54:06 +0200 (CEST) Subject: [pypy-svn] buildbot default: add a run script to run the app propperly Message-ID: 
<20110420135406.A04EF282C30@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r484:e9a3f65ee62c Date: 2011-04-20 15:26 +0200 http://bitbucket.org/pypy/buildbot/changeset/e9a3f65ee62c/ Log: add a run script to run the app propperly diff --git a/bitbucket_hook/run.py b/bitbucket_hook/run.py new file mode 100755 --- /dev/null +++ b/bitbucket_hook/run.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +import py +import sys +import argparse +main = py.path.local(__file__).dirpath().join('main.py').pyimport() + + +if __name__ == '__main__': + HOST_NAME = 'codespeak.net' + PORT_NUMBER = 9237 + main.app.run( + host = HOST_NAME if 'deploy' in sys.argv else 'localhost', + debug = 'debug' in sys.argv, + port=PORT_NUMBER) + diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -17,10 +17,8 @@ app = flask.Flask('bb-hook') -import hook +from . import hook -HOST_NAME = 'codespeak.net' -PORT_NUMBER = 9237 @app.route('/', methods=['GET']) @@ -86,5 +84,3 @@ # real settings, (they works on codespeak at least) app.config.from_object(CodeSpeakConfig) -if __name__ == '__main__': - app.run(debug=True) From commits-noreply at bitbucket.org Wed Apr 20 16:01:40 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Wed, 20 Apr 2011 16:01:40 +0200 (CEST) Subject: [pypy-svn] buildbot default: some pep8 cleanups Message-ID: <20110420140140.E10CC282C22@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r485:6ef1bc8a9a11 Date: 2011-04-20 16:01 +0200 http://bitbucket.org/pypy/buildbot/changeset/6ef1bc8a9a11/ Log: some pep8 cleanups diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -20,7 +20,6 @@ from . 
import hook - @app.route('/', methods=['GET']) def test_form(): """Respond to a GET request.""" @@ -83,4 +82,3 @@ else: # real settings, (they works on codespeak at least) app.config.from_object(CodeSpeakConfig) - From commits-noreply at bitbucket.org Wed Apr 20 18:32:50 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 20 Apr 2011 18:32:50 +0200 (CEST) Subject: [pypy-svn] pypy default: actually test the right thing in this test; before, the loops were specialized anyway because a and b were loop invariants Message-ID: <20110420163250.A73BE282C22@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43492:f353797c847a Date: 2011-04-20 18:32 +0200 http://bitbucket.org/pypy/pypy/changeset/f353797c847a/ Log: actually test the right thing in this test; before, the loops were specialized anyway because a and b were loop invariants diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1531,43 +1531,37 @@ ## assert call.getarg(2).value == 3.0 def test_xor(self): - def main(a, b): - i = sa = 0 - while i < 300: + def main(b): + a = sa = 0 + while a < 300: if a > 0: # Specialises the loop pass if b > 10: pass - if a^b >= 0: - sa += 1 # ID: add - i += 1 + if a^b >= 0: # ID: guard + sa += 1 + sa += a^a # ID: a_xor_a + a += 1 return sa - # if both are >=0, a^b is known to be >=0 - log = self.run(main, [3, 14], threshold=200) + log = self.run(main, [11], threshold=200) assert log.result == 300 loop, = log.loops_by_filename(self.filepath) - assert loop.match(""" - i9 = int_lt(i6, 300) - guard_true(i9, descr=...) - i11 = int_add_ovf(i7, 1) - guard_no_overflow(descr=...) 
- i13 = int_add(i6, 1) - --TICK-- - jump(p0, p1, p2, p3, p4, p5, i13, i11, descr=) + # if both are >=0, a^b is known to be >=0 + # note that we know that b>10 + assert loop.match_by_id('guard', """ + i10 = int_xor(i5, i7) """) + # + # x^x is always optimized to 0 + assert loop.match_by_id('a_xor_a', "") - # XXX: I don't understand why this assert passes, because the - # optimizer doesn't know that b >=0 - log = self.run(main, [3, 4], threshold=200) + log = self.run(main, [9], threshold=200) assert log.result == 300 loop, = log.loops_by_filename(self.filepath) - assert loop.match(""" - i9 = int_lt(i6, 300) - guard_true(i9, descr=...) - i11 = int_add_ovf(i7, 1) - guard_no_overflow(descr=...) - i13 = int_add(i6, 1) - --TICK-- - jump(p0, p1, p2, p3, p4, p5, i13, i11, descr=) + # we don't know that b>10, hence we cannot optimize it + assert loop.match_by_id('guard', """ + i10 = int_xor(i5, i7) + i12 = int_ge(i10, 0) + guard_true(i12, descr=...) """) From commits-noreply at bitbucket.org Wed Apr 20 21:06:19 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 20 Apr 2011 21:06:19 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: work in progress Message-ID: <20110420190619.C6AD5282C22@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43493:01e3685f89c6 Date: 2011-04-19 19:39 +0200 http://bitbucket.org/pypy/pypy/changeset/01e3685f89c6/ Log: work in progress diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -590,9 +590,11 @@ for sh in short: ok = False extra_guards = [] + if sh.virtual_state.generalization_of(virtual_state): ok = True else: + import pdb; pdb.set_trace() try: cpu = self.optimizer.cpu sh.virtual_state.generate_guards(virtual_state, diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py 
+++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2010,6 +2010,37 @@ assert res == 12 self.check_tree_loop_count(2) + def test_caching_setfield(self): + myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) + class A: + pass + def f(n, a): + i = sa = 0 + node = A() + while i < n: + myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node) + myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node) + if i < n/2: + node.val1 = a + node.val2 = a + sa += node.val1 + node.val2 + i += 1 + return n + res = self.meta_interp(f, [32, 7]) + assert res == f(32, 7) + + # write same val to 2 locations + # read them + # write 2 differnt values from branch + + # r=getfield + # assert r<0 + # bridge violating assert + + + + + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -175,6 +175,7 @@ def generalization_of(self, other): assert len(self.state) == len(other.state) for i in range(len(self.state)): + print 'ge: ', i if not self.state[i].generalization_of(other.state[i]): return False return True @@ -182,6 +183,7 @@ def generate_guards(self, other, args, cpu, extra_guards): assert len(self.state) == len(other.state) == len(args) for i in range(len(self.state)): + print 'gu: ', i self.state[i].generate_guards(other.state[i], args[i], cpu, extra_guards) From commits-noreply at bitbucket.org Wed Apr 20 21:06:21 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 20 Apr 2011 21:06:21 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: imported from jit-usable_retrace Message-ID: <20110420190621.B0462282C23@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43494:83a3250ba994 Date: 2011-04-19 19:42 +0200 http://bitbucket.org/pypy/pypy/changeset/83a3250ba994/ Log: imported from jit-usable_retrace diff --git 
a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -140,6 +140,7 @@ def _generate_guards(self, other, box, cpu, extra_guards): if not isinstance(other, NotVirtualStateInfo): raise InvalidLoop + if self.level == LEVEL_KNOWNCLASS and \ box.nonnull() and \ self.known_class.same_constant(cpu.ts.cls_of_box(box)): @@ -153,6 +154,39 @@ op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) extra_guards.append(op) return + + if self.level == LEVEL_NONNULL and \ + other.level == LEVEL_UNKNOWN and \ + isinstance(box, BoxPtr) and \ + box.nonnull(): + op = ResOperation(rop.GUARD_NONNULL, [box], None) + extra_guards.append(op) + return + + if self.level == LEVEL_UNKNOWN and \ + other.level == LEVEL_UNKNOWN and \ + isinstance(box, BoxInt) and \ + self.intbound.contains(box.getint()): + if self.intbound.has_lower: + bound = self.intbound.lower + if not (other.intbound.has_lower and \ + other.intbound.lower >= bound): + res = BoxInt() + op = ResOperation(rop.INT_GE, [box, ConstInt(bound)], res) + extra_guards.append(op) + op = ResOperation(rop.GUARD_TRUE, [res], None) + extra_guards.append(op) + if self.intbound.has_upper: + bound = self.intbound.upper + if not (other.intbound.has_upper and \ + other.intbound.upper <= bound): + res = BoxInt() + op = ResOperation(rop.INT_LE, [box, ConstInt(bound)], res) + extra_guards.append(op) + op = ResOperation(rop.GUARD_TRUE, [res], None) + extra_guards.append(op) + return + # Remaining cases are probably not interesting raise InvalidLoop if self.level == LEVEL_CONSTANT: From commits-noreply at bitbucket.org Wed Apr 20 21:06:23 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 20 Apr 2011 21:06:23 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: imported dependencies from jit-usable_retrace too Message-ID: <20110420190623.830A1282C22@codespeak.net> 
Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43495:1aac0128cbd1 Date: 2011-04-19 19:47 +0200 http://bitbucket.org/pypy/pypy/changeset/1aac0128cbd1/ Log: imported dependencies from jit-usable_retrace too diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -594,7 +594,6 @@ if sh.virtual_state.generalization_of(virtual_state): ok = True else: - import pdb; pdb.set_trace() try: cpu = self.optimizer.cpu sh.virtual_state.generate_guards(virtual_state, @@ -619,9 +618,10 @@ jumpop = self.optimizer.newoperations.pop() assert jumpop.getopnum() == rop.JUMP for guard in extra_guards: - descr = sh.start_resumedescr.clone_if_mutable() - self.inliner.inline_descr_inplace(descr) - guard.setdescr(descr) + if guard.is_guard(): + descr = sh.start_resumedescr.clone_if_mutable() + self.inliner.inline_descr_inplace(descr) + guard.setdescr(descr) self.emit_operation(guard) self.optimizer.newoperations.append(jumpop) return diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -2,7 +2,10 @@ from pypy.jit.metainterp.optimizeopt import virtualize from pypy.jit.metainterp.optimizeopt.optimizer import LEVEL_CONSTANT, \ LEVEL_KNOWNCLASS, \ + LEVEL_NONNULL, \ + LEVEL_UNKNOWN, \ MININT, MAXINT +from pypy.jit.metainterp.history import BoxInt, ConstInt from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.optimizeopt.intutils import IntBound from pypy.jit.metainterp.resoperation import rop, ResOperation From commits-noreply at bitbucket.org Wed Apr 20 21:06:25 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 20 Apr 2011 21:06:25 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: test for two not handled cases Message-ID: 
<20110420190625.3A02A282C22@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43496:1938c6d07c91 Date: 2011-04-19 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/1938c6d07c91/ Log: test for two not handled cases diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2017,30 +2017,42 @@ def f(n, a): i = sa = 0 node = A() + node.val1 = node.val2 = 0 while i < n: myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node) myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node) + sa += node.val1 + node.val2 if i < n/2: node.val1 = a node.val2 = a - sa += node.val1 + node.val2 + else: + node.val1 = a + node.val2 = a + 1 i += 1 - return n + return sa res = self.meta_interp(f, [32, 7]) assert res == f(32, 7) - # write same val to 2 locations - # read them - # write 2 differnt values from branch + def test_getfield_result_with_intbound(self): + myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) + class A: + pass + def f(n, a): + i = sa = 0 + node = A() + node.val1 = a + while i < n: + myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node) + myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node) + if node.val1 > 0: + sa += 1 + if i > n/2: + node.val1 = -a + i += 1 + return sa + res = self.meta_interp(f, [32, 7]) + assert res == f(32, 7) - # r=getfield - # assert r<0 - # bridge violating assert - - - - - class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -212,7 +212,6 @@ def generalization_of(self, other): assert len(self.state) == len(other.state) for i in range(len(self.state)): - print 'ge: ', i if not self.state[i].generalization_of(other.state[i]): return False return 
True @@ -220,7 +219,6 @@ def generate_guards(self, other, args, cpu, extra_guards): assert len(self.state) == len(other.state) == len(args) for i in range(len(self.state)): - print 'gu: ', i self.state[i].generate_guards(other.state[i], args[i], cpu, extra_guards) From commits-noreply at bitbucket.org Wed Apr 20 21:06:29 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 20 Apr 2011 21:06:29 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: - Dont cache setfields across loop boundaries we dont want them in the Message-ID: <20110420190629.6A613282C22@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43497:eb7cfada75ee Date: 2011-04-20 21:04 +0200 http://bitbucket.org/pypy/pypy/changeset/eb7cfada75ee/ Log: - Dont cache setfields across loop boundaries we dont want them in the short preamble. - Dissabled cached_arrayitems support for now. - Make the short preamble guard for the state of all values from the preamble surviving into the loop. They can influence the loop without turning up as arguments by introducing assumptions such as for eaxmple getfield(p1)==7. The vaues in the original inputargs are already treated by the VirtualState so they are ignored here. 
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -166,8 +166,8 @@ inputargs = virtual_state.make_inputargs(values) sb = preamble_optimizer.produce_short_preamble_ops(inputargs) self.short_boxes = sb + initial_inputargs_len = len(inputargs) - inputargs, short = self.inline(self.cloned_operations, loop.inputargs, jump_args, @@ -274,10 +274,16 @@ self.short_inliner = Inliner(inputargs, jumpargs) short = [] + short_seen = {} + for result, op in self.short_boxes.items(): + if op is not None: + for op in self.getvalue(result).make_guards(result): + self.add_op_to_short(op, short, short_seen) i = j = 0 while i < len(self.optimizer.newoperations): op = self.optimizer.newoperations[i] + self.boxes_created_this_iteration[op.result] = True args = op.getarglist() if op.is_guard(): @@ -285,14 +291,14 @@ for a in args: self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, True) + jumpargs, short_seen) i += 1 if i == len(self.optimizer.newoperations): while j < len(jumpargs): a = jumpargs[j] self.import_box(a, inputargs, short, short_jumpargs, - jumpargs, True) + jumpargs, short_seen) j += 1 jumpop.initarglist(jumpargs) @@ -300,35 +306,47 @@ short.append(ResOperation(rop.JUMP, short_jumpargs, None)) return inputargs, short + def add_op_to_short(self, op, short, short_seen): + if op is None: + return + if op.result is not None and op.result in short_seen: + return self.short_inliner.inline_arg(op.result) + for a in op.getarglist(): + if not isinstance(a, Const) and a not in short_seen: + self.add_op_to_short(self.short_boxes[a], short, short_seen) + short.append(op) + short_seen[op.result] = True + newop = self.short_inliner.inline_op(op) + self.optimizer.send_extra_operation(newop) + assert self.optimizer.newoperations[-1] is not newop + + if op.is_ovf(): + # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here + guard 
= ResOperation(rop.GUARD_NO_OVERFLOW, [], None) + short.append(guard) + self.optimizer.send_extra_operation(guard) + assert self.optimizer.newoperations[-1] is not guard + + # FIXME: Emit a proper guards here in case it is not + # removed by the optimizer. Can that happen? + return newop.result + def import_box(self, box, inputargs, short, short_jumpargs, - jumpargs, extend_inputargs): + jumpargs, short_seen): if isinstance(box, Const) or box in inputargs: return if box in self.boxes_created_this_iteration: return short_op = self.short_boxes[box] - for a in short_op.getarglist(): - self.import_box(a, inputargs, short, short_jumpargs, jumpargs, False) - short.append(short_op) - newop = self.short_inliner.inline_op(short_op) - self.optimizer.send_extra_operation(newop) - if newop.is_ovf(): - # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here - guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) - short.append(guard) - # FIXME: Emit a proper guard here in case it is not - # removed by the optimizer. Can that happen? 
- self.optimizer.send_extra_operation(guard) - assert self.optimizer.newoperations[-1] is not guard - - if extend_inputargs: - short_jumpargs.append(short_op.result) - inputargs.append(box) - box = newop.result - if box in self.optimizer.values: - box = self.optimizer.values[box].force_box() - jumpargs.append(box) + newresult = self.add_op_to_short(short_op, short, short_seen) + + short_jumpargs.append(short_op.result) + inputargs.append(box) + box = newresult + if box in self.optimizer.values: + box = self.optimizer.values[box].force_box() + jumpargs.append(box) def sameop(self, op1, op2): if op1.getopnum() != op2.getopnum(): @@ -652,7 +670,6 @@ for op in loop_operations: newop = inliner.inline_op(op) - if not dryrun: self.emit_operation(newop) else: diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2053,6 +2053,26 @@ res = self.meta_interp(f, [32, 7]) assert res == f(32, 7) + def test_getfield_result_constant(self): + myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) + class A: + pass + def f(n, a): + i = sa = 0 + node = A() + node.val1 = 7 + while i < n: + myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node) + myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node) + if node.val1 == 7: + sa += 1 + if i > n/2: + node.val1 = -7 + i += 1 + return sa + res = self.meta_interp(f, [32, 7]) + assert res == f(32, 7) + class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -23,6 +23,7 @@ # 'cached_fields'. 
# self._cached_fields = {} + self._cached_fields_getfield_op = {} self._lazy_setfield = None self._lazy_setfield_registered = False @@ -37,6 +38,10 @@ if cached_fieldvalue is not fieldvalue: # common case: store the 'op' as lazy_setfield, and register # myself in the optheap's _lazy_setfields list + try: + del self._cached_fields_getfield_op[structvalue] + except KeyError: + pass self._lazy_setfield = op if not self._lazy_setfield_registered: optheap._lazy_setfields.append(self) @@ -69,9 +74,10 @@ else: return self._cached_fields.get(structvalue, None) - def remember_field_value(self, structvalue, fieldvalue): + def remember_field_value(self, structvalue, fieldvalue, getfield_op=None): assert self._lazy_setfield is None self._cached_fields[structvalue] = fieldvalue + self._cached_fields_getfield_op[structvalue] = getfield_op def force_lazy_setfield(self, optheap): op = self._lazy_setfield @@ -81,6 +87,7 @@ # setfield might impact any of the stored result (because of # possible aliasing). 
self._cached_fields.clear() + self._cached_fields_getfield_op.clear() self._lazy_setfield = None optheap.next_optimization.propagate_forward(op) # Once it is done, we can put at least one piece of information @@ -94,17 +101,18 @@ assert self._lazy_setfield is None cf = CachedField() for structvalue, fieldvalue in self._cached_fields.iteritems(): - structvalue2 = structvalue.get_cloned(optimizer, valuemap) - fieldvalue2 = fieldvalue .get_cloned(optimizer, valuemap) - cf._cached_fields[structvalue2] = fieldvalue2 + op = self._cached_fields_getfield_op.get(structvalue, None) + if op: + structvalue2 = structvalue.get_cloned(optimizer, valuemap) + fieldvalue2 = fieldvalue .get_cloned(optimizer, valuemap) + cf._cached_fields[structvalue2] = fieldvalue2 return cf - def produce_potential_short_preamble_ops(self, potential_ops, descr): - for structvalue, fieldvalue in self._cached_fields.iteritems(): - result = fieldvalue.get_key_box() - potential_ops[result] = ResOperation(rop.GETFIELD_GC, - [structvalue.get_key_box()], - result, descr) + def produce_potential_short_preamble_ops(self, optimizer, + potential_ops, descr): + for structvalue, op in self._cached_fields_getfield_op.iteritems(): + if op and structvalue in self._cached_fields: + potential_ops[op.result] = op class CachedArrayItems(object): @@ -140,6 +148,8 @@ for descr, d in self.cached_fields.items(): new.cached_fields[descr] = d.get_cloned(optimizer, valuemap) + return new + new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): newd = {} @@ -162,7 +172,10 @@ def produce_potential_short_preamble_ops(self, potential_ops): for descr, d in self.cached_fields.items(): - d.produce_potential_short_preamble_ops(potential_ops, descr) + d.produce_potential_short_preamble_ops(self.optimizer, + potential_ops, descr) + + # FIXME for descr, d in self.cached_arrayitems.items(): for value, cache in d.items(): for index, fieldvalue in cache.fixed_index_items.items(): @@ -278,6 +291,7 @@ try: cf = 
self.cached_fields[fielddescr] cf._cached_fields.clear() + cf._cached_fields_getfield_op.clear() except KeyError: pass for arraydescr in effectinfo.write_descrs_arrays: @@ -369,14 +383,14 @@ fieldvalue = cf.getfield_from_cache(self, structvalue) if fieldvalue is not None: self.make_equal_to(op.result, fieldvalue) - return - # default case: produce the operation - structvalue.ensure_nonnull() - ###self.optimizer.optimize_default(op) - self.emit_operation(op) + else: + # default case: produce the operation + structvalue.ensure_nonnull() + ###self.optimizer.optimize_default(op) + self.emit_operation(op) # then remember the result of reading the field fieldvalue = self.getvalue(op.result) - cf.remember_field_value(structvalue, fieldvalue) + cf.remember_field_value(structvalue, fieldvalue, op) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -46,6 +46,34 @@ self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT + def make_guards(self, box): + guards = [] + if self.level == LEVEL_CONSTANT: + op = ResOperation(rop.GUARD_VALUE, [box, self.box], None) + guards.append(op) + elif self.level == LEVEL_KNOWNCLASS: + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + guards.append(op) + else: + if self.level == LEVEL_NONNULL: + op = ResOperation(rop.GUARD_NONNULL, [box], None) + guards.append(op) + if self.intbound.has_lower: + bound = self.intbound.lower + res = BoxInt() + op = ResOperation(rop.INT_GE, [box, ConstInt(bound)], res) + guards.append(op) + op = ResOperation(rop.GUARD_TRUE, [res], None) + guards.append(op) + if self.intbound.has_upper: + bound = self.intbound.upper + res = BoxInt() + op = ResOperation(rop.INT_LE, [box, ConstInt(bound)], res) + guards.append(op) 
+ op = ResOperation(rop.GUARD_TRUE, [res], None) + guards.append(op) + return guards + def force_box(self): return self.box @@ -359,7 +387,7 @@ def produce_short_preamble_box(self, box, short_boxes, potential_ops): if box in short_boxes: return - if self.getvalue(box).is_constant(): + if isinstance(box, Const): #self.getvalue(box).is_constant(): return if box in potential_ops: op = potential_ops[box] From commits-noreply at bitbucket.org Wed Apr 20 23:20:13 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Wed, 20 Apr 2011 23:20:13 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-new: Add a test for PyBuffer_Release decrefing the owned object and make it pass Message-ID: <20110420212013.B1AEA282C22@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer-new Changeset: r43498:d4569f9560ed Date: 2011-04-20 17:15 -0400 http://bitbucket.org/pypy/pypy/changeset/d4569f9560ed/ Log: Add a test for PyBuffer_Release decrefing the owned object and make it pass diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -335,3 +335,31 @@ """)]) result = module.fillinfo() assert "hello, world." == result + + +class AppTestPyBuffer_Release(AppTestCpythonExtensionBase): + """ + PyBuffer_Release releases the resources held by a Py_buffer. + """ + def test_decrefObject(self): + """ + The PyObject referenced by Py_buffer.obj has its reference count + decremented by PyBuffer_Release. + """ + module = self.import_extension('foo', [ + ("release", "METH_VARARGS", + """ + Py_buffer buf; + buf.obj = PyString_FromString("release me!"); + buf.buf = PyString_AsString(buf.obj); + buf.len = PyString_Size(buf.obj); + + /* The Py_buffer owns the only reference to that string. Release the + * Py_buffer and the string should be released as well. 
+ */ + PyBuffer_Release(&buf); + + Py_RETURN_NONE; + """)]) + assert module.release() is None + diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -442,4 +442,4 @@ @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) def PyBuffer_Release(space, view): - pass + Py_DecRef(space, view.c_obj) From commits-noreply at bitbucket.org Wed Apr 20 23:20:14 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Wed, 20 Apr 2011 23:20:14 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-new: Correct the reference counting in this Py_buffer/PyArg_ParseTuple test Message-ID: <20110420212014.80A35282C22@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer-new Changeset: r43499:32677dbc3517 Date: 2011-04-20 17:18 -0400 http://bitbucket.org/pypy/pypy/changeset/32677dbc3517/ Log: Correct the reference counting in this Py_buffer/PyArg_ParseTuple test diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -118,10 +118,13 @@ pybuffer = self.import_parser( ''' Py_buffer buf; + PyObject *result; if (!PyArg_ParseTuple(args, "s*", &buf)) { return NULL; } - return PyString_FromStringAndSize(buf.buf, buf.len); + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; ''') assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') From commits-noreply at bitbucket.org Wed Apr 20 23:20:16 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Wed, 20 Apr 2011 23:20:16 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-new: Mark str as HAVE_GETCHARBUFFER so getarg lets it through t# Message-ID: <20110420212016.5F11E282C25@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer-new Changeset: r43500:186a3c976b98 Date: 2011-04-20 17:19 -0400 
http://bitbucket.org/pypy/pypy/changeset/186a3c976b98/ Log: Mark str as HAVE_GETCHARBUFFER so getarg lets it through t# diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -10,6 +10,7 @@ cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, @@ -403,6 +404,7 @@ c_buf.c_bf_getcharbuffer = llhelper(str_getcharbuffer.api_func.functype, str_getcharbuffer.api_func.get_wrapper(space)) pto.c_tp_as_buffer = c_buf + pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): @@ -443,7 +445,7 @@ if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) - pto.c_tp_flags = Py_TPFLAGS_HEAPTYPE + pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -1271,7 +1271,6 @@ char **p = va_arg(*p_va, char **); PyBufferProcs *pb = arg->ob_type->tp_as_buffer; Py_ssize_t count; - printf("this far\n"); #if 0 if (*format++ != '#') @@ -1279,7 +1278,7 @@ "invalid use of 't' format character", arg, msgbuf, bufsize); #endif - if (!PyType_HasFeature(arg->ob_type, + if (!PyType_HasFeature(arg->ob_type, Py_TPFLAGS_HAVE_GETCHARBUFFER) #if 0 || pb == NULL || pb->bf_getcharbuffer == NULL || @@ -1300,20 +1299,15 @@ "string or pinned buffer", arg, msgbuf, bufsize); #endif - printf("this far!\n"); - 
printf("%p\n", pb->bf_getcharbuffer); count = pb->bf_getcharbuffer(arg, 0, p); - printf("after\n"); #if 0 if (count < 0) return converterr("(unspecified)", arg, msgbuf, bufsize); #endif { - printf("fetch size\n"); FETCH_SIZE; - printf("did that\n"); STORE_SIZE(count); - printf("store size done\n"); + ++format; } break; } diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -90,8 +90,8 @@ constant_names = """ -Py_TPFLAGS_READY Py_TPFLAGS_READYING -METH_COEXIST METH_STATIC METH_CLASS +Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER +METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE From commits-noreply at bitbucket.org Thu Apr 21 00:25:15 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 21 Apr 2011 00:25:15 +0200 (CEST) Subject: [pypy-svn] pypy default: math.copysign is a pure function, as is floor() which doesn't have any error conditions. Message-ID: <20110420222515.882E2282C22@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43501:b7051089b674 Date: 2011-04-20 18:24 -0400 http://bitbucket.org/pypy/pypy/changeset/b7051089b674/ Log: math.copysign is a pure function, as is floor() which doesn't have any error conditions. 
diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -29,9 +29,9 @@ math_eci = eci math_prefix = '' -def llexternal(name, ARGS, RESULT): +def llexternal(name, ARGS, RESULT, **kwargs): return rffi.llexternal(name, ARGS, RESULT, compilation_info=eci, - sandboxsafe=True) + sandboxsafe=True, **kwargs) def math_llexternal(name, ARGS, RESULT): return rffi.llexternal(math_prefix + name, ARGS, RESULT, @@ -47,7 +47,8 @@ math_log = llexternal('log', [rffi.DOUBLE], rffi.DOUBLE) math_log10 = llexternal('log10', [rffi.DOUBLE], rffi.DOUBLE) math_copysign = llexternal(underscore + 'copysign', - [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) + [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE, + pure_function=True) math_atan2 = llexternal('atan2', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_frexp = llexternal('frexp', [rffi.DOUBLE, rffi.INTP], rffi.DOUBLE) math_modf = llexternal('modf', [rffi.DOUBLE, rffi.DOUBLEP], rffi.DOUBLE) @@ -56,7 +57,7 @@ math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_hypot = llexternal(underscore + 'hypot', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_isinf = math_llexternal('isinf', [rffi.DOUBLE], rffi.INT) +math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, pure_function=True) # ____________________________________________________________ # @@ -99,6 +100,8 @@ return (y == INFINITY) | (y == -INFINITY) +ll_math_floor = math_floor + ll_math_copysign = math_copysign @@ -337,7 +340,7 @@ unary_math_functions = [ 'acos', 'asin', 'atan', - 'ceil', 'cos', 'cosh', 'exp', 'fabs', 'floor', + 'ceil', 'cos', 'cosh', 'exp', 'fabs', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'log', 'log10', 'acosh', 'asinh', 'atanh', 'log1p', 'expm1', ] From commits-noreply at bitbucket.org Thu Apr 21 00:58:43 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 21 Apr 2011 00:58:43 +0200 (CEST) 
Subject: [pypy-svn] pypy default: interp_math.floor really can't raise. Message-ID: <20110420225843.CEB70282C22@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43502:0d778b35092c Date: 2011-04-20 18:58 -0400 http://bitbucket.org/pypy/pypy/changeset/0d778b35092c/ Log: interp_math.floor really can't raise. diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -5,11 +5,11 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import NoneNotWrapped -class State: - def __init__(self, space): +class State: + def __init__(self, space): self.w_e = space.wrap(math.e) self.w_pi = space.wrap(math.pi) -def get(space): +def get(space): return space.fromcache(State) def _get_double(space, w_x): @@ -153,7 +153,8 @@ Return the floor of x as a float. This is the largest integral value <= x. """ - return math1(space, math.floor, w_x) + x = _get_double(space, w_x) + return space.wrap(math.floor(x)) def sqrt(space, w_x): """sqrt(x) From commits-noreply at bitbucket.org Thu Apr 21 07:46:35 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 21 Apr 2011 07:46:35 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: reenabled getarrayitem support without caching setarrayitems across loop boundaries Message-ID: <20110421054635.E167A282C26@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43503:16de1276f38a Date: 2011-04-21 07:45 +0200 http://bitbucket.org/pypy/pypy/changeset/16de1276f38a/ Log: reenabled getarrayitem support without caching setarrayitems across loop boundaries diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2032,6 +2032,46 @@ return sa res = self.meta_interp(f, [32, 7]) assert res == f(32, 7) + + def test_caching_setarrayitem_fixed(self): + myjitdriver = 
JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) + def f(n, a): + i = sa = 0 + node = [1, 2, 3] + while i < n: + myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node) + myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node) + sa += node[0] + node[1] + if i < n/2: + node[0] = a + node[1] = a + else: + node[0] = a + node[1] = a + 1 + i += 1 + return sa + res = self.meta_interp(f, [32, 7]) + assert res == f(32, 7) + + def test_caching_setarrayitem_var(self): + myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'b', 'node']) + def f(n, a, b): + i = sa = 0 + node = [1, 2, 3] + while i < n: + myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, b=b, node=node) + myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, b=b, node=node) + sa += node[0] + node[b] + if i < n/2: + node[0] = a + node[b] = a + else: + node[0] = a + node[b] = a + 1 + i += 1 + return sa + res = self.meta_interp(f, [32, 7, 2]) + assert res == f(32, 7, 2) def test_getfield_result_with_intbound(self): myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node']) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -118,8 +118,10 @@ class CachedArrayItems(object): def __init__(self): self.fixed_index_items = {} + self.fixed_index_getops = {} self.var_index_item = None self.var_index_indexvalue = None + self.var_index_getop = None class BogusPureField(JitException): pass @@ -148,8 +150,6 @@ for descr, d in self.cached_fields.items(): new.cached_fields[descr] = d.get_cloned(optimizer, valuemap) - return new - new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): newd = {} @@ -157,7 +157,7 @@ for value, cache in d.items(): newcache = CachedArrayItems() newd[value.get_cloned(optimizer, valuemap)] = newcache - if cache.var_index_item: + if cache.var_index_item and cache.var_index_getop: newcache.var_index_item = 
\ cache.var_index_item.get_cloned(optimizer, valuemap) if cache.var_index_indexvalue: @@ -165,7 +165,8 @@ cache.var_index_indexvalue.get_cloned(optimizer, valuemap) for index, fieldvalue in cache.fixed_index_items.items(): - newcache.fixed_index_items[index] = \ + if cache.fixed_index_getops.get(index, None): + newcache.fixed_index_items[index] = \ fieldvalue.get_cloned(optimizer, valuemap) return new @@ -175,22 +176,16 @@ d.produce_potential_short_preamble_ops(self.optimizer, potential_ops, descr) - # FIXME for descr, d in self.cached_arrayitems.items(): for value, cache in d.items(): - for index, fieldvalue in cache.fixed_index_items.items(): - result = fieldvalue.get_key_box() - op = ResOperation(rop.GETARRAYITEM_GC, - [value.get_key_box(), ConstInt(index)], - result, descr) - potential_ops[result] = op + for index in cache.fixed_index_items.keys(): + op = cache.fixed_index_getops[index] + if op: + potential_ops[op.result] = op if cache.var_index_item and cache.var_index_indexvalue: - result = cache.var_index_item.get_key_box() - op = ResOperation(rop.GETARRAYITEM_GC, - [value.get_key_box(), - cache.var_index_indexvalue.get_key_box()], - result, descr) - potential_ops[result] = op + op = cache.var_index_getop + if op: + potential_ops[op.result] = op def clean_caches(self): @@ -205,7 +200,8 @@ cf = self.cached_fields[descr] = CachedField() return cf - def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False): + def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, + write=False, getop=None): d = self.cached_arrayitems.get(descr, None) if d is None: d = self.cached_arrayitems[descr] = {} @@ -223,9 +219,11 @@ othercache.var_index_item = None try: del othercache.fixed_index_items[index] + del othercache.fixed_index_getops[index] except KeyError: pass cache.fixed_index_items[index] = fieldvalue + cache.fixed_index_getops[index] = getop else: if write: for value, othercache in d.iteritems(): @@ -233,8 +231,10 @@ 
othercache.var_index_indexvalue = None othercache.var_index_item = None othercache.fixed_index_items.clear() + othercache.fixed_index_getops.clear() cache.var_index_indexvalue = indexvalue cache.var_index_item = fieldvalue + cache.var_index_getop = getop def read_cached_arrayitem(self, descr, value, indexvalue): d = self.cached_arrayitems.get(descr, None) @@ -388,9 +388,9 @@ structvalue.ensure_nonnull() ###self.optimizer.optimize_default(op) self.emit_operation(op) - # then remember the result of reading the field - fieldvalue = self.getvalue(op.result) - cf.remember_field_value(structvalue, fieldvalue, op) + # then remember the result of reading the field + fieldvalue = self.getvalue(op.result) + cf.remember_field_value(structvalue, fieldvalue, op) def optimize_SETFIELD_GC(self, op): if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)], @@ -412,7 +412,8 @@ ###self.optimizer.optimize_default(op) self.emit_operation(op) fieldvalue = self.getvalue(op.result) - self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue) + self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue, + getop=op) def optimize_SETARRAYITEM_GC(self, op): self.emit_operation(op) From commits-noreply at bitbucket.org Thu Apr 21 08:15:54 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Thu, 21 Apr 2011 08:15:54 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: corrected asserts Message-ID: <20110421061554.70764282C26@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43504:1378c791e6d2 Date: 2011-04-21 08:15 +0200 http://bitbucket.org/pypy/pypy/changeset/1378c791e6d2/ Log: corrected asserts diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -317,15 +317,17 @@ short.append(op) short_seen[op.result] = True newop = self.short_inliner.inline_op(op) + newoplen = 
len(self.optimizer.newoperations) self.optimizer.send_extra_operation(newop) - assert self.optimizer.newoperations[-1] is not newop + assert len(self.optimizer.newoperations) == newoplen if op.is_ovf(): # FIXME: ensure that GUARD_OVERFLOW:ed ops not end up here guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None) short.append(guard) + newoplen = len(self.optimizer.newoperations) self.optimizer.send_extra_operation(guard) - assert self.optimizer.newoperations[-1] is not guard + assert len(self.optimizer.newoperations) == newoplen # FIXME: Emit a proper guards here in case it is not # removed by the optimizer. Can that happen? From commits-noreply at bitbucket.org Thu Apr 21 09:10:26 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Thu, 21 Apr 2011 09:10:26 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix translation (sorry). Thanks anto. Message-ID: <20110421071026.5FA2B282C26@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43505:a3cce19037e6 Date: 2011-04-21 03:10 -0400 http://bitbucket.org/pypy/pypy/changeset/a3cce19037e6/ Log: Fix translation (sorry). Thanks anto. 
diff --git a/pypy/rpython/extfuncregistry.py b/pypy/rpython/extfuncregistry.py --- a/pypy/rpython/extfuncregistry.py +++ b/pypy/rpython/extfuncregistry.py @@ -39,6 +39,9 @@ register_external(rfloat.copysign, [float, float], float, export_name="ll_math.ll_math_copysign", sandboxsafe=True, llimpl=ll_math.ll_math_copysign) +register_external(math.floor, [float], float, + export_name="ll_math.ll_math_floor", sandboxsafe=True, + llimpl=ll_math.ll_math_floor) complex_math_functions = [ ('frexp', [float], (float, int)), From commits-noreply at bitbucket.org Thu Apr 21 09:17:02 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Thu, 21 Apr 2011 09:17:02 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: add the filemap for the pypy.org repo extraction Message-ID: <20110421071702.72EC0282C26@codespeak.net> Author: Ronny Pfannschmidt Branch: extradoc Changeset: r3528:1ee0c0f08688 Date: 2011-04-21 09:16 +0200 http://bitbucket.org/pypy/extradoc/changeset/1ee0c0f08688/ Log: add the filemap for the pypy.org repo extraction diff --git a/planning/hg-migration/pypy.org.filemap b/planning/hg-migration/pypy.org.filemap new file mode 100644 --- /dev/null +++ b/planning/hg-migration/pypy.org.filemap @@ -0,0 +1,2 @@ +include pypy.org +rename pypy.org . From commits-noreply at bitbucket.org Thu Apr 21 10:02:35 2011 From: commits-noreply at bitbucket.org (RonnyPfannschmidt) Date: Thu, 21 Apr 2011 10:02:35 +0200 (CEST) Subject: [pypy-svn] buildbot default: resuffle some imports to avoid dependencies Message-ID: <20110421080235.DA3CD282C26@codespeak.net> Author: Ronny Pfannschmidt Branch: Changeset: r486:5c6eb036a25d Date: 2011-04-21 10:02 +0200 http://bitbucket.org/pypy/buildbot/changeset/5c6eb036a25d/ Log: resuffle some imports to avoid dependencies diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -15,9 +15,8 @@ import py -app = flask.Flask('bb-hook') +app = flask.Flask(__name__) -from . 
import hook @app.route('/', methods=['GET']) @@ -41,6 +40,7 @@ def handle_payload(): payload = json.loads(flask.request.form['payload']) try: + from . import hook hook.handle(payload, test=app.testing) except: traceback.print_exc() diff --git a/bitbucket_hook/test_hook_testcall.py b/bitbucket_hook/test_hook_testcall.py --- a/bitbucket_hook/test_hook_testcall.py +++ b/bitbucket_hook/test_hook_testcall.py @@ -1,10 +1,10 @@ import os -from bitbucket_hook.hook import handle -from bitbucket_hook.main import app import py def test_handlecall(): + from bitbucket_hook.hook import handle + from bitbucket_hook.main import app repopath = os.path.dirname(os.path.dirname(__file__)) print 'Repository path:', repopath test_payload = {u'repository': {u'absolute_url': '', From commits-noreply at bitbucket.org Thu Apr 21 11:40:38 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:38 +0200 (CEST) Subject: [pypy-svn] buildbot default: add a docstring Message-ID: <20110421094038.9F994282B8B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r487:81dcd85ae501 Date: 2011-04-21 10:07 +0200 http://bitbucket.org/pypy/buildbot/changeset/81dcd85ae501/ Log: add a docstring diff --git a/bitbucket_hook/run.py b/bitbucket_hook/run.py --- a/bitbucket_hook/run.py +++ b/bitbucket_hook/run.py @@ -1,4 +1,11 @@ #!/usr/bin/python + +""" +To start the server in production mode, run this command:: + + ./run.py deploy +""" + import py import sys import argparse From commits-noreply at bitbucket.org Thu Apr 21 11:40:41 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:41 +0200 (CEST) Subject: [pypy-svn] buildbot default: refactoring: move the loop over the commits outside the IRC/mail code; now, every handler has to deal with only one commit at a time Message-ID: <20110421094041.01A58282C27@codespeak.net> Author: Antonio Cuni Branch: Changeset: r488:5e1d218032f4 Date: 2011-04-21 10:35 +0200 
http://bitbucket.org/pypy/buildbot/changeset/5e1d218032f4/ Log: refactoring: move the loop over the commits outside the IRC/mail code; now, every handler has to deal with only one commit at a time diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -17,7 +17,6 @@ def check_for_local_repo(local_repo): return local_repo.check(dir=True) - def get_commits(service, payload): #XXX: service is evil, get rid import operator @@ -40,5 +39,6 @@ print >> sys.stderr, 'Ignoring unknown repo', path return scm.hg('pull', '-R', local_repo) - irc.handle_message(payload, test) - mail.handle_diff_email(payload, test) + for commit in get_commits('hook', payload): + irc.handle_commit(payload, commit) + mail.handle_commit(payload, commit) diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py --- a/bitbucket_hook/irc.py +++ b/bitbucket_hook/irc.py @@ -6,7 +6,6 @@ import time import subprocess - def getpaths(files, listfiles=False): # Handle empty input @@ -53,40 +52,33 @@ ]) -def handle_message(payload, test=False): - #XXX - from .hook import get_commits +def handle_commit(payload, commit): from .main import app - commits = get_commits('irc', payload) - if test: - print "#" * 20 - print "IRC messages:" - for commit in commits: - author = commit['author'] - branch = commit['branch'] - node = commit['node'] - timestamp = commit.get('timestamp') - print time.strftime('[%Y-%m-%d %H:%M]'), node, timestamp, author + author = commit['author'] + branch = commit['branch'] + node = commit['node'] + timestamp = commit.get('timestamp') + print time.strftime('[%Y-%m-%d %H:%M]'), node, timestamp, author - files = commit.get('files', []) - common_prefix, filenames = getpaths(files, app.config['LISTFILES']) - pathlen = len(common_prefix) + len(filenames) + 2 - common_prefix = '/' + common_prefix + files = commit.get('files', []) + common_prefix, filenames = getpaths(files, app.config['LISTFILES']) + pathlen = len(common_prefix) + 
len(filenames) + 2 + common_prefix = '/' + common_prefix - if app.config['USE_COLOR_CODES']: - author = '\x0312%s\x0F' % author # in blue - branch = '\x02%s\x0F' % branch # in bold - node = '\x0311%s\x0F' % node # in azure - common_prefix = '\x0315%s\x0F' % common_prefix # in gray + if app.config['USE_COLOR_CODES']: + author = '\x0312%s\x0F' % author # in blue + branch = '\x02%s\x0F' % branch # in bold + node = '\x0311%s\x0F' % node # in azure + common_prefix = '\x0315%s\x0F' % common_prefix # in gray - message = commit['message'].replace('\n', ' ') - fields = (author, branch, node, common_prefix, filenames) - part1 = '%s %s %s %s%s: ' % fields - totallen = 160 + pathlen - if len(message) + len(part1) <= totallen: - irc_msg = part1 + message - else: - maxlen = totallen - (len(part1) + 3) - irc_msg = part1 + message[:maxlen] + '...' - send_message(irc_msg, test) + message = commit['message'].replace('\n', ' ') + fields = (author, branch, node, common_prefix, filenames) + part1 = '%s %s %s %s%s: ' % fields + totallen = 160 + pathlen + if len(message) + len(part1) <= totallen: + irc_msg = part1 + message + else: + maxlen = totallen - (len(part1) + 3) + irc_msg = part1 + message[:maxlen] + '...' 
+ send_message(irc_msg) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -89,7 +89,9 @@ } payload, expected = irc_cases(payload) - irc.handle_message(payload) + commits = payload['commits'] + irc.handle_commit(payload, commits[0]) + irc.handle_commit(payload, commits[1]) msg1, msg2 = messages[:2] diff --git a/bitbucket_hook/mail.py b/bitbucket_hook/mail.py --- a/bitbucket_hook/mail.py +++ b/bitbucket_hook/mail.py @@ -13,6 +13,9 @@ """ +def handle_commit(payload, commit): + return send_diff_for_commit(payload, commit) + def send_diff_for_commit(payload, commit, test=False): from .main import app @@ -56,9 +59,3 @@ smtp = SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT']) smtp.sendmail(from_, [to], msg.as_string()) - -def handle_diff_email(payload, test=False): - from . import hook - commits = hook.get_commits('email', payload) - for commit in commits: - send_diff_for_commit(payload, commit, test) diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py --- a/bitbucket_hook/test/test_irc.py +++ b/bitbucket_hook/test/test_irc.py @@ -91,5 +91,3 @@ # doesnt get called in test mode monkeypatch.setattr(subprocess, 'call', lambda: None) irc.send_message('test', test=True) - - From commits-noreply at bitbucket.org Thu Apr 21 11:40:43 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:43 +0200 (CEST) Subject: [pypy-svn] buildbot default: simplify the logic of get_commits, remove the dependency on the global variable seen_nodes, and avoid monkeypatching Message-ID: <20110421094043.D6A73282C27@codespeak.net> Author: Antonio Cuni Branch: Changeset: r489:5e99077d4f13 Date: 2011-04-21 10:40 +0200 http://bitbucket.org/pypy/buildbot/changeset/5e99077d4f13/ Log: simplify the logic of get_commits, remove the dependency on the global variable seen_nodes, and avoid monkeypatching diff --git a/bitbucket_hook/hook.py 
b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -11,23 +11,18 @@ from . import mail -seen_nodes = set() - - def check_for_local_repo(local_repo): return local_repo.check(dir=True) -def get_commits(service, payload): - #XXX: service is evil, get rid +def get_commits(payload, seen_nodes=set()): import operator commits = sorted(payload['commits'], key=operator.itemgetter('revision')) for commit in commits: node = commit['raw_node'] - key = service, node - if key in seen_nodes: + if node in seen_nodes: continue - seen_nodes.add(key) + seen_nodes.add(node) yield commit @@ -39,6 +34,6 @@ print >> sys.stderr, 'Ignoring unknown repo', path return scm.hg('pull', '-R', local_repo) - for commit in get_commits('hook', payload): + for commit in get_commits(payload): irc.handle_commit(payload, commit) mail.handle_commit(payload, commit) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -9,12 +9,14 @@ def test_sort_commits(): # - commits = hook.get_commits('test_sort', { + seen_nodes = set() + payload = { 'commits': [ {'revision': 43, 'node': 'second', 'raw_node': 'first'}, {'revision': 42, 'node': 'first', 'raw_node': 'second'}, ], - }) + } + commits = hook.get_commits(payload, seen_nodes) commits = [x['node'] for x in commits] assert commits == ['first', 'second'] @@ -156,8 +158,6 @@ def test_ignore_duplicate_commits(monkeypatch, mails, messages): - monkeypatch.setattr(hook, 'seen_nodes', set()) - commits, _ = irc_cases() payload = {u'repository': {u'absolute_url': '', u'name': u'test', @@ -166,8 +166,9 @@ u'website': u''}, u'user': u'antocuni', 'commits': commits['commits']} - commits_listed = list(hook.get_commits('test', payload)) - commits_again = list(hook.get_commits('test', payload)) + seen_nodes = set() + commits_listed = list(hook.get_commits(payload, seen_nodes)) + commits_again = list(hook.get_commits(payload, 
seen_nodes)) num_commits = len(commits['commits']) assert len(commits_listed) == num_commits assert not commits_again diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py --- a/bitbucket_hook/test/conftest.py +++ b/bitbucket_hook/test/conftest.py @@ -9,11 +9,6 @@ return [] -def pytest_runtest_setup(item): - from bitbucket_hook import hook - hook.seen_nodes.clear() - - def pytest_funcarg__monkeypatch(request): from bitbucket_hook import irc, mail mp = request.getfuncargvalue('monkeypatch') From commits-noreply at bitbucket.org Thu Apr 21 11:40:46 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:46 +0200 (CEST) Subject: [pypy-svn] buildbot default: bah, reintroduce the "test" argument to avoid sending emails/irc during tests; this is really bad designed :-( Message-ID: <20110421094046.2E04F282C1B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r490:711264ee4ffd Date: 2011-04-21 10:48 +0200 http://bitbucket.org/pypy/buildbot/changeset/711264ee4ffd/ Log: bah, reintroduce the "test" argument to avoid sending emails/irc during tests; this is really bad designed :-( diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -35,5 +35,5 @@ return scm.hg('pull', '-R', local_repo) for commit in get_commits(payload): - irc.handle_commit(payload, commit) - mail.handle_commit(payload, commit) + irc.handle_commit(payload, commit, test) + mail.handle_commit(payload, commit, test) diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py --- a/bitbucket_hook/irc.py +++ b/bitbucket_hook/irc.py @@ -52,7 +52,7 @@ ]) -def handle_commit(payload, commit): +def handle_commit(payload, commit, test=False): from .main import app author = commit['author'] @@ -81,4 +81,4 @@ else: maxlen = totallen - (len(part1) + 3) irc_msg = part1 + message[:maxlen] + '...' 
- send_message(irc_msg) + send_message(irc_msg, test) diff --git a/bitbucket_hook/mail.py b/bitbucket_hook/mail.py --- a/bitbucket_hook/mail.py +++ b/bitbucket_hook/mail.py @@ -13,11 +13,7 @@ """ -def handle_commit(payload, commit): - return send_diff_for_commit(payload, commit) - - -def send_diff_for_commit(payload, commit, test=False): +def handle_commit(payload, commit, test=False): from .main import app path = payload['repository']['absolute_url'] From commits-noreply at bitbucket.org Thu Apr 21 11:40:47 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:47 +0200 (CEST) Subject: [pypy-svn] buildbot default: put handlers into a list Message-ID: <20110421094047.EB132282C1B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r491:cf8c0ea0a5e2 Date: 2011-04-21 10:52 +0200 http://bitbucket.org/pypy/buildbot/changeset/cf8c0ea0a5e2/ Log: put handlers into a list diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -10,6 +10,10 @@ from . import scm from . 
import mail +HANDLERS = [ + irc.handle_commit, + mail.handle_commit + ] def check_for_local_repo(local_repo): return local_repo.check(dir=True) @@ -35,5 +39,5 @@ return scm.hg('pull', '-R', local_repo) for commit in get_commits(payload): - irc.handle_commit(payload, commit, test) - mail.handle_commit(payload, commit, test) + for handler in HANDLERS: + handler(payload, commit, test) From commits-noreply at bitbucket.org Thu Apr 21 11:40:50 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:50 +0200 (CEST) Subject: [pypy-svn] buildbot default: move the logging of received commits to a separate handler, and print it in green Message-ID: <20110421094050.A4C50282C1B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r492:81f970a9bbcc Date: 2011-04-21 11:03 +0200 http://bitbucket.org/pypy/buildbot/changeset/81f970a9bbcc/ Log: move the logging of received commits to a separate handler, and print it in green diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -4,13 +4,16 @@ import sys import time +from .main import app +from . import scm +# +from . import stdoutlog from . import irc -from .main import app - -from . import scm from . 
import mail + HANDLERS = [ + stdoutlog.handle_commit, irc.handle_commit, mail.handle_commit ] diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py --- a/bitbucket_hook/irc.py +++ b/bitbucket_hook/irc.py @@ -3,7 +3,6 @@ ''' import os -import time import subprocess def getpaths(files, listfiles=False): @@ -58,8 +57,6 @@ author = commit['author'] branch = commit['branch'] node = commit['node'] - timestamp = commit.get('timestamp') - print time.strftime('[%Y-%m-%d %H:%M]'), node, timestamp, author files = commit.get('files', []) common_prefix, filenames = getpaths(files, app.config['LISTFILES']) diff --git a/bitbucket_hook/stdoutlog.py b/bitbucket_hook/stdoutlog.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/stdoutlog.py @@ -0,0 +1,21 @@ +import time + +RED = 31 +GREEN = 32 +YELLOW = 33 +BLUE = 34 +MAGENTA = 35 +CYAN = 36 +GRAY = 37 + +def color(s, fg=1, bg=1): + template = '\033[%02d;%02dm%s\033[0m' + return template % (bg, fg, s) + +def handle_commit(payload, commit, test=False): + author = commit['author'] + node = commit['node'] + timestamp = commit.get('timestamp') + curtime = time.strftime('[%Y-%m-%d %H:%M]') + log = '%s %s %s %s' % (curtime, node, timestamp, author) + print color(log, fg=GREEN) From commits-noreply at bitbucket.org Thu Apr 21 11:40:53 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 11:40:53 +0200 (CEST) Subject: [pypy-svn] buildbot default: display the name of the repository in case it is different than the default one (i.e., pypy/pypy) Message-ID: <20110421094053.C2809282B8B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r493:e1ddaeb620f8 Date: 2011-04-21 11:37 +0200 http://bitbucket.org/pypy/buildbot/changeset/e1ddaeb620f8/ Log: display the name of the repository in case it is different than the default one (i.e., pypy/pypy) diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py --- a/bitbucket_hook/irc.py +++ b/bitbucket_hook/irc.py @@ -50,13 +50,40 @@ message, ]) +def 
get_short_id(owner, repo, branch): + """ + Custom rules to get a short string that identifies a repo/branch in a + useful way, for IRC messages. Look at test_irc.test_get_short_id for what + we expect. + """ + from .main import app + repo_parts = [] + if owner != app.config['DEFAULT_USER']: + repo_parts.append('%s' % owner) + if repo_parts or repo != app.config['DEFAULT_REPO']: + repo_parts.append(repo) + repo_id = '/'.join(repo_parts) + # + if repo_id == '': + return branch + elif branch == 'default': + return repo_id + elif repo_id == branch: + return repo_id # e.g., pypy/extradoc has a branch extradoc, just return 'extradoc' + else: + return '%s[%s]' % (repo_id, branch) + return branch + def handle_commit(payload, commit, test=False): from .main import app + repo_owner = payload['repository']['owner'] + repo_name = payload['repository']['name'] author = commit['author'] branch = commit['branch'] node = commit['node'] + short_id = get_short_id(repo_owner, repo_name, branch) files = commit.get('files', []) common_prefix, filenames = getpaths(files, app.config['LISTFILES']) @@ -65,12 +92,12 @@ if app.config['USE_COLOR_CODES']: author = '\x0312%s\x0F' % author # in blue - branch = '\x02%s\x0F' % branch # in bold + short_id = '\x02%s\x0F' % short_id # in bold node = '\x0311%s\x0F' % node # in azure common_prefix = '\x0315%s\x0F' % common_prefix # in gray message = commit['message'].replace('\n', ' ') - fields = (author, branch, node, common_prefix, filenames) + fields = (author, short_id, node, common_prefix, filenames) part1 = '%s %s %s %s%s: ' % fields totallen = 160 + pathlen if len(message) + len(part1) <= totallen: diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py --- a/bitbucket_hook/test/test_irc.py +++ b/bitbucket_hook/test/test_irc.py @@ -91,3 +91,13 @@ # doesnt get called in test mode monkeypatch.setattr(subprocess, 'call', lambda: None) irc.send_message('test', test=True) + +def test_get_short_id(): + assert 
irc.get_short_id('pypy', 'pypy', 'default') == 'default' + assert irc.get_short_id('pypy', 'pypy', 'mybranch') == 'mybranch' + assert irc.get_short_id('pypy', 'buildbot', 'default') == 'buildbot' + assert irc.get_short_id('pypy', 'buildbot', 'mybranch') == 'buildbot[mybranch]' + assert irc.get_short_id('pypy', 'extradoc', 'extradoc') == 'extradoc' + # + assert irc.get_short_id('anto', 'pypy', 'default') == 'anto/pypy' + assert irc.get_short_id('anto', 'pypy', 'mybranch') == 'anto/pypy[mybranch]' diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -56,6 +56,9 @@ REMOTE_BASE = 'http://bitbucket.org' USE_COLOR_CODES = True LISTFILES = False + # + DEFAULT_USER = 'pypy' + DEFAULT_REPO = 'pypy' class CodeSpeakConfig(DefaultConfig): From commits-noreply at bitbucket.org Thu Apr 21 11:59:09 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 11:59:09 +0200 (CEST) Subject: [pypy-svn] pypy default: Support test_rx86_*_auto_encoding checking for byte-based instructions Message-ID: <20110421095909.7F48D282C1B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43506:133bf79f8734 Date: 2011-04-20 20:38 +0200 http://bitbucket.org/pypy/pypy/changeset/133bf79f8734/ Log: Support test_rx86_*_auto_encoding checking for byte-based instructions too. Add a missing, more efficient encoding. In- progress on 64-bit. 
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -460,6 +460,7 @@ # ------------------------------ MOV ------------------------------ MOV_ri = insn(rex_w, register(1), '\xB8', immediate(2, 'q')) + MOV8_ri = insn(rex_w, byte_register(1), '\xB0', immediate(2, 'b')) # ------------------------------ Arithmetic ------------------------------ diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -43,8 +43,10 @@ TESTDIR = 'rx86_32' X86_CodeBuilder = rx86.X86_32_CodeBuilder REGNAMES = ['%eax', '%ecx', '%edx', '%ebx', '%esp', '%ebp', '%esi', '%edi'] + REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%ah', '%ch', '%dh', '%bh'] XMMREGNAMES = ['%%xmm%d' % i for i in range(16)] REGS = range(8) + REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(8)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi] methname = '?' 
@@ -52,6 +54,9 @@ def reg_tests(self): return self.REGS + def reg8_tests(self): + return self.REGS8 + def xmm_reg_tests(self): return self.reg_tests() @@ -97,12 +102,14 @@ def get_all_tests(self): return { 'r': self.reg_tests, + 'r8': self.reg8_tests, 'x': self.xmm_reg_tests, 'b': self.stack_bp_tests, 's': self.stack_sp_tests, 'm': self.memory_tests, 'a': self.array_tests, 'i': self.imm32_tests, + 'i8': self.imm8_tests, 'j': self.imm32_tests, 'l': self.relative_tests, } @@ -110,6 +117,10 @@ def assembler_operand_reg(self, regnum): return self.REGNAMES[regnum] + def assembler_operand_reg8(self, regnum): + assert regnum & rx86.BYTE_REG_FLAG + return self.REGNAMES8[regnum &~ rx86.BYTE_REG_FLAG] + def assembler_operand_xmm_reg(self, regnum): return self.XMMREGNAMES[regnum] @@ -137,16 +148,19 @@ def get_all_assembler_operands(self): return { 'r': self.assembler_operand_reg, + 'r8': self.assembler_operand_reg8, 'x': self.assembler_operand_xmm_reg, 'b': self.assembler_operand_stack_bp, 's': self.assembler_operand_stack_sp, 'm': self.assembler_operand_memory, 'a': self.assembler_operand_array, 'i': self.assembler_operand_imm, + 'i8': self.assembler_operand_imm, 'j': self.assembler_operand_imm_addr, } - def run_test(self, methname, instrname, argmodes, args_lists): + def run_test(self, methname, instrname, argmodes, args_lists, + instr_suffix=None): global labelcount labelcount = 0 oplist = [] @@ -169,6 +183,9 @@ if (self.WORD == 8) and instrname.startswith('CVT'): suffix = suffixes[self.WORD] + if instr_suffix is not None: + suffix = instr_suffix # overwrite + following = "" if False: # instr.indirect: suffix = "" @@ -196,11 +213,22 @@ oplist.append(op) g.write('\t.string "%s"\n' % END_TAG) g.close() - os.system('as --%d "%s" -o "%s"' % (self.WORD*8, inputname, filename)) + f, g = os.popen4('as --%d "%s" -o "%s"' % + (self.WORD*8, inputname, filename), 'r') + f.close() + got = g.read() + g.close() + error = [line for line in got.splitlines() if 'error' in line.lower()] + 
if error: + raise Exception("Assembler got an error: %r" % error[0]) + error = [line for line in got.splitlines() + if 'warning' in line.lower()] + if error: + raise Exception("Assembler got a warning: %r" % error[0]) try: f = open(filename, 'rb') except IOError: - raise Exception("Assembler error") + raise Exception("Assembler did not produce output?") data = f.read() f.close() i = data.find(BEGIN_TAG) @@ -227,13 +255,20 @@ if methname in ('ADD_ri', 'AND_ri', 'CMP_ri', 'OR_ri', 'SUB_ri', 'XOR_ri', 'SBB_ri'): if args[0] == rx86.R.eax: - return [] # ADD EAX, constant: there is a special encoding + return [] # ADD EAX, constant: there is a special encoding + if methname in ('CMP8_ri',): + if args[0] == rx86.R.al: + return [] # CMP AL, constant: there is a special encoding if methname == 'XCHG_rr' and rx86.R.eax in args: return [] # special encoding if methname == 'MOV_rj' and args[0] == rx86.R.eax: return [] # MOV EAX, [immediate]: there is a special encoding if methname == 'MOV_jr' and args[1] == rx86.R.eax: return [] # MOV [immediate], EAX: there is a special encoding + if methname == 'MOV8_rj' and args[0] == rx86.R.al: + return [] # MOV AL, [immediate]: there is a special encoding + if methname == 'MOV8_jr' and args[1] == rx86.R.al: + return [] # MOV [immediate], AL: there is a special encoding return [args] @@ -243,7 +278,9 @@ return X86_CodeBuilder def should_skip_instruction(self, instrname, argmodes): - is_artificial_instruction = instrname[-1].isdigit() or (argmodes != '' and argmodes[-1].isdigit()) + is_artificial_instruction = (argmodes != '' and argmodes[-1].isdigit()) + is_artificial_instruction |= (instrname[-1].isdigit() and + instrname[-1] != '8') return ( is_artificial_instruction or # XXX: Can't tests shifts automatically at the moment @@ -274,13 +311,35 @@ if methname == 'WORD': return + if instrname.endswith('8'): + instrname = instrname[:-1] + if instrname == 'MOVSX' or instrname == 'MOVZX': + instr_suffix = 'b' + suffixes[self.WORD] + instrname = 
instrname[:-1] + if argmodes[1] == 'r': + argmodes = [argmodes[0], 'r8'] + else: + instr_suffix = 'b' + realargmodes = [] + for mode in argmodes: + if mode == 'r': + mode = 'r8' + elif mode == 'i': + mode = 'i8' + realargmodes.append(mode) + argmodes = realargmodes + elif instrname == 'CALL' or instrname == 'JMP': + instr_suffix = suffixes[self.WORD] + ' *' + else: + instr_suffix = None print "Testing %s with argmodes=%r" % (instrname, argmodes) self.methname = methname self.is_xmm_insn = getattr(getattr(self.X86_CodeBuilder, methname), 'is_xmm_insn', False) ilist = self.make_all_tests(methname, argmodes) - oplist, as_code = self.run_test(methname, instrname, argmodes, ilist) + oplist, as_code = self.run_test(methname, instrname, argmodes, ilist, + instr_suffix) cc = self.get_code_checker_class()(as_code) for op, args in zip(oplist, ilist): if op: diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -9,7 +9,11 @@ X86_CodeBuilder = rx86.X86_64_CodeBuilder REGNAMES = ['%rax', '%rcx', '%rdx', '%rbx', '%rsp', '%rbp', '%rsi', '%rdi', '%r8', '%r9', '%r10', '%r11', '%r12', '%r13', '%r14', '%r15'] + REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%spl', '%bpl', '%sil', '%dil', + '%r8b', '%r9b', '%r10b', '%r11b', + '%r12b', '%r13b', '%r14b', '%r15b'] REGS = range(16) + REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(16)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi, rx86.R.r8, rx86.R.r9, rx86.R.r10, rx86.R.r11, From commits-noreply at bitbucket.org Thu Apr 21 11:59:13 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 11:59:13 +0200 (CEST) Subject: [pypy-svn] pypy default: Finish the fixes. 
If I'm correct, I didn't actually make any fix Message-ID: <20110421095913.41981282C28@codespeak.net> Author: Armin Rigo Branch: Changeset: r43507:d3a468a27d46 Date: 2011-04-20 20:54 +0200 http://bitbucket.org/pypy/pypy/changeset/d3a468a27d46/ Log: Finish the fixes. If I'm correct, I didn't actually make any fix here, but just fixed the non-standardness of the encoding. (However, I did change rx86.py in the branch 32ptr-on-64bit, and there I introduced indirectly a bug.) diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -308,14 +308,17 @@ # XXX: Hack. Ignore REX.W if we are using 16-bit operands if mc._use_16_bit_immediate: basevalue &= ~REX_W - if basevalue != 0x40 or rexbyte != 0: + if basevalue != 0 or rexbyte != 0: + if basevalue == 0: + basevalue = 0x40 mc.writechar(chr(basevalue | rexbyte)) else: assert rexbyte == 0 return 0 -rex_w = encode_rex, 0, (0x40 | REX_W), None -rex_nw = encode_rex, 0, 0x40, None +rex_w = encode_rex, 0, (0x40 | REX_W), None # a REX.W prefix +rex_nw = encode_rex, 0, 0, None # an optional REX prefix +rex_fw = encode_rex, 0, 0x40, None # a forced REX prefix # ____________________________________________________________ @@ -460,7 +463,7 @@ # ------------------------------ MOV ------------------------------ MOV_ri = insn(rex_w, register(1), '\xB8', immediate(2, 'q')) - MOV8_ri = insn(rex_w, byte_register(1), '\xB0', immediate(2, 'b')) + MOV8_ri = insn(rex_fw, byte_register(1), '\xB0', immediate(2, 'b')) # ------------------------------ Arithmetic ------------------------------ @@ -486,11 +489,11 @@ CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) - CMP8_ri = insn(rex_nw, '\x80', byte_register(1), '\xF8', immediate(2, 'b')) + CMP8_ri = insn(rex_fw, '\x80', byte_register(1), '\xF8', immediate(2, 'b')) - AND8_rr = insn(rex_w, '\x20', 
byte_register(1), byte_register(2,8), '\xC0') - OR8_rr = insn(rex_w, '\x08', byte_register(1), byte_register(2,8), '\xC0') + OR8_rr = insn(rex_fw, '\x08', byte_register(1), byte_register(2,8), '\xC0') NEG_r = insn(rex_w, '\xF7', register(1), '\xD8') @@ -673,8 +676,8 @@ define_modrm_modes(insnname + '_r*', [rex_type, '\x8B', register(1, 8)]) define_modrm_modes(insnname + '_*i', [rex_type, '\xC7', orbyte(0<<3)], [immediate(2)]) -define_modrm_modes('MOV8_*r', [rex_w, '\x88', byte_register(2, 8)], regtype='BYTE') -define_modrm_modes('MOV8_*i', [rex_w, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') +define_modrm_modes('MOV8_*r', [rex_fw, '\x88', byte_register(2, 8)], regtype='BYTE') +define_modrm_modes('MOV8_*i', [rex_fw, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') define_modrm_modes('MOVZX8_r*', [rex_w, '\x0F\xB6', register(1, 8)], regtype='BYTE') define_modrm_modes('MOVSX8_r*', [rex_w, '\x0F\xBE', register(1, 8)], regtype='BYTE') diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -10,8 +10,9 @@ END_TAG = '<<>>' class CodeCheckerMixin(object): - def __init__(self, expected): + def __init__(self, expected, accept_unnecessary_prefix): self.expected = expected + self.accept_unnecessary_prefix = accept_unnecessary_prefix self.index = 0 def begin(self, op): @@ -20,6 +21,9 @@ def writechar(self, char): if char != self.expected[self.index:self.index+1]: + if (char == self.accept_unnecessary_prefix + and self.index == self.instrindex): + return # ignore the extra character '\x40' print self.op print "\x09from rx86.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..." print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." 
@@ -49,6 +53,7 @@ REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(8)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi] + accept_unnecessary_prefix = None methname = '?' def reg_tests(self): @@ -340,7 +345,8 @@ ilist = self.make_all_tests(methname, argmodes) oplist, as_code = self.run_test(methname, instrname, argmodes, ilist, instr_suffix) - cc = self.get_code_checker_class()(as_code) + cls = self.get_code_checker_class() + cc = cls(as_code, self.accept_unnecessary_prefix) for op, args in zip(oplist, ilist): if op: cc.begin(op) diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -18,6 +18,7 @@ rx86.R.esi, rx86.R.edi, rx86.R.r8, rx86.R.r9, rx86.R.r10, rx86.R.r11, rx86.R.r12, rx86.R.r13, rx86.R.r14, rx86.R.r15] + accept_unnecessary_prefix = '\x40' def should_skip_instruction(self, instrname, argmodes): return ( From commits-noreply at bitbucket.org Thu Apr 21 11:59:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 11:59:15 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Add a "raise AssertionError" for unknown cases. Message-ID: <20110421095915.40040282C26@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43508:3ea6c9727f2d Date: 2011-04-20 20:54 +0200 http://bitbucket.org/pypy/pypy/changeset/3ea6c9727f2d/ Log: Add a "raise AssertionError" for unknown cases. 
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py --- a/pypy/jit/backend/llsupport/gc.py +++ b/pypy/jit/backend/llsupport/gc.py @@ -873,6 +873,13 @@ newops.append(ResOperation(rop.HIDE_INTO_PTR32, [v1], v2)) op = op.copy_and_change(num, args=[op.getarg(0), op.getarg(1), v2]) + elif num == rop.ARRAYLEN_GC or num == rop.NEW_ARRAY: + # although these operations operate on a + # GcArray(HiddenGcRef32), there is no actual + # HiddenGcRef32 argument or result + pass + else: + raise AssertionError(op) elif (self.supports_compressed_ptrs and isinstance(descr, BaseCallDescr)): args = op.getarglist() From commits-noreply at bitbucket.org Thu Apr 21 11:59:46 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 11:59:46 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: hg merge default Message-ID: <20110421095946.4272D282C1B@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43509:c116bac64a27 Date: 2011-04-20 20:56 +0200 http://bitbucket.org/pypy/pypy/changeset/c116bac64a27/ Log: hg merge default diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -110,6 +110,13 @@ __dict__ = 8 raises(TypeError, dir, Foo("foo")) + def test_dir_broken_object(self): + class Foo(object): + x = 3 + def __getattribute__(self, name): + return name + assert dir(Foo()) == [] + def test_dir_custom(self): class Foo(object): def __dir__(self): diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py --- a/pypy/annotation/description.py +++ b/pypy/annotation/description.py @@ -880,6 +880,11 @@ except AttributeError: return False + def warn_missing_attribute(self, attr): + # only warn for missing attribute names whose name doesn't start + # with '$', to silence the warnings about '$memofield_xxx'. 
+ return not self.has_attribute(attr) and not attr.startswith('$') + def read_attribute(self, attr): try: return self.attrcache[attr] diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py --- a/pypy/jit/backend/x86/test/test_gc_integration.py +++ b/pypy/jit/backend/x86/test/test_gc_integration.py @@ -327,6 +327,8 @@ ARRAY = lltype.GcArray(lltype.Signed) arraydescr = cpu.arraydescrof(ARRAY) self.arraydescr = arraydescr + ARRAYCHAR = lltype.GcArray(lltype.Char) + arraychardescr = cpu.arraydescrof(ARRAYCHAR) self.namespace = locals().copy() @@ -389,3 +391,24 @@ finish(p0) ''' py.test.raises(Seen, self.interpret, ops, []) + + def test_malloc_array_of_char(self): + # check that fastpath_malloc_varsize() respects the alignment + # of the pointer in the nursery + ops = ''' + [] + p1 = new_array(1, descr=arraychardescr) + p2 = new_array(2, descr=arraychardescr) + p3 = new_array(3, descr=arraychardescr) + p4 = new_array(4, descr=arraychardescr) + finish(p1, p2, p3, p4) + ''' + self.interpret(ops, []) + p1 = self.getptr(0, llmemory.GCREF) + p2 = self.getptr(1, llmemory.GCREF) + p3 = self.getptr(2, llmemory.GCREF) + p4 = self.getptr(3, llmemory.GCREF) + assert p1._obj.intval & (WORD-1) == 0 # aligned + assert p2._obj.intval & (WORD-1) == 0 # aligned + assert p3._obj.intval & (WORD-1) == 0 # aligned + assert p4._obj.intval & (WORD-1) == 0 # aligned diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -76,8 +76,8 @@ result.sort() return result - elif hasattr(obj, '__dir__'): - result = obj.__dir__() + elif hasattr(type(obj), '__dir__'): + result = type(obj).__dir__(obj) if not isinstance(result, list): raise TypeError("__dir__() must return a list, not %r" % ( type(result),)) @@ -87,11 +87,14 @@ else: #(regular item) Dict = {} try: - Dict.update(obj.__dict__) - except AttributeError: pass 
+ if isinstance(obj.__dict__, dict): + Dict.update(obj.__dict__) + except AttributeError: + pass try: Dict.update(_classdir(obj.__class__)) - except AttributeError: pass + except AttributeError: + pass ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). @@ -99,10 +102,14 @@ ## XXX needed to get at im_self etc of method objects. */ for attr in ['__members__','__methods__']: try: - for item in getattr(obj, attr): + l = getattr(obj, attr) + if not isinstance(l, list): + continue + for item in l: if isinstance(item, types.StringTypes): Dict[item] = None - except (AttributeError, TypeError): pass + except (AttributeError, TypeError): + pass result = Dict.keys() result.sort() diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -10,8 +10,9 @@ END_TAG = '<<>>' class CodeCheckerMixin(object): - def __init__(self, expected): + def __init__(self, expected, accept_unnecessary_prefix): self.expected = expected + self.accept_unnecessary_prefix = accept_unnecessary_prefix self.index = 0 def begin(self, op): @@ -20,6 +21,9 @@ def writechar(self, char): if char != self.expected[self.index:self.index+1]: + if (char == self.accept_unnecessary_prefix + and self.index == self.instrindex): + return # ignore the extra character '\x40' print self.op print "\x09from rx86.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..." print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." 
@@ -43,15 +47,21 @@ TESTDIR = 'rx86_32' X86_CodeBuilder = rx86.X86_32_CodeBuilder REGNAMES = ['%eax', '%ecx', '%edx', '%ebx', '%esp', '%ebp', '%esi', '%edi'] + REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%ah', '%ch', '%dh', '%bh'] XMMREGNAMES = ['%%xmm%d' % i for i in range(16)] REGS = range(8) + REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(8)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi] + accept_unnecessary_prefix = None methname = '?' def reg_tests(self): return self.REGS + def reg8_tests(self): + return self.REGS8 + def xmm_reg_tests(self): return self.reg_tests() @@ -97,12 +107,14 @@ def get_all_tests(self): return { 'r': self.reg_tests, + 'r8': self.reg8_tests, 'x': self.xmm_reg_tests, 'b': self.stack_bp_tests, 's': self.stack_sp_tests, 'm': self.memory_tests, 'a': self.array_tests, 'i': self.imm32_tests, + 'i8': self.imm8_tests, 'j': self.imm32_tests, 'l': self.relative_tests, } @@ -110,6 +122,10 @@ def assembler_operand_reg(self, regnum): return self.REGNAMES[regnum] + def assembler_operand_reg8(self, regnum): + assert regnum & rx86.BYTE_REG_FLAG + return self.REGNAMES8[regnum &~ rx86.BYTE_REG_FLAG] + def assembler_operand_xmm_reg(self, regnum): return self.XMMREGNAMES[regnum] @@ -137,16 +153,19 @@ def get_all_assembler_operands(self): return { 'r': self.assembler_operand_reg, + 'r8': self.assembler_operand_reg8, 'x': self.assembler_operand_xmm_reg, 'b': self.assembler_operand_stack_bp, 's': self.assembler_operand_stack_sp, 'm': self.assembler_operand_memory, 'a': self.assembler_operand_array, 'i': self.assembler_operand_imm, + 'i8': self.assembler_operand_imm, 'j': self.assembler_operand_imm_addr, } - def run_test(self, methname, instrname, argmodes, args_lists): + def run_test(self, methname, instrname, argmodes, args_lists, + instr_suffix=None): global labelcount labelcount = 0 oplist = [] @@ -169,6 +188,9 @@ if (self.WORD == 8) and instrname.startswith('CVT'): suffix = suffixes[self.WORD] + if instr_suffix is not 
None: + suffix = instr_suffix # overwrite + following = "" if False: # instr.indirect: suffix = "" @@ -196,11 +218,22 @@ oplist.append(op) g.write('\t.string "%s"\n' % END_TAG) g.close() - os.system('as --%d "%s" -o "%s"' % (self.WORD*8, inputname, filename)) + f, g = os.popen4('as --%d "%s" -o "%s"' % + (self.WORD*8, inputname, filename), 'r') + f.close() + got = g.read() + g.close() + error = [line for line in got.splitlines() if 'error' in line.lower()] + if error: + raise Exception("Assembler got an error: %r" % error[0]) + error = [line for line in got.splitlines() + if 'warning' in line.lower()] + if error: + raise Exception("Assembler got a warning: %r" % error[0]) try: f = open(filename, 'rb') except IOError: - raise Exception("Assembler error") + raise Exception("Assembler did not produce output?") data = f.read() f.close() i = data.find(BEGIN_TAG) @@ -227,13 +260,20 @@ if methname in ('ADD_ri', 'AND_ri', 'CMP_ri', 'OR_ri', 'SUB_ri', 'XOR_ri', 'SBB_ri'): if args[0] == rx86.R.eax: - return [] # ADD EAX, constant: there is a special encoding + return [] # ADD EAX, constant: there is a special encoding + if methname in ('CMP8_ri',): + if args[0] == rx86.R.al: + return [] # CMP AL, constant: there is a special encoding if methname == 'XCHG_rr' and rx86.R.eax in args: return [] # special encoding if methname == 'MOV_rj' and args[0] == rx86.R.eax: return [] # MOV EAX, [immediate]: there is a special encoding if methname == 'MOV_jr' and args[1] == rx86.R.eax: return [] # MOV [immediate], EAX: there is a special encoding + if methname == 'MOV8_rj' and args[0] == rx86.R.al: + return [] # MOV AL, [immediate]: there is a special encoding + if methname == 'MOV8_jr' and args[1] == rx86.R.al: + return [] # MOV [immediate], AL: there is a special encoding return [args] @@ -243,7 +283,9 @@ return X86_CodeBuilder def should_skip_instruction(self, instrname, argmodes): - is_artificial_instruction = instrname[-1].isdigit() or (argmodes != '' and argmodes[-1].isdigit()) + 
is_artificial_instruction = (argmodes != '' and argmodes[-1].isdigit()) + is_artificial_instruction |= (instrname[-1].isdigit() and + instrname[-1] != '8') return ( is_artificial_instruction or # XXX: Can't tests shifts automatically at the moment @@ -274,14 +316,37 @@ if methname == 'WORD': return + if instrname.endswith('8'): + instrname = instrname[:-1] + if instrname == 'MOVSX' or instrname == 'MOVZX': + instr_suffix = 'b' + suffixes[self.WORD] + instrname = instrname[:-1] + if argmodes[1] == 'r': + argmodes = [argmodes[0], 'r8'] + else: + instr_suffix = 'b' + realargmodes = [] + for mode in argmodes: + if mode == 'r': + mode = 'r8' + elif mode == 'i': + mode = 'i8' + realargmodes.append(mode) + argmodes = realargmodes + elif instrname == 'CALL' or instrname == 'JMP': + instr_suffix = suffixes[self.WORD] + ' *' + else: + instr_suffix = None print "Testing %s with argmodes=%r" % (instrname, argmodes) self.methname = methname self.is_xmm_insn = getattr(getattr(self.X86_CodeBuilder, methname), 'is_xmm_insn', False) ilist = self.make_all_tests(methname, argmodes) - oplist, as_code = self.run_test(methname, instrname, argmodes, ilist) - cc = self.get_code_checker_class()(as_code) + oplist, as_code = self.run_test(methname, instrname, argmodes, ilist, + instr_suffix) + cls = self.get_code_checker_class() + cc = cls(as_code, self.accept_unnecessary_prefix) for op, args in zip(oplist, ilist): if op: cc.begin(op) diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -9,11 +9,16 @@ X86_CodeBuilder = rx86.X86_64_CodeBuilder REGNAMES = ['%rax', '%rcx', '%rdx', '%rbx', '%rsp', '%rbp', '%rsi', '%rdi', '%r8', '%r9', '%r10', '%r11', '%r12', '%r13', '%r14', '%r15'] + REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%spl', '%bpl', '%sil', '%dil', + '%r8b', '%r9b', '%r10b', '%r11b', + 
'%r12b', '%r13b', '%r14b', '%r15b'] REGS = range(16) + REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(16)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi, rx86.R.r8, rx86.R.r9, rx86.R.r10, rx86.R.r11, rx86.R.r12, rx86.R.r13, rx86.R.r14, rx86.R.r15] + accept_unnecessary_prefix = '\x40' def should_skip_instruction(self, instrname, argmodes): return ( diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -308,14 +308,17 @@ # XXX: Hack. Ignore REX.W if we are using 16-bit operands if mc._use_16_bit_immediate: basevalue &= ~REX_W - if basevalue != 0x40 or rexbyte != 0: + if basevalue != 0 or rexbyte != 0: + if basevalue == 0: + basevalue = 0x40 mc.writechar(chr(basevalue | rexbyte)) else: assert rexbyte == 0 return 0 -rex_w = encode_rex, 0, (0x40 | REX_W), None -rex_nw = encode_rex, 0, 0x40, None +rex_w = encode_rex, 0, (0x40 | REX_W), None # a REX.W prefix +rex_nw = encode_rex, 0, 0, None # an optional REX prefix +rex_fw = encode_rex, 0, 0x40, None # a forced REX prefix # ____________________________________________________________ @@ -460,6 +463,7 @@ # ------------------------------ MOV ------------------------------ MOV_ri = insn(rex_w, register(1), '\xB8', immediate(2, 'q')) + MOV8_ri = insn(rex_fw, byte_register(1), '\xB0', immediate(2, 'b')) # ------------------------------ Arithmetic ------------------------------ @@ -485,11 +489,11 @@ CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) - CMP8_ri = insn(rex_nw, '\x80', byte_register(1), '\xF8', immediate(2, 'b')) + CMP8_ri = insn(rex_fw, '\x80', byte_register(1), '\xF8', immediate(2, 'b')) - AND8_rr = insn(rex_w, '\x20', byte_register(1), byte_register(2,8), '\xC0') + AND8_rr = insn(rex_fw, '\x20', byte_register(1), byte_register(2,8), '\xC0') - OR8_rr = insn(rex_w, '\x08', byte_register(1), byte_register(2,8), '\xC0') + OR8_rr = insn(rex_fw, '\x08', 
byte_register(1), byte_register(2,8), '\xC0') NEG_r = insn(rex_w, '\xF7', register(1), '\xD8') @@ -672,8 +676,8 @@ define_modrm_modes(insnname + '_r*', [rex_type, '\x8B', register(1, 8)]) define_modrm_modes(insnname + '_*i', [rex_type, '\xC7', orbyte(0<<3)], [immediate(2)]) -define_modrm_modes('MOV8_*r', [rex_nw, '\x88', byte_register(2, 8)], regtype='BYTE') -define_modrm_modes('MOV8_*i', [rex_nw, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') +define_modrm_modes('MOV8_*r', [rex_fw, '\x88', byte_register(2, 8)], regtype='BYTE') +define_modrm_modes('MOV8_*i', [rex_fw, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') define_modrm_modes('MOVZX8_r*', [rex_w, '\x0F\xB6', register(1, 8)], regtype='BYTE') define_modrm_modes('MOVSX8_r*', [rex_w, '\x0F\xBE', register(1, 8)], regtype='BYTE') diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -260,30 +260,6 @@ assert call.getarg(0).value == pow_addr assert call.getarg(1).value == 2.0 assert call.getarg(2).value == 3.0 - - def test_xor(self): - values = (-4, -3, -2, -1, 0, 1, 2, 3, 4) - for a in values: - for b in values: - if a^b >= 0: - r = 2000 - else: - r = 0 - ops = 46 - - self.run_source(''' - def main(a, b): - i = sa = 0 - while i < 2000: - if a > 0: # Specialises the loop - pass - if b > 1: - pass - if a^b >= 0: - sa += 1 - i += 1 - return sa - ''', ops, ([a, b], r)) def test_shift(self): from sys import maxint diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1530,3 +1530,45 @@ ## assert call.getarg(0).value == pow_addr ## assert call.getarg(1).value == 2.0 ## assert call.getarg(2).value == 3.0 + + def test_xor(self): + def main(a, b): + i = sa = 0 + while i < 300: + if a > 0: 
# Specialises the loop + pass + if b > 10: + pass + if a^b >= 0: + sa += 1 # ID: add + i += 1 + return sa + + # if both are >=0, a^b is known to be >=0 + log = self.run(main, [3, 14], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i9 = int_lt(i6, 300) + guard_true(i9, descr=...) + i11 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i13 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i13, i11, descr=) + """) + + # XXX: I don't understand why this assert passes, because the + # optimizer doesn't know that b >=0 + log = self.run(main, [3, 4], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i9 = int_lt(i6, 300) + guard_true(i9, descr=...) + i11 = int_add_ovf(i7, 1) + guard_no_overflow(descr=...) + i13 = int_add(i6, 1) + --TICK-- + jump(p0, p1, p2, p3, p4, p5, i13, i11, descr=) + """) diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -446,7 +446,6 @@ IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([ 'cmp', 'test', 'set', 'sahf', 'lahf', 'cltd', 'cld', 'std', 'rep', 'movs', 'lods', 'stos', 'scas', 'cwtl', 'cwde', 'prefetch', - 'pslld', # floating-point operations cannot produce GC pointers 'f', 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', @@ -457,6 +456,7 @@ 'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv', 'bswap', 'bt', 'rdtsc', 'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq', + 'paddq', 'pinsr', # zero-extending moves should not produce GC pointers 'movz', ]) diff --git a/pypy/rpython/rpbc.py b/pypy/rpython/rpbc.py --- a/pypy/rpython/rpbc.py +++ b/pypy/rpython/rpbc.py @@ -485,7 +485,7 @@ try: thisattrvalue = frozendesc.attrcache[attr] except KeyError: - if not frozendesc.has_attribute(attr): + if frozendesc.warn_missing_attribute(attr): 
warning("Desc %r has no attribute %r" % (frozendesc, attr)) continue llvalue = r_value.convert_const(thisattrvalue) From commits-noreply at bitbucket.org Thu Apr 21 14:19:57 2011 From: commits-noreply at bitbucket.org (berdario) Date: Thu, 21 Apr 2011 14:19:57 +0200 (CEST) Subject: [pypy-svn] pypy default: Fixed docs on how to build the documentation Message-ID: <20110421121957.18749282C1B@codespeak.net> Author: Dario Bertini Branch: Changeset: r43512:c5a73888b9bb Date: 2011-04-18 20:34 +0200 http://bitbucket.org/pypy/pypy/changeset/c5a73888b9bb/ Log: Fixed docs on how to build the documentation diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -1052,8 +1052,8 @@ --------------------------------------------------- Most of the PyPy's documentation and website is kept in -`pypy/documentation` and `pypy/documentation/website` respectively. -You can simply edit or add '.txt' files which contain ReST-markuped +`pypy/doc` respectively. +You can simply edit or add '.rst' files which contain ReST-markuped files. Here is a `ReST quickstart`_ but you can also just look at the existing documentation and see how things work. @@ -1062,25 +1062,24 @@ Automatically test documentation/website changes ------------------------------------------------ -.. _`docutils home page`: -.. _`docutils`: http://docutils.sourceforge.net/ +.. _`sphinx home page`: +.. _`sphinx`: http://sphinx.pocoo.org/ We automatically check referential integrity and ReST-conformance. In order to -run the tests you need docutils_ installed. Then go to the local checkout -of the documentation directory and run the tests:: +run the tests you need sphinx_ installed. 
Then go to the local checkout +of the documentation directory and run the Makefile:: - cd .../pypy/documentation - python ../test_all.py + cd pypy/doc + make html If you see no failures chances are high that your modifications at least -don't produce ReST-errors or wrong local references. A side effect of running -the tests is that you have `.html` files in the documentation directory -which you can point your browser to! +don't produce ReST-errors or wrong local references. Now you will have `.html` +files in the documentation directory which you can point your browser to! Additionally, if you also want to check for remote references inside the documentation issue:: - python ../test_all.py --checkremote + make linkcheck which will check that remote URLs are reachable. From commits-noreply at bitbucket.org Thu Apr 21 14:19:58 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 14:19:58 +0200 (CEST) Subject: [pypy-svn] pypy default: Also mention pypy/extradoc/pypy.org. Message-ID: <20110421121958.A7B97282C1B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43513:138c98b2d194 Date: 2011-04-21 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/138c98b2d194/ Log: Also mention pypy/extradoc/pypy.org. diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -1051,14 +1051,17 @@ documentation/website files in your local checkout --------------------------------------------------- -Most of the PyPy's documentation and website is kept in -`pypy/doc` respectively. +Most of the PyPy's documentation is kept in `pypy/doc`. You can simply edit or add '.rst' files which contain ReST-markuped files. Here is a `ReST quickstart`_ but you can also just look at the existing documentation and see how things work. .. _`ReST quickstart`: http://docutils.sourceforge.net/docs/rst/quickref.html +Note that the web site of http://pypy.org/ is maintained separately. 
+For now it is in the repository https://bitbucket.org/pypy/extradoc +in the directory ``pypy.org``. + Automatically test documentation/website changes ------------------------------------------------ From commits-noreply at bitbucket.org Thu Apr 21 15:42:45 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 21 Apr 2011 15:42:45 +0200 (CEST) Subject: [pypy-svn] buildbot default: automatically clone the repo if it is owned by pypy Message-ID: <20110421134245.BDFD4282C26@codespeak.net> Author: Antonio Cuni Branch: Changeset: r494:d66e27d8619a Date: 2011-04-21 15:00 +0200 http://bitbucket.org/pypy/buildbot/changeset/d66e27d8619a/ Log: automatically clone the repo if it is owned by pypy diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -18,8 +18,14 @@ mail.handle_commit ] -def check_for_local_repo(local_repo): - return local_repo.check(dir=True) +def check_for_local_repo(local_repo, remote_repo, owner): + if local_repo.check(dir=True): + return True + if owner == app.config['DEFAULT_USER']: + print >> sys.stderr, 'Automatic initial clone of %s' % remote_repo + scm.hg('clone', str(remote_repo), str(local_repo)) + return True + return False def get_commits(payload, seen_nodes=set()): import operator @@ -35,9 +41,10 @@ def handle(payload, test=False): path = payload['repository']['absolute_url'] + owner = payload['repository']['owner'] local_repo = app.config['LOCAL_REPOS'].join(path) remote_repo = app.config['REMOTE_BASE'] + path - if not check_for_local_repo(local_repo): + if not check_for_local_repo(local_repo, remote_repo, owner): print >> sys.stderr, 'Ignoring unknown repo', path return scm.hg('pull', '-R', local_repo) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -70,6 +70,10 @@ def test_irc_message(monkeypatch, messages): payload = { + 'repository': { + 'owner': 
'pypy', + 'name': 'pypy', + }, 'commits': [ { 'revision': 42, @@ -144,17 +148,29 @@ def test_handle_unknown(monkeypatch): - def hgraise(*k): - raise Exception('this should never be called') - - py.test.raises(Exception, hgraise) + hgcalls = [] + def hgraise(*args): + hgcalls.append(args) monkeypatch.setattr(scm, 'hg', hgraise) hook.handle({ u'repository': { - u'absolute_url': 'uhm/missing/yeah', + u'absolute_url': '/foobar/myrepo', + u'owner': 'foobar', }, }) + assert hgcalls == [] + + hook.handle({ + u'repository': { + u'absolute_url': '/pypy/myrepo', + u'owner': 'pypy' + }, + u'commits': [], + }) + assert hgcalls[0][:2] == ('clone', 'http://bitbucket.org/pypy/myrepo',) + local_repo = hgcalls[0][-1] + assert hgcalls[1] == ('pull', '-R', local_repo) def test_ignore_duplicate_commits(monkeypatch, mails, messages): From commits-noreply at bitbucket.org Thu Apr 21 17:46:33 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 17:46:33 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: - Rename the fields that now contain long longs with the 'll_' Message-ID: <20110421154633.BB993282C1B@codespeak.net> Author: Armin Rigo Branch: jit-lsprofile Changeset: r43514:d9039459effc Date: 2011-04-21 17:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d9039459effc/ Log: - Rename the fields that now contain long longs with the 'll_' prefix. Add some asserts to make sure that they are long longs, and never forced to floats. - Use space.r_longlong_w(). 
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -101,8 +101,8 @@ class ProfilerSubEntry(object): def __init__(self, frame): self.frame = frame - self.tt = 0 - self.it = 0 + self.ll_tt = r_longlong(0) + self.ll_it = r_longlong(0) self.callcount = 0 self.recursivecallcount = 0 self.recursionLevel = 0 @@ -110,17 +110,19 @@ def stats(self, space, parent, factor): w_sse = W_StatsSubEntry(space, self.frame, self.callcount, self.recursivecallcount, - factor * float(self.tt), - factor * float(self.it)) + factor * float(self.ll_tt), + factor * float(self.ll_it)) return space.wrap(w_sse) def _stop(self, tt, it): + assert type(tt) is r_longlong + assert type(it) is r_longlong self.recursionLevel -= 1 if self.recursionLevel == 0: - self.tt += tt + self.ll_tt += tt else: self.recursivecallcount += 1 - self.it += it + self.ll_it += it self.callcount += 1 class ProfilerEntry(ProfilerSubEntry): @@ -136,8 +138,8 @@ w_sublist = space.w_None w_se = W_StatsEntry(space, self.frame, self.callcount, self.recursivecallcount, - factor * float(self.tt), - factor * float(self.it), w_sublist) + factor * float(self.ll_tt), + factor * float(self.ll_it), w_sublist) return space.wrap(w_se) @jit.purefunction @@ -154,20 +156,20 @@ class ProfilerContext(object): def __init__(self, profobj, entry): self.entry = entry - self.subt = 0 + self.ll_subt = r_longlong(0) self.previous = profobj.current_context entry.recursionLevel += 1 if profobj.subcalls and self.previous: caller = jit.hint(self.previous.entry, promote=True) subentry = caller._get_or_make_subentry(entry) subentry.recursionLevel += 1 - self.t0 = profobj.timer() + self.ll_t0 = profobj.ll_timer() def _stop(self, profobj, entry): - tt = profobj.timer() - self.t0 - it = tt - self.subt + tt = profobj.ll_timer() - self.ll_t0 + it = tt - self.ll_subt if self.previous: - self.previous.subt += tt + self.previous.ll_subt += 
tt entry._stop(tt, it) if profobj.subcalls and self.previous: caller = jit.hint(self.previous.entry, promote=True) @@ -238,18 +240,11 @@ self.builtin_data = {} self.space = space - def timer(self): - # XXX ignore for now casting of float to long long and instead - # use float -> int -> long long + def ll_timer(self): if self.w_callable: space = self.space try: - if self.time_unit > 0.0: - return r_longlong( - space.int_w(space.call_function(self.w_callable))) - else: - return r_longlong(int(space.float_w( - space.call_function(self.w_callable)))) + return space.r_longlong_w(space.call_function(self.w_callable)) except OperationError, e: e.write_unraisable(space, "timer function ", self.w_callable) From commits-noreply at bitbucket.org Thu Apr 21 19:37:34 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 19:37:34 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: An improved test, which fails as expected. Message-ID: <20110421173734.2E3D6282C1B@codespeak.net> Author: Armin Rigo Branch: jit-lsprofile Changeset: r43517:6164cbeefe10 Date: 2011-04-21 19:37 +0200 http://bitbucket.org/pypy/pypy/changeset/6164cbeefe10/ Log: An improved test, which fails as expected. 
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -91,18 +91,29 @@ assert spam2bar.inlinetime == 1.0 assert spam2bar.totaltime == 1.0 - def test_direct_read_timestamp(self): - import _lsprof - - def f(): - pass - - profiler = _lsprof.Profiler() - profiler.enable() - f() - profiler.disable() - stats = profiler.getstats() - assert stats + def test_scale_of_result(self): + import _lsprof, time + prof = _lsprof.Profiler() + def foo(n): + t = time.time() + while abs(t - time.time()) < 1.0: + pass # busy-wait for 1 second + def bar(n): + foo(n) + prof.enable() + bar(0) + prof.disable() + stats = prof.getstats() + entries = {} + for entry in stats: + entries[entry.code] = entry + efoo = entries[foo.func_code] + ebar = entries[bar.func_code] + assert 0.9 < efoo.totaltime < 2.9 + assert 0.9 < efoo.inlinetime < 2.9 + for subentry in ebar.calls: + assert 0.9 < subentry.totaltime < 2.9 + assert 0.9 < subentry.inlinetime < 2.9 def test_cprofile(self): import sys, os From commits-noreply at bitbucket.org Thu Apr 21 20:12:38 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 20:12:38 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Remove this, which seems rather pointless: _flush_unmatched() Message-ID: <20110421181238.749A7282C1B@codespeak.net> Author: Armin Rigo Branch: jit-lsprofile Changeset: r43518:4ef8ffa2a395 Date: 2011-04-21 19:55 +0200 http://bitbucket.org/pypy/pypy/changeset/4ef8ffa2a395/ Log: Remove this, which seems rather pointless: _flush_unmatched() is only called once, when we call disable(). 
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -316,7 +316,6 @@ context._stop(self, entry) self.current_context = context.previous - @jit.unroll_safe def _flush_unmatched(self): context = self.current_context while context: From commits-noreply at bitbucket.org Thu Apr 21 20:12:39 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 21 Apr 2011 20:12:39 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Compute the total time in real seconds and in CPU clock ticks, Message-ID: <20110421181239.F0EA6282C1B@codespeak.net> Author: Armin Rigo Branch: jit-lsprofile Changeset: r43519:9467eed61789 Date: 2011-04-21 20:12 +0200 http://bitbucket.org/pypy/pypy/changeset/9467eed61789/ Log: Compute the total time in real seconds and in CPU clock ticks, and use the ratio to correct all measures at the end. Note that the CPU clock ticks also measures the real time, at least on all modern Intel CPUs. (We'll ignore the older exceptions for now.) diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -241,6 +241,9 @@ self.data = {} self.builtin_data = {} self.space = space + self.is_enabled = False + self.total_timestamp = r_longlong(0) + self.total_real_time = 0.0 def ll_timer(self): if self.w_callable: @@ -255,10 +258,18 @@ def enable(self, space, w_subcalls=NoneNotWrapped, w_builtins=NoneNotWrapped): + if self.is_enabled: + return # ignored if w_subcalls is not None: self.subcalls = space.bool_w(w_subcalls) if w_builtins is not None: self.builtins = space.bool_w(w_builtins) + # We want total_real_time and total_timestamp to end up containing + # (endtime - starttime). Now we are at the start, so we first + # have to subtract the current time. 
+ self.is_enabled = True + self.total_real_time -= time.time() + self.total_timestamp -= read_timestamp() # set profiler hook c_setup_profiling() space.getexecutioncontext().setllprofile(lsprof_call, space.wrap(self)) @@ -326,6 +337,14 @@ self.current_context = None def disable(self, space): + if not self.is_enabled: + return # ignored + # We want total_real_time and total_timestamp to end up containing + # (endtime - starttime), or the sum of such intervals if + # enable() and disable() are called several times. + self.is_enabled = False + self.total_timestamp += read_timestamp() + self.total_real_time += time.time() # unset profiler hook space.getexecutioncontext().setllprofile(None, None) c_teardown_profiling() @@ -333,8 +352,14 @@ def getstats(self, space): if self.w_callable is None: - # XXX find out a correct measurment freq - factor = 1. # we measure time.time in floats + if self.is_enabled: + raise OperationError(space.w_RuntimeError, + space.wrap("Profiler instance must be disabled " + "before getting the stats")) + if self.total_timestamp: + factor = self.total_real_time / float(self.total_timestamp) + else: + factor = 1.0 # probably not used elif self.time_unit > 0.0: factor = self.time_unit else: From commits-noreply at bitbucket.org Thu Apr 21 23:28:36 2011 From: commits-noreply at bitbucket.org (lac) Date: Thu, 21 Apr 2011 23:28:36 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Stephen Simmons cannot make it. But iko can. Message-ID: <20110421212836.6FBEB282C1B@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3529:2402a80a774b Date: 2011-04-21 23:27 +0200 http://bitbucket.org/pypy/extradoc/changeset/2402a80a774b/ Log: Stephen Simmons cannot make it. But iko can. 
diff --git a/sprintinfo/gothenburg-2011/people.txt b/sprintinfo/gothenburg-2011/people.txt --- a/sprintinfo/gothenburg-2011/people.txt +++ b/sprintinfo/gothenburg-2011/people.txt @@ -10,9 +10,9 @@ ==================== ============== ===================== ================== Jacob Hallen lives there no peppers Laura Creighton lives there +Anders Hammarquist lives there Carl Friedrich Bolz 24-30 J+L's house Vegan Lukas Diekmann 24-2 J+L's house -Stephen Simmons 28-1 absolutely no nuts David Schneider 26-01 SGS Veckobostader Antonio Cuni 26-30 Hotel Poseidon his own diet :) Armin Rigo 23-02 SGS Veckobostader From commits-noreply at bitbucket.org Thu Apr 21 23:28:37 2011 From: commits-noreply at bitbucket.org (lac) Date: Thu, 21 Apr 2011 23:28:37 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: merge heads Message-ID: <20110421212837.C5FC0282C1B@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3530:57d162f82fdb Date: 2011-04-21 23:28 +0200 http://bitbucket.org/pypy/extradoc/changeset/57d162f82fdb/ Log: merge heads diff --git a/planning/hg-migration/pypy.org.filemap b/planning/hg-migration/pypy.org.filemap new file mode 100644 --- /dev/null +++ b/planning/hg-migration/pypy.org.filemap @@ -0,0 +1,2 @@ +include pypy.org +rename pypy.org . From commits-noreply at bitbucket.org Fri Apr 22 03:14:06 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Fri, 22 Apr 2011 03:14:06 +0200 (CEST) Subject: [pypy-svn] pypy default: breath-first search is not a thing. Fixed a typo. Message-ID: <20110422011406.BD984282C27@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43520:6e9a3c7322d3 Date: 2011-04-21 21:12 -0400 http://bitbucket.org/pypy/pypy/changeset/6e9a3c7322d3/ Log: breath-first search is not a thing. Fixed a typo. 
diff --git a/pypy/tool/algo/color.py b/pypy/tool/algo/color.py --- a/pypy/tool/algo/color.py +++ b/pypy/tool/algo/color.py @@ -29,7 +29,7 @@ return [v for v in self._all_nodes if v in self.neighbours] def lexicographic_order(self): - """Enumerate a lexicographic breath-first ordering of the nodes.""" + """Enumerate a lexicographic breadth-first ordering of the nodes.""" sigma = [self.getnodes()[::-1]] if not sigma[0]: return From commits-noreply at bitbucket.org Fri Apr 22 03:14:11 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Fri, 22 Apr 2011 03:14:11 +0200 (CEST) Subject: [pypy-svn] pypy default: Merged upstream. Message-ID: <20110422011411.30AC6282C2A@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43521:7f0e4cc0910a Date: 2011-04-21 21:13 -0400 http://bitbucket.org/pypy/pypy/changeset/7f0e4cc0910a/ Log: Merged upstream. diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -308,14 +308,17 @@ # XXX: Hack. 
Ignore REX.W if we are using 16-bit operands if mc._use_16_bit_immediate: basevalue &= ~REX_W - if basevalue != 0x40 or rexbyte != 0: + if basevalue != 0 or rexbyte != 0: + if basevalue == 0: + basevalue = 0x40 mc.writechar(chr(basevalue | rexbyte)) else: assert rexbyte == 0 return 0 -rex_w = encode_rex, 0, (0x40 | REX_W), None -rex_nw = encode_rex, 0, 0x40, None +rex_w = encode_rex, 0, (0x40 | REX_W), None # a REX.W prefix +rex_nw = encode_rex, 0, 0, None # an optional REX prefix +rex_fw = encode_rex, 0, 0x40, None # a forced REX prefix # ____________________________________________________________ @@ -460,6 +463,7 @@ # ------------------------------ MOV ------------------------------ MOV_ri = insn(rex_w, register(1), '\xB8', immediate(2, 'q')) + MOV8_ri = insn(rex_fw, byte_register(1), '\xB0', immediate(2, 'b')) # ------------------------------ Arithmetic ------------------------------ @@ -485,11 +489,11 @@ CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) - CMP8_ri = insn(rex_nw, '\x80', byte_register(1), '\xF8', immediate(2, 'b')) + CMP8_ri = insn(rex_fw, '\x80', byte_register(1), '\xF8', immediate(2, 'b')) - AND8_rr = insn(rex_w, '\x20', byte_register(1), byte_register(2,8), '\xC0') + AND8_rr = insn(rex_fw, '\x20', byte_register(1), byte_register(2,8), '\xC0') - OR8_rr = insn(rex_w, '\x08', byte_register(1), byte_register(2,8), '\xC0') + OR8_rr = insn(rex_fw, '\x08', byte_register(1), byte_register(2,8), '\xC0') NEG_r = insn(rex_w, '\xF7', register(1), '\xD8') @@ -672,8 +676,8 @@ define_modrm_modes(insnname + '_r*', [rex_type, '\x8B', register(1, 8)]) define_modrm_modes(insnname + '_*i', [rex_type, '\xC7', orbyte(0<<3)], [immediate(2)]) -define_modrm_modes('MOV8_*r', [rex_w, '\x88', byte_register(2, 8)], regtype='BYTE') -define_modrm_modes('MOV8_*i', [rex_w, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') +define_modrm_modes('MOV8_*r', [rex_fw, '\x88', byte_register(2, 8)], regtype='BYTE') 
+define_modrm_modes('MOV8_*i', [rex_fw, '\xC6', orbyte(0<<3)], [immediate(2, 'b')], regtype='BYTE') define_modrm_modes('MOVZX8_r*', [rex_w, '\x0F\xB6', register(1, 8)], regtype='BYTE') define_modrm_modes('MOVSX8_r*', [rex_w, '\x0F\xBE', register(1, 8)], regtype='BYTE') diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -10,8 +10,9 @@ END_TAG = '<<>>' class CodeCheckerMixin(object): - def __init__(self, expected): + def __init__(self, expected, accept_unnecessary_prefix): self.expected = expected + self.accept_unnecessary_prefix = accept_unnecessary_prefix self.index = 0 def begin(self, op): @@ -20,6 +21,9 @@ def writechar(self, char): if char != self.expected[self.index:self.index+1]: + if (char == self.accept_unnecessary_prefix + and self.index == self.instrindex): + return # ignore the extra character '\x40' print self.op print "\x09from rx86.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..." print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." @@ -43,15 +47,21 @@ TESTDIR = 'rx86_32' X86_CodeBuilder = rx86.X86_32_CodeBuilder REGNAMES = ['%eax', '%ecx', '%edx', '%ebx', '%esp', '%ebp', '%esi', '%edi'] + REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%ah', '%ch', '%dh', '%bh'] XMMREGNAMES = ['%%xmm%d' % i for i in range(16)] REGS = range(8) + REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(8)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi] + accept_unnecessary_prefix = None methname = '?' 
def reg_tests(self): return self.REGS + def reg8_tests(self): + return self.REGS8 + def xmm_reg_tests(self): return self.reg_tests() @@ -97,12 +107,14 @@ def get_all_tests(self): return { 'r': self.reg_tests, + 'r8': self.reg8_tests, 'x': self.xmm_reg_tests, 'b': self.stack_bp_tests, 's': self.stack_sp_tests, 'm': self.memory_tests, 'a': self.array_tests, 'i': self.imm32_tests, + 'i8': self.imm8_tests, 'j': self.imm32_tests, 'l': self.relative_tests, } @@ -110,6 +122,10 @@ def assembler_operand_reg(self, regnum): return self.REGNAMES[regnum] + def assembler_operand_reg8(self, regnum): + assert regnum & rx86.BYTE_REG_FLAG + return self.REGNAMES8[regnum &~ rx86.BYTE_REG_FLAG] + def assembler_operand_xmm_reg(self, regnum): return self.XMMREGNAMES[regnum] @@ -137,16 +153,19 @@ def get_all_assembler_operands(self): return { 'r': self.assembler_operand_reg, + 'r8': self.assembler_operand_reg8, 'x': self.assembler_operand_xmm_reg, 'b': self.assembler_operand_stack_bp, 's': self.assembler_operand_stack_sp, 'm': self.assembler_operand_memory, 'a': self.assembler_operand_array, 'i': self.assembler_operand_imm, + 'i8': self.assembler_operand_imm, 'j': self.assembler_operand_imm_addr, } - def run_test(self, methname, instrname, argmodes, args_lists): + def run_test(self, methname, instrname, argmodes, args_lists, + instr_suffix=None): global labelcount labelcount = 0 oplist = [] @@ -169,6 +188,9 @@ if (self.WORD == 8) and instrname.startswith('CVT'): suffix = suffixes[self.WORD] + if instr_suffix is not None: + suffix = instr_suffix # overwrite + following = "" if False: # instr.indirect: suffix = "" @@ -196,11 +218,22 @@ oplist.append(op) g.write('\t.string "%s"\n' % END_TAG) g.close() - os.system('as --%d "%s" -o "%s"' % (self.WORD*8, inputname, filename)) + f, g = os.popen4('as --%d "%s" -o "%s"' % + (self.WORD*8, inputname, filename), 'r') + f.close() + got = g.read() + g.close() + error = [line for line in got.splitlines() if 'error' in line.lower()] + if error: + raise 
Exception("Assembler got an error: %r" % error[0]) + error = [line for line in got.splitlines() + if 'warning' in line.lower()] + if error: + raise Exception("Assembler got a warning: %r" % error[0]) try: f = open(filename, 'rb') except IOError: - raise Exception("Assembler error") + raise Exception("Assembler did not produce output?") data = f.read() f.close() i = data.find(BEGIN_TAG) @@ -227,13 +260,20 @@ if methname in ('ADD_ri', 'AND_ri', 'CMP_ri', 'OR_ri', 'SUB_ri', 'XOR_ri', 'SBB_ri'): if args[0] == rx86.R.eax: - return [] # ADD EAX, constant: there is a special encoding + return [] # ADD EAX, constant: there is a special encoding + if methname in ('CMP8_ri',): + if args[0] == rx86.R.al: + return [] # CMP AL, constant: there is a special encoding if methname == 'XCHG_rr' and rx86.R.eax in args: return [] # special encoding if methname == 'MOV_rj' and args[0] == rx86.R.eax: return [] # MOV EAX, [immediate]: there is a special encoding if methname == 'MOV_jr' and args[1] == rx86.R.eax: return [] # MOV [immediate], EAX: there is a special encoding + if methname == 'MOV8_rj' and args[0] == rx86.R.al: + return [] # MOV AL, [immediate]: there is a special encoding + if methname == 'MOV8_jr' and args[1] == rx86.R.al: + return [] # MOV [immediate], AL: there is a special encoding return [args] @@ -243,7 +283,9 @@ return X86_CodeBuilder def should_skip_instruction(self, instrname, argmodes): - is_artificial_instruction = instrname[-1].isdigit() or (argmodes != '' and argmodes[-1].isdigit()) + is_artificial_instruction = (argmodes != '' and argmodes[-1].isdigit()) + is_artificial_instruction |= (instrname[-1].isdigit() and + instrname[-1] != '8') return ( is_artificial_instruction or # XXX: Can't tests shifts automatically at the moment @@ -274,14 +316,37 @@ if methname == 'WORD': return + if instrname.endswith('8'): + instrname = instrname[:-1] + if instrname == 'MOVSX' or instrname == 'MOVZX': + instr_suffix = 'b' + suffixes[self.WORD] + instrname = instrname[:-1] + 
if argmodes[1] == 'r': + argmodes = [argmodes[0], 'r8'] + else: + instr_suffix = 'b' + realargmodes = [] + for mode in argmodes: + if mode == 'r': + mode = 'r8' + elif mode == 'i': + mode = 'i8' + realargmodes.append(mode) + argmodes = realargmodes + elif instrname == 'CALL' or instrname == 'JMP': + instr_suffix = suffixes[self.WORD] + ' *' + else: + instr_suffix = None print "Testing %s with argmodes=%r" % (instrname, argmodes) self.methname = methname self.is_xmm_insn = getattr(getattr(self.X86_CodeBuilder, methname), 'is_xmm_insn', False) ilist = self.make_all_tests(methname, argmodes) - oplist, as_code = self.run_test(methname, instrname, argmodes, ilist) - cc = self.get_code_checker_class()(as_code) + oplist, as_code = self.run_test(methname, instrname, argmodes, ilist, + instr_suffix) + cls = self.get_code_checker_class() + cc = cls(as_code, self.accept_unnecessary_prefix) for op, args in zip(oplist, ilist): if op: cc.begin(op) diff --git a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/pypy/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -9,11 +9,16 @@ X86_CodeBuilder = rx86.X86_64_CodeBuilder REGNAMES = ['%rax', '%rcx', '%rdx', '%rbx', '%rsp', '%rbp', '%rsi', '%rdi', '%r8', '%r9', '%r10', '%r11', '%r12', '%r13', '%r14', '%r15'] + REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%spl', '%bpl', '%sil', '%dil', + '%r8b', '%r9b', '%r10b', '%r11b', + '%r12b', '%r13b', '%r14b', '%r15b'] REGS = range(16) + REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(16)] NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx, rx86.R.esi, rx86.R.edi, rx86.R.r8, rx86.R.r9, rx86.R.r10, rx86.R.r11, rx86.R.r12, rx86.R.r13, rx86.R.r14, rx86.R.r15] + accept_unnecessary_prefix = '\x40' def should_skip_instruction(self, instrname, argmodes): return ( diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst 
+++ b/pypy/doc/coding-guide.rst @@ -1051,36 +1051,38 @@ documentation/website files in your local checkout --------------------------------------------------- -Most of the PyPy's documentation and website is kept in -`pypy/documentation` and `pypy/documentation/website` respectively. -You can simply edit or add '.txt' files which contain ReST-markuped +Most of the PyPy's documentation is kept in `pypy/doc`. +You can simply edit or add '.rst' files which contain ReST-markuped files. Here is a `ReST quickstart`_ but you can also just look at the existing documentation and see how things work. .. _`ReST quickstart`: http://docutils.sourceforge.net/docs/rst/quickref.html +Note that the web site of http://pypy.org/ is maintained separately. +For now it is in the repository https://bitbucket.org/pypy/extradoc +in the directory ``pypy.org``. + Automatically test documentation/website changes ------------------------------------------------ -.. _`docutils home page`: -.. _`docutils`: http://docutils.sourceforge.net/ +.. _`sphinx home page`: +.. _`sphinx`: http://sphinx.pocoo.org/ We automatically check referential integrity and ReST-conformance. In order to -run the tests you need docutils_ installed. Then go to the local checkout -of the documentation directory and run the tests:: +run the tests you need sphinx_ installed. Then go to the local checkout +of the documentation directory and run the Makefile:: - cd .../pypy/documentation - python ../test_all.py + cd pypy/doc + make html If you see no failures chances are high that your modifications at least -don't produce ReST-errors or wrong local references. A side effect of running -the tests is that you have `.html` files in the documentation directory -which you can point your browser to! +don't produce ReST-errors or wrong local references. Now you will have `.html` +files in the documentation directory which you can point your browser to! 
Additionally, if you also want to check for remote references inside the documentation issue:: - python ../test_all.py --checkremote + make linkcheck which will check that remote URLs are reachable. From commits-noreply at bitbucket.org Fri Apr 22 08:46:29 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 08:46:29 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: we dont want setitems in short preamble Message-ID: <20110422064629.EB70D282C27@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43522:2adfcc67af47 Date: 2011-04-21 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/2adfcc67af47/ Log: we dont want setitems in short preamble diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -715,6 +715,7 @@ """ expected = """ [p0] + setfield_gc(p0, 5, descr=valuedescr) jump(p0) """ self.optimize_loop(ops, expected, preamble) From commits-noreply at bitbucket.org Fri Apr 22 08:46:32 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 08:46:32 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: handle constant inputargs properly Message-ID: <20110422064632.A54DA282C2A@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43523:81a668d19633 Date: 2011-04-22 08:45 +0200 http://bitbucket.org/pypy/pypy/changeset/81a668d19633/ Log: handle constant inputargs properly diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -166,6 +166,12 @@ inputargs = virtual_state.make_inputargs(values) sb = preamble_optimizer.produce_short_preamble_ops(inputargs) self.short_boxes = sb + + self.constant_inputargs = {} + for box in jump_args: + const = self.get_constant_box(box) + 
if const: + self.constant_inputargs[box] = const initial_inputargs_len = len(inputargs) @@ -225,6 +231,8 @@ # Clone ops and boxes to get private versions and newargs = [a.clonebox() for a in short_loop.inputargs] inliner = Inliner(short_loop.inputargs, newargs) + for box, const in self.constant_inputargs.items(): + inliner.argmap[box] = const short_loop.inputargs = newargs ops = [inliner.inline_op(op) for op in short_loop.operations] short_loop.operations = ops @@ -275,6 +283,10 @@ self.short_inliner = Inliner(inputargs, jumpargs) short = [] short_seen = {} + for box, const in self.constant_inputargs.items(): + short_seen[box] = True + self.short_inliner.argmap[box] = const + for result, op in self.short_boxes.items(): if op is not None: for op in self.getvalue(result).make_guards(result): From commits-noreply at bitbucket.org Fri Apr 22 09:43:38 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 09:43:38 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: dont cache setfield Message-ID: <20110422074338.5524E282C28@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43524:f9fe1f986804 Date: 2011-04-22 09:10 +0200 http://bitbucket.org/pypy/pypy/changeset/f9fe1f986804/ Log: dont cache setfield diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -982,16 +982,17 @@ i2b = int_is_true(i2) guard_true(i2b) [] setfield_gc(p2, i2, descr=nextdescr) - jump(p2, i2) - """ - expected = """ - [p2, i1] + jump(p2) + """ + expected = """ + [p2] + i1 = getfield_gc(p2, descr=nextdescr) i2 = int_sub(i1, 1) i2b = int_is_true(i2) guard_true(i2b) [] p3 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p3, i2, descr=nextdescr) - jump(p3, i2) + jump(p3) """ self.optimize_loop(ops, expected, preamble) From commits-noreply at bitbucket.org Fri Apr 22 09:43:40 2011 From: 
commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 09:43:40 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: dont cache setfield Message-ID: <20110422074340.131B7282C28@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43525:1a44f963b797 Date: 2011-04-22 09:18 +0200 http://bitbucket.org/pypy/pypy/changeset/1a44f963b797/ Log: dont cache setfield diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -950,17 +950,18 @@ p2sub = new_with_vtable(ConstClass(node_vtable2)) setfield_gc(p2sub, i1, descr=valuedescr) setfield_gc(p2, p2sub, descr=nextdescr) - jump(i1, p2, p2sub) - """ - expected = """ - [i1, p2, p2sub] + jump(i1, p2) + """ + expected = """ + [i1, p2] + p2sub = getfield_gc(p2, descr=nextdescr) i3 = getfield_gc(p2sub, descr=valuedescr) escape(i3) p1 = new_with_vtable(ConstClass(node_vtable)) p3sub = new_with_vtable(ConstClass(node_vtable2)) setfield_gc(p3sub, i1, descr=valuedescr) setfield_gc(p1, p3sub, descr=nextdescr) - jump(i1, p1, p3sub) + jump(i1, p1) """ self.optimize_loop(ops, expected, preamble) @@ -1869,6 +1870,7 @@ """ expected = """ [p1, i1, i2] + setfield_gc(p1, i2, descr=valuedescr) jump(p1, i1, i2) """ # in this case, all setfields are removed, because we can prove From commits-noreply at bitbucket.org Fri Apr 22 09:43:43 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 09:43:43 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: we cant allow boxes not originating from the inputargs, even if they are constant since we have to check that they are still constant in the short preamble Message-ID: <20110422074343.879CD282C28@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43526:d436870b52d4 Date: 2011-04-22 09:28 +0200 http://bitbucket.org/pypy/pypy/changeset/d436870b52d4/ Log: we 
cant allow boxes not originating from the inputargs, even if they are constant since we have to check that they are still constant in the short preamble diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -387,12 +387,11 @@ def produce_short_preamble_box(self, box, short_boxes, potential_ops): if box in short_boxes: return - if isinstance(box, Const): #self.getvalue(box).is_constant(): + if isinstance(box, Const): return if box in potential_ops: op = potential_ops[box] for arg in op.getarglist(): - arg = self.getvalue(arg).get_key_box() self.produce_short_preamble_box(arg, short_boxes, potential_ops) short_boxes[box] = op From commits-noreply at bitbucket.org Fri Apr 22 09:43:46 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 09:43:46 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: Make _lazy_setfield invalidate _cached_fields_getfield_op when it is not None. This way a lazy setfield that was canceled because the original value was written later will not clear the cache at the loop boundary. Message-ID: <20110422074346.84795282C2F@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43527:f1a2f92171de Date: 2011-04-22 09:42 +0200 http://bitbucket.org/pypy/pypy/changeset/f1a2f92171de/ Log: Make _lazy_setfield invalidate _cached_fields_getfield_op when it is not None. This way a lazy setfield that was canceled because the original value was written later will not clear the cache at the loop boundary. 
diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -5001,6 +5001,8 @@ """ expected = """ [p0] + p1 = getfield_gc(p0, descr=valuedescr) + setfield_gc(p0, p0, descr=valuedescr) jump(p0) """ self.optimize_loop(ops, expected, preamble) diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -38,10 +38,6 @@ if cached_fieldvalue is not fieldvalue: # common case: store the 'op' as lazy_setfield, and register # myself in the optheap's _lazy_setfields list - try: - del self._cached_fields_getfield_op[structvalue] - except KeyError: - pass self._lazy_setfield = op if not self._lazy_setfield_registered: optheap._lazy_setfields.append(self) @@ -110,6 +106,8 @@ def produce_potential_short_preamble_ops(self, optimizer, potential_ops, descr): + if self._lazy_setfield is not None: + return for structvalue, op in self._cached_fields_getfield_op.iteritems(): if op and structvalue in self._cached_fields: potential_ops[op.result] = op From commits-noreply at bitbucket.org Fri Apr 22 12:15:10 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 22 Apr 2011 12:15:10 +0200 (CEST) Subject: [pypy-svn] pypy default: this test has been ported to test_pypy_c_new Message-ID: <20110422101510.E9691282C2A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43528:22ef03c4a8e9 Date: 2011-04-21 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/22ef03c4a8e9/ Log: this test has been ported to test_pypy_c_new diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,44 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def 
test__ffi_call(self): - from pypy.rlib.test.test_libffi import get_libm_name - libm_name = get_libm_name(sys.platform) - out = self.run_source(''' - def main(): - try: - from _ffi import CDLL, types - except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') - return 0 - - libm = CDLL('%(libm_name)s') - pow = libm.getfunc('pow', [types.double, types.double], - types.double) - print pow.getaddr() - i = 0 - res = 0 - while i < 2000: - res += pow(2, 3) - i += 1 - return res - ''' % locals(), - 76, ([], 8.0*2000), threshold=1000) - pow_addr = int(out.splitlines()[0]) - ops = self.get_by_bytecode('CALL_FUNCTION') - assert len(ops) == 2 # we get two loops, because of specialization - call_function = ops[0] - last_ops = [op.getopname() for op in call_function[-5:]] - assert last_ops == ['force_token', - 'setfield_gc', - 'call_may_force', - 'guard_not_forced', - 'guard_no_exception'] - call = call_function[-3] - assert call.getarg(0).value == pow_addr - assert call.getarg(1).value == 2.0 - assert call.getarg(2).value == 3.0 - def test_shift(self): from sys import maxint maxvals = (-maxint-1, -maxint, maxint-1, maxint) From commits-noreply at bitbucket.org Fri Apr 22 12:15:12 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 22 Apr 2011 12:15:12 +0200 (CEST) Subject: [pypy-svn] pypy default: extrapolate some more precise tests from the old test_shift and test_reverse_shift in test_pypy_c; more work needed to cover all the cases Message-ID: <20110422101512.B6CFF282C2A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43529:43a6b30c7141 Date: 2011-04-22 12:11 +0200 http://bitbucket.org/pypy/pypy/changeset/43a6b30c7141/ Log: extrapolate some more precise tests from the old test_shift and test_reverse_shift in test_pypy_c; more work needed to cover all the cases diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ 
b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1565,3 +1565,40 @@ i12 = int_ge(i10, 0) guard_true(i12, descr=...) """) + + def test_shift_intbound(self): + def main(b): + res = 0 + a = 0 + while a < 300: + assert a >= 0 + assert 0 <= b <= 10 + val = a >> b + if val >= 0: # ID: rshift + res += 1 + val = a << b + if val >= 0: # ID: lshift + res += 2 + a += 1 + return res + # + log = self.run(main, [2], threshold=200) + assert log.result == 300*3 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('rshift', "") # guard optimized away + assert loop.match_by_id('lshift', "") # guard optimized away + + def test_lshift_and_then_rshift(self): + def main(a, b): + res = 0 + while res < 300: + assert 0 <= a <= 10 + assert 0 <= b <= 10 + val = (a << b) >> b # ID: shift + res += val + return res + # + log = self.run(main, [1, 2], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('shift', "") # optimized away From commits-noreply at bitbucket.org Fri Apr 22 12:15:14 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 22 Apr 2011 12:15:14 +0200 (CEST) Subject: [pypy-svn] pypy default: ok, I managed to turn this into a failing test Message-ID: <20110422101514.E21EC2A202B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43530:9e324d398a15 Date: 2011-04-22 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/9e324d398a15/ Log: ok, I managed to turn this into a failing test diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1589,16 +1589,18 @@ assert loop.match_by_id('lshift', "") # guard optimized away def test_lshift_and_then_rshift(self): - def main(a, b): + py.test.skip('fixme, this optimization is disabled') + def main(b): res = 0 + a = 0 while res < 300: - assert 0 <= a <= 10 + 
assert a >= 0 assert 0 <= b <= 10 - val = (a << b) >> b # ID: shift - res += val + res = (a << b) >> b # ID: shift + a += 1 return res # - log = self.run(main, [1, 2], threshold=200) + log = self.run(main, [2], threshold=200) assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('shift', "") # optimized away From commits-noreply at bitbucket.org Fri Apr 22 13:39:17 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 22 Apr 2011 13:39:17 +0200 (CEST) Subject: [pypy-svn] pypy default: Use pypy's current version instead of hard-coding the name "pypy-14". Message-ID: <20110422113917.2A65C36C20A@codespeak.net> Author: Armin Rigo Branch: Changeset: r43531:d043e5b4401c Date: 2011-04-22 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d043e5b4401c/ Log: Use pypy's current version instead of hard-coding the name "pypy-14". diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -15,6 +15,7 @@ from pypy.rlib.streamio import StreamErrors from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.module.sys.version import PYPY_VERSION SEARCH_ERROR = 0 PY_SOURCE = 1 @@ -31,7 +32,7 @@ SO = ".pyd" else: SO = ".so" -DEFAULT_SOABI = 'pypy-14' +DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() From commits-noreply at bitbucket.org Fri Apr 22 13:39:21 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 22 Apr 2011 13:39:21 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110422113921.285D92A202B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43532:f421c6d2c548 Date: 2011-04-22 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/f421c6d2c548/ Log: merge heads diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1565,3 +1565,42 @@ i12 = int_ge(i10, 0) guard_true(i12, descr=...) """) + + def test_shift_intbound(self): + def main(b): + res = 0 + a = 0 + while a < 300: + assert a >= 0 + assert 0 <= b <= 10 + val = a >> b + if val >= 0: # ID: rshift + res += 1 + val = a << b + if val >= 0: # ID: lshift + res += 2 + a += 1 + return res + # + log = self.run(main, [2], threshold=200) + assert log.result == 300*3 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('rshift', "") # guard optimized away + assert loop.match_by_id('lshift', "") # guard optimized away + + def test_lshift_and_then_rshift(self): + py.test.skip('fixme, this optimization is disabled') + def main(b): + res = 0 + a = 0 + while res < 300: + assert a >= 0 + assert 0 <= b <= 10 + res = (a << b) >> b # ID: shift + a += 1 + return res + # + log = self.run(main, [2], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('shift', "") # optimized away diff --git a/pypy/tool/algo/color.py b/pypy/tool/algo/color.py --- a/pypy/tool/algo/color.py +++ b/pypy/tool/algo/color.py @@ -29,7 +29,7 @@ return [v for v in self._all_nodes if v in self.neighbours] def lexicographic_order(self): - """Enumerate a lexicographic breath-first ordering of the nodes.""" + """Enumerate a lexicographic breadth-first ordering of the nodes.""" sigma = [self.getnodes()[::-1]] if not sigma[0]: return diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,44 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test__ffi_call(self): - from pypy.rlib.test.test_libffi import get_libm_name - libm_name = get_libm_name(sys.platform) - out = self.run_source(''' - def main(): - try: - from _ffi 
import CDLL, types - except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') - return 0 - - libm = CDLL('%(libm_name)s') - pow = libm.getfunc('pow', [types.double, types.double], - types.double) - print pow.getaddr() - i = 0 - res = 0 - while i < 2000: - res += pow(2, 3) - i += 1 - return res - ''' % locals(), - 76, ([], 8.0*2000), threshold=1000) - pow_addr = int(out.splitlines()[0]) - ops = self.get_by_bytecode('CALL_FUNCTION') - assert len(ops) == 2 # we get two loops, because of specialization - call_function = ops[0] - last_ops = [op.getopname() for op in call_function[-5:]] - assert last_ops == ['force_token', - 'setfield_gc', - 'call_may_force', - 'guard_not_forced', - 'guard_no_exception'] - call = call_function[-3] - assert call.getarg(0).value == pow_addr - assert call.getarg(1).value == 2.0 - assert call.getarg(2).value == 3.0 - def test_shift(self): from sys import maxint maxvals = (-maxint-1, -maxint, maxint-1, maxint) From commits-noreply at bitbucket.org Fri Apr 22 15:10:12 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 15:10:12 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: missing import Message-ID: <20110422131012.3A9B5282C2A@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43533:f7040d882bf4 Date: 2011-04-22 10:28 +0200 http://bitbucket.org/pypy/pypy/changeset/f7040d882bf4/ Log: missing import diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -5,7 +5,7 @@ LEVEL_NONNULL, \ LEVEL_UNKNOWN, \ MININT, MAXINT -from pypy.jit.metainterp.history import BoxInt, ConstInt +from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxPtr from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.optimizeopt.intutils import IntBound from pypy.jit.metainterp.resoperation import rop, 
ResOperation From commits-noreply at bitbucket.org Fri Apr 22 15:10:15 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 15:10:15 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: CALL_LOOPINVARIANT support Message-ID: <20110422131015.34CB7282C2D@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43534:7f7eb269501f Date: 2011-04-22 10:47 +0200 http://bitbucket.org/pypy/pypy/changeset/7f7eb269501f/ Log: CALL_LOOPINVARIANT support diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1282,8 +1282,8 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_value=2) - self.check_loops(guard_class=0, guard_value=5, everywhere=True) + self.check_loops(guard_class=0, guard_value=3) + self.check_loops(guard_class=0, guard_value=6, everywhere=True) def test_merge_guardnonnull_guardclass(self): from pypy.rlib.objectmodel import instantiate @@ -1437,8 +1437,8 @@ y.v = g(y.v) - y.v/y.v + lc/l[0] - 1 return y.v res = self.meta_interp(f, [20], listops=True) - self.check_loops(getfield_gc=0, getarrayitem_gc=0) - self.check_loops(getfield_gc=1, getarrayitem_gc=0, everywhere=True) + self.check_loops(getfield_gc=1, getarrayitem_gc=0) + self.check_loops(getfield_gc=2, getarrayitem_gc=0, everywhere=True) def test_guard_isnull_nonnull(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'res']) diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -14,6 +14,7 @@ """ def __init__(self): self.loop_invariant_results = {} + self.loop_invariant_producer = {} def reconstruct_for_next_iteration(self, surviving_boxes, optimizer, valuemap): @@ -22,6 +23,11 @@ new.loop_invariant_results[key] = \ 
value.get_cloned(new, valuemap) return new + + def produce_potential_short_preamble_ops(self, potential_ops): + for op in self.loop_invariant_producer.values(): + potential_ops[op.result] = op + def propagate_forward(self, op): args = self.optimizer.make_args_key(op) @@ -316,12 +322,14 @@ # expects a compile-time constant assert isinstance(arg, Const) key = make_hashable_int(arg.getint()) + resvalue = self.loop_invariant_results.get(key, None) if resvalue is not None: self.make_equal_to(op.result, resvalue) return # change the op to be a normal call, from the backend's point of view # there is no reason to have a separate operation for this + self.loop_invariant_producer[key] = op op = op.copy_and_change(rop.CALL) self.emit_operation(op) resvalue = self.getvalue(op.result) From commits-noreply at bitbucket.org Fri Apr 22 15:10:18 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Fri, 22 Apr 2011 15:10:18 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: short preamble will need key boxes aswell since they are used in the snapshots Message-ID: <20110422131018.D8018282C2B@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43535:89f55c6c0db4 Date: 2011-04-22 15:08 +0200 http://bitbucket.org/pypy/pypy/changeset/89f55c6c0db4/ Log: short preamble will need key boxes aswell since they are used in the snapshots diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -175,7 +175,7 @@ initial_inputargs_len = len(inputargs) - inputargs, short = self.inline(self.cloned_operations, + inputargs, short_inputargs, short = self.inline(self.cloned_operations, loop.inputargs, jump_args, virtual_state) #except KeyError: @@ -225,9 +225,10 @@ short[i] = op short_loop = TreeLoop('short preamble') - short_loop.inputargs = loop.inputargs[:initial_inputargs_len] + short_loop.inputargs = short_inputargs 
short_loop.operations = short + # Clone ops and boxes to get private versions and newargs = [a.clonebox() for a in short_loop.inputargs] inliner = Inliner(short_loop.inputargs, newargs) @@ -261,6 +262,7 @@ values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values) short_jumpargs = inputargs[:] + short_inputargs = virtual_state.make_inputargs(values, keyboxes=True) # This loop is equivalent to the main optimization loop in # Optimizer.propagate_all_forward @@ -268,19 +270,21 @@ for newop in loop_operations: newop = inliner.inline_op(newop, clone=False) if newop.getopnum() == rop.JUMP: - values = [self.getvalue(arg) for arg in newop.getarglist()] - newop.initarglist(virtual_state.make_inputargs(values)) jumpop = newop break #self.optimizer.first_optimization.propagate_forward(newop) self.optimizer.send_extra_operation(newop) - assert jumpop self.boxes_created_this_iteration = {} - jumpargs = jumpop.getarglist() - self.short_inliner = Inliner(inputargs, jumpargs) + assert jumpop + values = [self.getvalue(arg) for arg in jumpop.getarglist()] + jumpargs = virtual_state.make_inputargs(values) + newop.initarglist(jumpargs) + jmp_to_short_args = virtual_state.make_inputargs(values, keyboxes=True) + self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) + short = [] short_seen = {} for box, const in self.constant_inputargs.items(): @@ -316,7 +320,7 @@ jumpop.initarglist(jumpargs) self.optimizer.send_extra_operation(jumpop) short.append(ResOperation(rop.JUMP, short_jumpargs, None)) - return inputargs, short + return inputargs, short_inputargs, short def add_op_to_short(self, op, short, short_seen): if op is None: @@ -640,8 +644,11 @@ #if self.inline(sh.operations, sh.inputargs, # op.getarglist(), dryrun=True): try: - self.inline(sh.operations, sh.inputargs, - op.getarglist()) + values = [self.getvalue(arg) + for arg in op.getarglist()] + args = virtual_state.make_inputargs(values, + keyboxes=True) + 
self.inline(sh.operations, sh.inputargs, args) except InvalidLoop: debug_print("Inlining failed unexpectedly", "jumping to preamble instead") diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1703,7 +1703,7 @@ res = self.meta_interp(g, [6, 14]) assert res == g(6, 14) self.check_loop_count(9) - self.check_loops(getarrayitem_gc=6, everywhere=True) + self.check_loops(getarrayitem_gc=8, everywhere=True) def test_multiple_specialied_versions_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -5,7 +5,7 @@ LEVEL_NONNULL, \ LEVEL_UNKNOWN, \ MININT, MAXINT -from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxPtr +from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.optimizeopt.intutils import IntBound from pypy.jit.metainterp.resoperation import rop, ResOperation @@ -222,13 +222,20 @@ self.state[i].generate_guards(other.state[i], args[i], cpu, extra_guards) - def make_inputargs(self, values): + def make_inputargs(self, values, keyboxes=False): assert len(values) == len(self.state) inputargs = [] seen_inputargs = {} for i in range(len(values)): self.state[i].enum_forced_boxes(inputargs, seen_inputargs, values[i]) + + if keyboxes: + for value in values: + box = value.get_key_box() + if box not in inputargs and not isinstance(box, Const): + inputargs.append(box) + return inputargs From commits-noreply at bitbucket.org Fri Apr 22 16:12:42 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 22 Apr 2011 16:12:42 +0200 (CEST) Subject: [pypy-svn] pypy 32ptr-on-64bit: Simplify the 
logic and remove ExtraAttributes. The number 8 has been tweaked Message-ID: <20110422141242.8A174282C2A@codespeak.net> Author: Armin Rigo Branch: 32ptr-on-64bit Changeset: r43536:164a683483ca Date: 2011-04-22 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/164a683483ca/ Log: Simplify the logic and remove ExtraAttributes. The number 8 has been tweaked as giving the slowest possible memory usage on "pypy translate.py". diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -471,6 +471,7 @@ SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers SUBCLASSES_MAX_FIELDS = 5 +SUBCLASSES_COMPRESSPTR_FIELDS = 8 def memo_get_subclass_of_correct_size(space, supercls): key = space, supercls @@ -480,19 +481,14 @@ assert not hasattr(supercls, "__del__") result = [] if space.config.translation.compressptr: - # with 'compressptr', there are two issues: first we would only - # need half as many classes, because there is no point in having a - # class that contains an odd number of hiddengcrefs (there is - # one too for the map, so there is no point in having an even - # number of attributes); and we avoid using rlib.rerased because - # such an erased pointer cannot be compressed, as it might point - # to a list. For now we assume that SUBCLASSES_MIN_FIELDS == - # SUBCLASSES_MAX_FIELDS == odd number. 
- assert SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS - assert SUBCLASSES_MAX_FIELDS % 2 == 1 + # with 'compressptr', we need an even number of fields: + # [--header--][map;fld1][fld2;fld3][fld4;fld5][--fld6--] + # the last field (in the example above, fld6) needs a full + # word, because it might point to a list; list pointers + # cannot be compressed + assert SUBCLASSES_COMPRESSPTR_FIELDS % 2 == 0 result.append(_make_subclass_size_n(supercls, - SUBCLASSES_MAX_FIELDS, - use_erased=False)) + SUBCLASSES_COMPRESSPTR_FIELDS)) else: # common case for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1): @@ -510,23 +506,17 @@ erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage item") erase_list, unerase_list = rerased.new_erasing_pair("mapdict storage list") -def _make_subclass_size_n(supercls, n, use_erased=True): +def _make_subclass_size_n(supercls, n): from pypy.rlib import unroll rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) - if use_erased: - erase = erase_item - unerase = unerase_item - else: - erase = lambda x: x - unerase = lambda x: x # class subcls(BaseMapdictObject, supercls): _nmin1 = nmin1 for _i in rangenmin1: locals()["_value%s" % _i] = None - locals()["_value%s" % nmin1] = erase(None) + locals()["_value%s" % nmin1] = erase_item(None) def _init_empty(self, map): self.map = map @@ -536,11 +526,7 @@ def _mapdict_get_storage_list(self): erased = getattr(self, "_value%s" % nmin1) - if use_erased: - return unerase_list(erased) - else: - assert isinstance(erased, ExtraAttributes) - return erased.storage + return unerase_list(erased) def _mapdict_read_storage(self, index): assert index >= 0 @@ -551,7 +537,7 @@ if self._has_storage_list(): return self._mapdict_get_storage_list()[index - nmin1] erased = getattr(self, "_value%s" % nmin1) - return unerase(erased) + return unerase_item(erased) def _mapdict_write_storage(self, index, value): for i in rangenmin1: @@ -561,7 +547,7 
@@ if self._has_storage_list(): self._mapdict_get_storage_list()[index - nmin1] = value return - erased = erase(value) + erased = erase_item(value) setattr(self, "_value%s" % nmin1, erased) def _mapdict_storage_length(self): @@ -581,33 +567,25 @@ has_storage_list = self._has_storage_list() if len_storage < n: assert not has_storage_list - erased = erase(None) + erased = erase_item(None) elif len_storage == n: assert not has_storage_list - erased = erase(storage[nmin1]) + erased = erase_item(storage[nmin1]) elif not has_storage_list: # storage is longer than self.map.length() only due to # overallocation - erased = erase(storage[nmin1]) + erased = erase_item(storage[nmin1]) # in theory, we should be ultra-paranoid and check all entries, # but checking just one should catch most problems anyway: assert storage[n] is None else: storage_list = storage[nmin1:] - if use_erased: - erased = erase_list(storage_list) - else: - erased = ExtraAttributes(storage_list) + erased = erase_list(storage_list) setattr(self, "_value%s" % nmin1, erased) subcls.__name__ = supercls.__name__ + "Size%s" % n return subcls -class ExtraAttributes(W_Root): - def __init__(self, storage): - from pypy.rlib.debug import make_sure_not_resized - self.storage = make_sure_not_resized(storage) - # ____________________________________________________________ # dict implementation diff --git a/pypy/rlib/rerased.py b/pypy/rlib/rerased.py --- a/pypy/rlib/rerased.py +++ b/pypy/rlib/rerased.py @@ -150,7 +150,6 @@ return hop.gendirectcall(ll_unerase_int, v) def ll_unerase_int(gcref): - from pypy.rpython.lltypesystem.lloperation import llop from pypy.rlib.debug import ll_assert x = llop.cast_ptr_to_int(lltype.Signed, gcref) ll_assert((x&1) != 0, "unerased_int(): not an integer") From commits-noreply at bitbucket.org Fri Apr 22 20:05:02 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Fri, 22 Apr 2011 20:05:02 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-s-star-buffer: some assorted changes 
trying to add new-style buffer support to PyArg_ParseTuple - not working Message-ID: <20110422180502.80B8A282C2A@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer-s-star-buffer Changeset: r43537:31d9ac42ba34 Date: 2011-04-22 09:14 -0400 http://bitbucket.org/pypy/pypy/changeset/31d9ac42ba34/ Log: some assorted changes trying to add new-style buffer support to PyArg_ParseTuple - not working diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -10,7 +10,7 @@ cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - Py_TPFLAGS_HAVE_GETCHARBUFFER, + Py_TPFLAGS_HAVE_GETCHARBUFFER, Py_buffer, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, @@ -406,6 +406,26 @@ pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER + + at cpython_api([PyObject, lltype.Ptr(Py_buffer), lltype.Signed], lltype.Signed, + external=False, error=-1) +def memoryview_getbuffer(space, w_view, buf, flags): + # buf.c_obj = lltype.nullptr(PyObject.TO) + buf.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(w_view.as_str())) + buf.c_len = w_view.getlength() + print 'set length to', buf.c_len + print 'set buf to', buf.c_buf + return 0 + +def setup_memoryview_buffer_procs(space, pto): + c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) + c_buf.c_bf_getbuffer = llhelper( + memoryview_getbuffer.api_func.functype, + memoryview_getbuffer.api_func.get_wrapper(space)) + pto.c_tp_as_buffer = c_buf + pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER + + @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc @@ -444,6 +464,9 @@ # buffer protocol if 
space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) + elif space.is_w( + w_type, space.getattr(space.builtin, space.wrap("memoryview"))): + setup_memoryview_buffer_procs(space, pto) pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -442,4 +442,5 @@ @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) def PyBuffer_Release(space, view): + print 'releasing', view.c_obj Py_DecRef(space, view.c_obj) diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,7 +1,7 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, - PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, + PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_buffer, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef @@ -58,8 +58,9 @@ writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) -## We don't support new buffer interface for now -getbufferproc = rffi.VOIDP + +## We don't support new buffer interface for now !!! 
YEA RIGHT +getbufferproc = P(FT([PyO, P(Py_buffer), lltype.Signed], lltype.Signed)) releasebufferproc = rffi.VOIDP diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -775,17 +775,12 @@ case 's': {/* string */ if (*format == '*') { Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); - + printf("star case\n"); if (PyString_Check(arg)) { PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } else { - PyErr_SetString( - PyExc_NotImplementedError, - "s* not implemented for non-string values"); - return NULL; - } + } #if 0 #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { @@ -798,12 +793,13 @@ 1, 0); } #endif +#endif else { /* any buffer-like object */ char *buf; + printf("about to getbuffer\n"); if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } -#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", @@ -1325,6 +1321,7 @@ { PyBufferProcs *pb = arg->ob_type->tp_as_buffer; Py_ssize_t count; +#if 0 if (pb == NULL || pb->bf_getreadbuffer == NULL || pb->bf_getsegcount == NULL || @@ -1336,44 +1333,50 @@ *errmsg = "string or single-segment read-only buffer"; return -1; } +#endif if ((count = (*pb->bf_getreadbuffer)(arg, 0, p)) < 0) { *errmsg = "(unspecified)"; } return count; } -#if 0 //YYY static int getbuffer(PyObject *arg, Py_buffer *view, char **errmsg) { void *buf; Py_ssize_t count; PyBufferProcs *pb = arg->ob_type->tp_as_buffer; + printf("pb %p\n", pb); if (pb == NULL) { *errmsg = "string or buffer"; return -1; } + if (pb->bf_getbuffer) { if (pb->bf_getbuffer(arg, view, 0) < 0) { *errmsg = "convertible to a buffer"; return -1; } +#if 0 if (!PyBuffer_IsContiguous(view, 'C')) { *errmsg = "contiguous buffer"; return -1; } +#endif return 0; } + printf("about to convertbuffer\n"); count = convertbuffer(arg, &buf, errmsg); if (count < 0) { + printf("error 
converting it\n"); *errmsg = "convertible to a buffer"; return count; } + printf("converted it\n"); PyBuffer_FillInfo(view, NULL, buf, count, 1, 0); return 0; } -#endif /* Support for keyword arguments donated by Geoff Philbrick */ diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -129,6 +129,34 @@ assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + def test_pyarg_parse_buffer_py_buffer(self): + """ + The `s*` format specifier can also be used to parse a memoryview into a + Py_buffer structure containing a pointer to the memoryview's data and + the length of that data. + """ + memview = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + printf("about to parse\\n"); + if (!PyArg_ParseTuple(args, "s*", &buf)) { + printf("parse fail\\n"); + return NULL; + } + printf("parse win\\n"); + result = PyString_FromStringAndSize(buf.buf, buf.len); + printf("release\\n"); + PyBuffer_Release(&buf); + printf("return\\n"); + return result; + ''') + raises(TypeError, "memview(None)") + raises(TypeError, "memview(3)") + assert 'foo\0bar\0baz' == memview(memoryview('foo\0bar\0baz')) + assert 'foo\0bar\0baz' == memview(buffer('foo\0bar\0baz')) + + def test_pyarg_parse_charbuf_and_length(self): """ The `t#` format specifier can be used to parse a read-only 8-bit From commits-noreply at bitbucket.org Fri Apr 22 20:05:04 2011 From: commits-noreply at bitbucket.org (exarkun) Date: Fri, 22 Apr 2011 20:05:04 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-new: Copy documentation from CPython; note the incompleteness of this implementation. 
Message-ID: <20110422180504.26292282C2A@codespeak.net> Author: Jean-Paul Calderone Branch: pyarg-parsebuffer-new Changeset: r43538:abe5265d2444 Date: 2011-04-22 14:03 -0400 http://bitbucket.org/pypy/pypy/changeset/abe5265d2444/ Log: Copy documentation from CPython; note the incompleteness of this implementation. diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -433,6 +433,14 @@ @cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL) def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): + """ + Fills in a buffer-info structure correctly for an exporter that can only + share a contiguous chunk of memory of "unsigned bytes" of the given + length. Returns 0 on success and -1 (with raising an error) on error. + + This is not a complete re-implementation of the CPython API; it only + provides a subset of CPython's behavior. + """ view.c_buf = buf view.c_len = length view.c_obj = obj @@ -442,4 +450,10 @@ @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) def PyBuffer_Release(space, view): + """ + Releases a Py_buffer obtained from getbuffer ParseTuple's s*. + + This is not a complete re-implementation of the CPython API; it only + provides a subset of CPython's behavior. 
+ """ Py_DecRef(space, view.c_obj) From commits-noreply at bitbucket.org Sat Apr 23 11:55:10 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 23 Apr 2011 11:55:10 +0200 (CEST) Subject: [pypy-svn] pypy default: shift optimizations reenabled, now with proper overflow checking Message-ID: <20110423095510.60DC4282BEA@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43539:065cd49333e7 Date: 2011-04-23 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/065cd49333e7/ Log: shift optimizations reenabled, now with proper overflow checking diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -4398,6 +4398,8 @@ i4 = int_rshift(i3, i2) i5 = int_lshift(i1, 2) i6 = int_rshift(i5, 2) + i6t= int_eq(i6, i1) + guard_true(i6t) [] i7 = int_lshift(i1, 100) i8 = int_rshift(i7, 100) i9 = int_lt(i1b, 100) @@ -4422,6 +4424,8 @@ i4 = int_rshift(i3, i2) i5 = int_lshift(i1, 2) i6 = int_rshift(i5, 2) + i6t= int_eq(i6, i1) + guard_true(i6t) [] i7 = int_lshift(i1, 100) i8 = int_rshift(i7, 100) i9 = int_lt(i1b, 100) @@ -4431,11 +4435,8 @@ i13 = int_lshift(i1b, i2) i14 = int_rshift(i13, i2) i15 = int_lshift(i1b, 2) - i16 = int_rshift(i15, 2) i17 = int_lshift(i1b, 100) i18 = int_rshift(i17, 100) - i19 = int_eq(i1b, i16) - guard_true(i19) [] jump(i2, i3, i1b, i2b) """ self.optimize_loop(ops, expected) diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -130,12 +130,12 @@ r = self.getvalue(op.result) b = v1.intbound.lshift_bound(v2.intbound) r.intbound.intersect(b) - # --- The following is actually wrong if the INT_LSHIFT overflowed. 
- # --- It is precisely the pattern we use to detect overflows of the - # --- app-level '<<' operator: INT_LSHIFT/INT_RSHIFT/INT_EQ - #if b.has_lower and b.has_upper: - # # Synthesize the reverse op for optimize_default to reuse - # self.pure(rop.INT_RSHIFT, [op.result, op.getarg(1)], op.getarg(0)) + # intbound.lshift_bound checks for an overflow and if the + # lshift can be proven not to overflow sets b.has_upper and + # b.has_lower + if b.has_lower and b.has_upper: + # Synthesize the reverse op for optimize_default to reuse + self.pure(rop.INT_RSHIFT, [op.result, op.getarg(1)], op.getarg(0)) def optimize_INT_RSHIFT(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,4 +1,4 @@ -from pypy.rlib.rarithmetic import ovfcheck +from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') @@ -163,12 +163,12 @@ other.has_upper and other.has_lower and \ other.known_ge(IntBound(0, 0)): try: - vals = (ovfcheck(self.upper * pow2(other.upper)), - ovfcheck(self.upper * pow2(other.lower)), - ovfcheck(self.lower * pow2(other.upper)), - ovfcheck(self.lower * pow2(other.lower))) + vals = (ovfcheck_lshift(self.upper, other.upper), + ovfcheck_lshift(self.upper, other.lower), + ovfcheck_lshift(self.lower, other.upper), + ovfcheck_lshift(self.lower, other.lower)) return IntBound(min4(vals), max4(vals)) - except OverflowError: + except (OverflowError, ValueError): return IntUnbounded() else: return IntUnbounded() @@ -177,14 +177,11 @@ if self.has_upper and self.has_lower and \ other.has_upper and other.has_lower and \ other.known_ge(IntBound(0, 0)): - try: - vals = (ovfcheck(self.upper / pow2(other.upper)), - ovfcheck(self.upper / pow2(other.lower)), - ovfcheck(self.lower / pow2(other.upper)), - 
ovfcheck(self.lower / pow2(other.lower))) - return IntBound(min4(vals), max4(vals)) - except OverflowError: - return IntUnbounded() + vals = (self.upper >> other.upper, + self.upper >> other.lower, + self.lower >> other.upper, + self.lower >> other.lower) + return IntBound(min4(vals), max4(vals)) else: return IntUnbounded() @@ -252,11 +249,3 @@ def max4(t): return max(max(t[0], t[1]), max(t[2], t[3])) - -def pow2(x): - y = 1 << x - if y < 1: - raise OverflowError, "pow2 did overflow" - return y - - diff --git a/pypy/jit/metainterp/test/test_intbound.py b/pypy/jit/metainterp/test/test_intbound.py --- a/pypy/jit/metainterp/test/test_intbound.py +++ b/pypy/jit/metainterp/test/test_intbound.py @@ -1,6 +1,7 @@ from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded from copy import copy +import sys def bound(a,b): if a is None and b is None: @@ -221,6 +222,14 @@ assert bleft.contains(n1 << n2) assert bright.contains(n1 >> n2) +def test_shift_overflow(): + b10 = IntBound(0, 10) + b100 = IntBound(0, 100) + bmax = IntBound(0, sys.maxint/2) + assert not b10.lshift_bound(b100).has_upper + assert not bmax.lshift_bound(b10).has_upper + assert b10.lshift_bound(b10).has_upper + def test_div_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1984,6 +1984,122 @@ assert res == 12 self.check_tree_loop_count(2) + def test_overflowing_shift_pos(self): + myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) + def f1(a, b): + n = sa = 0 + while n < 10: + myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) + if 0 < a < 10: pass + if 0 < b < 10: pass + sa += (a << b) >> b + n += 1 + return sa + + def f2(a, b): + n = sa = 0 + while n < 10: + myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) + if 0 < a < hint(sys.maxint/2, 
promote=True): pass + if 0 < b < 100: pass + sa += (a << b) >> b + n += 1 + return sa + + assert self.meta_interp(f1, [5, 5]) == 50 + self.check_loops(int_rshift=0, everywhere=True) + + for f in (f1, f2): + assert self.meta_interp(f, [5, 10]) == 50 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [10, 5]) == 100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [10, 10]) == 100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [5, 100]) == 0 + self.check_loops(int_rshift=1, everywhere=True) + + bigval = 1 + while (bigval << 3).__class__ is int: + bigval = bigval << 1 + + assert self.meta_interp(f, [bigval, 5]) == 0 + self.check_loops(int_rshift=1, everywhere=True) + + def test_overflowing_shift_neg(self): + myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) + def f1(a, b): + n = sa = 0 + while n < 10: + myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) + if -10 < a < 0: pass + if 0 < b < 10: pass + sa += (a << b) >> b + n += 1 + return sa + + def f2(a, b): + n = sa = 0 + while n < 10: + myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) + if -hint(sys.maxint/2, promote=True) < a < 0: pass + if 0 < b < 100: pass + sa += (a << b) >> b + n += 1 + return sa + + assert self.meta_interp(f1, [-5, 5]) == -50 + self.check_loops(int_rshift=0, everywhere=True) + + for f in (f1, f2): + assert self.meta_interp(f, [-5, 10]) == -50 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [-10, 5]) == -100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [-10, 10]) == -100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [-5, 100]) == 0 + self.check_loops(int_rshift=1, everywhere=True) + + bigval = 1 + while (bigval << 3).__class__ is int: + bigval = bigval << 1 + + assert self.meta_interp(f, [-bigval, 5]) == 0 + self.check_loops(int_rshift=1, everywhere=True) + + def 
notest_overflowing_shift2(self): + myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) + def f(a, b): + n = sa = 0 + while n < 10: + myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) + if 0 < a < hint(sys.maxint/2, promote=True): pass + if 0 < b < 100: pass + sa += (a << b) >> b + n += 1 + return sa + + assert self.meta_interp(f, [5, 5]) == 50 + self.check_loops(int_rshift=0, everywhere=True) + + assert self.meta_interp(f, [5, 10]) == 50 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [10, 5]) == 100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [10, 10]) == 100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [5, 100]) == 0 + self.check_loops(int_rshift=1, everywhere=True) class TestOOtype(BasicTests, OOJitMixin): From commits-noreply at bitbucket.org Sat Apr 23 11:55:14 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Sat, 23 Apr 2011 11:55:14 +0200 (CEST) Subject: [pypy-svn] pypy default: hg merge Message-ID: <20110423095514.4025A282C2C@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43540:5c963d968cfe Date: 2011-04-23 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/5c963d968cfe/ Log: hg merge diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1565,3 +1565,42 @@ i12 = int_ge(i10, 0) guard_true(i12, descr=...) 
""") + + def test_shift_intbound(self): + def main(b): + res = 0 + a = 0 + while a < 300: + assert a >= 0 + assert 0 <= b <= 10 + val = a >> b + if val >= 0: # ID: rshift + res += 1 + val = a << b + if val >= 0: # ID: lshift + res += 2 + a += 1 + return res + # + log = self.run(main, [2], threshold=200) + assert log.result == 300*3 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('rshift', "") # guard optimized away + assert loop.match_by_id('lshift', "") # guard optimized away + + def test_lshift_and_then_rshift(self): + py.test.skip('fixme, this optimization is disabled') + def main(b): + res = 0 + a = 0 + while res < 300: + assert a >= 0 + assert 0 <= b <= 10 + res = (a << b) >> b # ID: shift + a += 1 + return res + # + log = self.run(main, [2], threshold=200) + assert log.result == 300 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('shift', "") # optimized away diff --git a/pypy/tool/algo/color.py b/pypy/tool/algo/color.py --- a/pypy/tool/algo/color.py +++ b/pypy/tool/algo/color.py @@ -29,7 +29,7 @@ return [v for v in self._all_nodes if v in self.neighbours] def lexicographic_order(self): - """Enumerate a lexicographic breath-first ordering of the nodes.""" + """Enumerate a lexicographic breadth-first ordering of the nodes.""" sigma = [self.getnodes()[::-1]] if not sigma[0]: return diff --git a/pypy/module/pypyjit/test/test_pypy_c.py b/pypy/module/pypyjit/test/test_pypy_c.py --- a/pypy/module/pypyjit/test/test_pypy_c.py +++ b/pypy/module/pypyjit/test/test_pypy_c.py @@ -223,44 +223,6 @@ return total ''' % startvalue, 170, ([], startvalue + 4999450000L)) - def test__ffi_call(self): - from pypy.rlib.test.test_libffi import get_libm_name - libm_name = get_libm_name(sys.platform) - out = self.run_source(''' - def main(): - try: - from _ffi import CDLL, types - except ImportError: - sys.stdout.write('SKIP: cannot import _ffi') - return 0 - - libm = CDLL('%(libm_name)s') - pow = libm.getfunc('pow', [types.double, 
types.double], - types.double) - print pow.getaddr() - i = 0 - res = 0 - while i < 2000: - res += pow(2, 3) - i += 1 - return res - ''' % locals(), - 76, ([], 8.0*2000), threshold=1000) - pow_addr = int(out.splitlines()[0]) - ops = self.get_by_bytecode('CALL_FUNCTION') - assert len(ops) == 2 # we get two loops, because of specialization - call_function = ops[0] - last_ops = [op.getopname() for op in call_function[-5:]] - assert last_ops == ['force_token', - 'setfield_gc', - 'call_may_force', - 'guard_not_forced', - 'guard_no_exception'] - call = call_function[-3] - assert call.getarg(0).value == pow_addr - assert call.getarg(1).value == 2.0 - assert call.getarg(2).value == 3.0 - def test_shift(self): from sys import maxint maxvals = (-maxint-1, -maxint, maxint-1, maxint) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -15,6 +15,7 @@ from pypy.rlib.streamio import StreamErrors from pypy.rlib.rarithmetic import intmask from pypy.rlib.objectmodel import we_are_translated, specialize +from pypy.module.sys.version import PYPY_VERSION SEARCH_ERROR = 0 PY_SOURCE = 1 @@ -31,7 +32,7 @@ SO = ".pyd" else: SO = ".so" -DEFAULT_SOABI = 'pypy-14' +DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() From commits-noreply at bitbucket.org Mon Apr 25 10:34:39 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 10:34:39 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (all): planning for today Message-ID: <20110425083439.7D2B4282B90@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3531:f720a1de8bfa Date: 2011-04-25 10:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/f720a1de8bfa/ Log: (all): planning for today diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt new file mode 100644 --- /dev/null +++ 
b/sprintinfo/gothenburg-2011/planning.txt @@ -0,0 +1,38 @@ +people present: + - Armin + - Carl Friedrich + - Laura + - Håkan + - Lukas + - Anders + - Romain + - Dario + + +tasks: +- fix the projector +- release 1.5 + - fix the import problem (Armin, Romain) + - fix the jit tests (Håkan, Armin around + - do we have 2.7 or 2.7.1? rename the directory + - merge jit-lsprof (Carl Friedrich, Dario) + - merge exarkun's branches, after review (Armin, Romain) + - merge jitypes2? + - documentation (Laura, Carl Friedrich) + +- branches to be integrated/finished afterwards + - 32-on-64 + - lukas' branches: list-strategies/dict-strategies + - new-dict-proxy (Lukas) + - out-of-line guards + - refactor-not-in-translator + - håkan's branches + +- other tasks + - continue tracing after invalid loops + - look into cython + - investigate Open End software on top of PyPy (Lukas, Anders) + +- presentations/discussions + - Lukas' presentation on memory improvements (Tuesday) + - codespeak migration From commits-noreply at bitbucket.org Mon Apr 25 10:37:04 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:04 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: use the virtual state of the short preamble Message-ID: <20110425083704.4F75B282B90@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43543:c0e033404df8 Date: 2011-04-24 15:23 +0200 http://bitbucket.org/pypy/pypy/changeset/c0e033404df8/ Log: use the virtual state of the short preamble diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -651,8 +651,8 @@ try: values = [self.getvalue(arg) for arg in op.getarglist()] - args = virtual_state.make_inputargs(values, - keyboxes=True) + args = sh.virtual_state.make_inputargs(values, + keyboxes=True) self.inline(sh.operations, sh.inputargs, args) except InvalidLoop: 
debug_print("Inlining failed unexpectedly", From commits-noreply at bitbucket.org Mon Apr 25 10:37:11 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:11 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: esure that only pure_operation accetpted into short_boxes are propageted from preamble to loop Message-ID: <20110425083711.9F7D2282BEC@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43544:3ead4a52ca81 Date: 2011-04-24 19:22 +0200 http://bitbucket.org/pypy/pypy/changeset/3ead4a52ca81/ Log: esure that only pure_operation accetpted into short_boxes are propageted from preamble to loop diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py --- a/pypy/jit/metainterp/optimizeopt/fficall.py +++ b/pypy/jit/metainterp/optimizeopt/fficall.py @@ -67,7 +67,7 @@ def __init__(self): self.funcinfo = None - def reconstruct_for_next_iteration(self, surviving_boxes, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): return OptFfiCall() # FIXME: Should any status be saved for next iteration? diff --git a/pypy/jit/metainterp/optimizeopt/string.py b/pypy/jit/metainterp/optimizeopt/string.py --- a/pypy/jit/metainterp/optimizeopt/string.py +++ b/pypy/jit/metainterp/optimizeopt/string.py @@ -366,7 +366,7 @@ "Handling of strings and unicodes." 
enabled = True - def reconstruct_for_next_iteration(self, surviving_boxes, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): return OptString() diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -157,15 +157,18 @@ jumpop.initarglist([]) loop.preamble.operations = self.optimizer.newoperations - preamble_optimizer = self.optimizer - self.optimizer = self.optimizer.reconstruct_for_next_iteration(jump_args) + + self.optimizer.force_at_end_of_preamble(jump_args) + modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) - values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values) - sb = preamble_optimizer.produce_short_preamble_ops(inputargs) + + sb = self.optimizer.produce_short_preamble_ops(inputargs) self.short_boxes = sb + preamble_optimizer = self.optimizer + self.optimizer = self.optimizer.reconstruct_for_next_iteration(sb, jump_args) self.constant_inputargs = {} for box in jump_args: @@ -610,7 +613,7 @@ self.inliner = None - def reconstruct_for_next_iteration(self, surviving_boxes, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): return OptInlineShortPreamble(self.retraced) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2238,6 +2238,18 @@ assert self.meta_interp(f, [5, 100]) == 0 self.check_loops(int_rshift=1, everywhere=True) + def test_pure_op_not_to_be_propagated(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'sa']) + def f(n): + sa = 0 + while n > 0: + myjitdriver.jit_merge_point(n=n, sa=sa) + sa += n + 1 + n -= 1 + return sa + assert self.meta_interp(f, [10]) == f(10) + + class 
TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -136,7 +136,7 @@ self.cached_arrayitems = {} self.original_producer = {} - def reconstruct_for_next_iteration(self, surviving_boxes, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): new = OptHeap() diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -16,7 +16,7 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def reconstruct_for_next_iteration(self, surviving_boxes, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): new = OptRewrite() for key, value in self.loop_invariant_results.items(): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -80,13 +80,19 @@ def get_key_box(self): return self.box + def force_at_end_of_preamble(self): + pass + def get_cloned(self, optimizer, valuemap, force_if_needed=True): if self in valuemap: return valuemap[self] new = self.clone_for_next_iteration(optimizer) if new is None: if force_if_needed: - new = OptValue(self.force_box()) + # It is too late to force things here it must have been + # done already in force_at_end_of_preamble() + assert self.box + new = OptValue(self.box) else: return None else: @@ -271,13 +277,10 @@ def setup(self): pass - def force_at_end_of_preamble(self): - pass - def turned_constant(self, value): pass - def reconstruct_for_next_iteration(self, surviving_boxes=None, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes=None, optimizer=None, 
valuemap=None): #return self.__class__() raise NotImplementedError @@ -325,12 +328,11 @@ self.optimizations = optimizations - def force_at_end_of_preamble(self): - self.resumedata_memo = resume.ResumeDataLoopMemo(self.metainterp_sd) - for o in self.optimizations: - o.force_at_end_of_preamble() + def force_at_end_of_preamble(self, jumpargs): + for a in jumpargs: + self.getvalue(a).force_at_end_of_preamble() - def reconstruct_for_next_iteration(self, surviving_boxes=None, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes=None, optimizer=None, valuemap=None): assert optimizer is None assert valuemap is None @@ -339,7 +341,7 @@ valuemap = {} new = Optimizer(self.metainterp_sd, self.loop) - optimizations = [o.reconstruct_for_next_iteration(surviving_boxes, + optimizations = [o.reconstruct_for_next_iteration(short_boxes, surviving_boxes, new, valuemap) for o in self.optimizations] new.set_optimizations(optimizations) @@ -349,7 +351,10 @@ for value in new.bool_boxes.keys(): new.bool_boxes[value.get_cloned(new, valuemap)] = None - new.pure_operations = self.pure_operations + new.pure_operations = args_dict() + for key, op in self.pure_operations.items(): + if op.result in short_boxes: + new.pure_operations[key] = op new.producer = self.producer assert self.posponedop is None diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py --- a/pypy/jit/metainterp/optimizeopt/intbounds.py +++ b/pypy/jit/metainterp/optimizeopt/intbounds.py @@ -13,7 +13,7 @@ self.posponedop = None self.nextop = None - def reconstruct_for_next_iteration(self, surviving_boxes, optimizer, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): assert self.posponedop is None return OptIntBounds() diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ 
b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -31,6 +31,9 @@ self._really_force() return self.box + def force_at_end_of_preamble(self): + self.force_box() + def make_virtual_info(self, modifier, fieldnums): if fieldnums is None: return self._make_virtual(modifier) @@ -87,6 +90,9 @@ return False return True + def force_at_end_of_preamble(self): + pass + def _really_force(self): op = self.source_op assert op is not None @@ -237,6 +243,9 @@ assert isinstance(itemvalue, optimizer.OptValue) self._items[index] = itemvalue + def force_at_end_of_preamble(self): + pass + def _really_force(self): assert self.source_op is not None if not we_are_translated(): @@ -285,7 +294,7 @@ class OptVirtualize(optimizer.Optimization): "Virtualize objects until they escape." - def reconstruct_for_next_iteration(self, surviving_boxes, + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): return OptVirtualize() From commits-noreply at bitbucket.org Mon Apr 25 10:37:14 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:14 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: esure only cached reads accetpted into short_boxes are propageted from preamble to loop Message-ID: <20110425083714.72A6E282BF2@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43545:4cf12d065c2d Date: 2011-04-24 19:42 +0200 http://bitbucket.org/pypy/pypy/changeset/4cf12d065c2d/ Log: esure only cached reads accetpted into short_boxes are propageted from preamble to loop diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -93,12 +93,12 @@ fieldvalue = optheap.getvalue(op.getarg(1)) self.remember_field_value(structvalue, fieldvalue) - def get_cloned(self, optimizer, valuemap): + def get_cloned(self, optimizer, valuemap, short_boxes): assert self._lazy_setfield is None cf = 
CachedField() for structvalue, fieldvalue in self._cached_fields.iteritems(): op = self._cached_fields_getfield_op.get(structvalue, None) - if op: + if op and op.result in short_boxes: structvalue2 = structvalue.get_cloned(optimizer, valuemap) fieldvalue2 = fieldvalue .get_cloned(optimizer, valuemap) cf._cached_fields[structvalue2] = fieldvalue2 @@ -146,7 +146,7 @@ assert 0 # was: new.lazy_setfields = self.lazy_setfields for descr, d in self.cached_fields.items(): - new.cached_fields[descr] = d.get_cloned(optimizer, valuemap) + new.cached_fields[descr] = d.get_cloned(optimizer, valuemap, short_boxes) new.cached_arrayitems = {} for descr, d in self.cached_arrayitems.items(): @@ -155,15 +155,17 @@ for value, cache in d.items(): newcache = CachedArrayItems() newd[value.get_cloned(optimizer, valuemap)] = newcache - if cache.var_index_item and cache.var_index_getop: - newcache.var_index_item = \ - cache.var_index_item.get_cloned(optimizer, valuemap) - if cache.var_index_indexvalue: - newcache.var_index_indexvalue = \ - cache.var_index_indexvalue.get_cloned(optimizer, - valuemap) + if cache.var_index_getop and cache.var_index_getop.result in short_boxes: + if cache.var_index_item: + newcache.var_index_item = \ + cache.var_index_item.get_cloned(optimizer, valuemap) + if cache.var_index_indexvalue: + newcache.var_index_indexvalue = \ + cache.var_index_indexvalue.get_cloned(optimizer, + valuemap) for index, fieldvalue in cache.fixed_index_items.items(): - if cache.fixed_index_getops.get(index, None): + op = cache.fixed_index_getops.get(index, None) + if op and op.result in short_boxes: newcache.fixed_index_items[index] = \ fieldvalue.get_cloned(optimizer, valuemap) From commits-noreply at bitbucket.org Mon Apr 25 10:37:16 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:16 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: dont overwrite to make pure_operations match emitted_pure_operations as closely as possible Message-ID: 
<20110425083716.DC4A4282BF2@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43546:3f32bdbea460 Date: 2011-04-24 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/3f32bdbea460/ Log: dont overwrite to make pure_operations match emitted_pure_operations as closely as possible diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -264,7 +264,9 @@ def pure(self, opnum, args, result): op = ResOperation(opnum, args, result) - self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op + key = self.optimizer.make_args_key(op) + if key not in self.optimizer.pure_operations: + self.optimizer.pure_operations[key] = op def has_pure_result(self, opnum, args, descr): op = ResOperation(opnum, args, None) From commits-noreply at bitbucket.org Mon Apr 25 10:37:21 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:21 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: force unsupported vrituals and convert them into NotVirtual's when producing the VirtualState Message-ID: <20110425083721.AF5E2282BEC@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43547:a933cd00f4b7 Date: 2011-04-24 20:51 +0200 http://bitbucket.org/pypy/pypy/changeset/a933cd00f4b7/ Log: force unsupported vrituals and convert them into NotVirtual's when producing the VirtualState diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -158,8 +158,6 @@ loop.preamble.operations = self.optimizer.newoperations - self.optimizer.force_at_end_of_preamble(jump_args) - modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) values = [self.getvalue(arg) for arg in jump_args] @@ -631,6 
+629,7 @@ args = op.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) + for sh in short: ok = False extra_guards = [] diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -81,7 +81,7 @@ return self.box def force_at_end_of_preamble(self): - pass + return None def get_cloned(self, optimizer, valuemap, force_if_needed=True): if self in valuemap: @@ -330,10 +330,6 @@ self.optimizations = optimizations - def force_at_end_of_preamble(self, jumpargs): - for a in jumpargs: - self.getvalue(a).force_at_end_of_preamble() - def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes=None, optimizer=None, valuemap=None): assert optimizer is None diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -4,7 +4,7 @@ LEVEL_KNOWNCLASS, \ LEVEL_NONNULL, \ LEVEL_UNKNOWN, \ - MININT, MAXINT + MININT, MAXINT, OptValue from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.optimizeopt.intutils import IntBound @@ -271,7 +271,11 @@ def get_virtual_state(self, jump_args): for box in jump_args: value = self.getvalue(box) - value.get_args_for_fail(self) + box = value.force_at_end_of_preamble() + if box: + self.make_not_virtual(OptValue(box)) + else: + value.get_args_for_fail(self) return VirtualState([self.state(box) for box in jump_args]) diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -32,7 +32,7 @@ return self.box def 
force_at_end_of_preamble(self): - self.force_box() + return self.force_box() def make_virtual_info(self, modifier, fieldnums): if fieldnums is None: @@ -91,7 +91,7 @@ return True def force_at_end_of_preamble(self): - pass + return None def _really_force(self): op = self.source_op @@ -244,7 +244,7 @@ self._items[index] = itemvalue def force_at_end_of_preamble(self): - pass + return None def _really_force(self): assert self.source_op is not None From commits-noreply at bitbucket.org Mon Apr 25 10:37:24 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:24 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: force not suported virtuals as members of VirtualStructs Message-ID: <20110425083724.D9977282BEB@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43548:bcdfee77bd20 Date: 2011-04-24 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/bcdfee77bd20/ Log: force not suported virtuals as members of VirtualStructs diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -80,8 +80,8 @@ def get_key_box(self): return self.box - def force_at_end_of_preamble(self): - return None + def force_at_end_of_preamble(self, already_forced): + return self def get_cloned(self, optimizer, valuemap, force_if_needed=True): if self in valuemap: diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -269,13 +269,11 @@ return info def get_virtual_state(self, jump_args): + already_forced = {} for box in jump_args: value = self.getvalue(box) - box = value.force_at_end_of_preamble() - if box: - self.make_not_virtual(OptValue(box)) - else: - value.get_args_for_fail(self) + value = 
value.force_at_end_of_preamble(already_forced) + value.get_args_for_fail(self) return VirtualState([self.state(box) for box in jump_args]) diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -4,6 +4,7 @@ from pypy.jit.metainterp.optimizeutil import descrlist_dict from pypy.rlib.objectmodel import we_are_translated from pypy.jit.metainterp.optimizeopt import optimizer +from pypy.jit.metainterp.optimizeopt.optimizer import OptValue from pypy.jit.metainterp.executor import execute from pypy.jit.codewriter.heaptracker import vtable2descr @@ -31,8 +32,11 @@ self._really_force() return self.box - def force_at_end_of_preamble(self): - return self.force_box() + def force_at_end_of_preamble(self, already_forced): + value = already_forced.get(self, None) + if value: + return value + return OptValue(self.force_box()) def make_virtual_info(self, modifier, fieldnums): if fieldnums is None: @@ -90,8 +94,13 @@ return False return True - def force_at_end_of_preamble(self): - return None + def force_at_end_of_preamble(self, already_forced): + if self in already_forced: + return self + already_forced[self] = self + for ofs in self._fields.keys(): + self._fields[ofs] = self._fields[ofs].force_at_end_of_preamble(already_forced) + return self def _really_force(self): op = self.source_op @@ -244,7 +253,7 @@ self._items[index] = itemvalue def force_at_end_of_preamble(self): - return None + raise NotImplementedError def _really_force(self): assert self.source_op is not None From commits-noreply at bitbucket.org Mon Apr 25 10:37:28 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:28 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: support for forced virtual structs and for virtual arrays Message-ID: <20110425083728.07806282BEA@codespeak.net> Author: Hakan Ardo Branch: 
jit-short_from_state Changeset: r43549:f8a22117054c Date: 2011-04-24 22:27 +0200 http://bitbucket.org/pypy/pypy/changeset/f8a22117054c/ Log: support for forced virtual structs and for virtual arrays diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py --- a/pypy/jit/metainterp/optimizeopt/virtualize.py +++ b/pypy/jit/metainterp/optimizeopt/virtualize.py @@ -98,8 +98,9 @@ if self in already_forced: return self already_forced[self] = self - for ofs in self._fields.keys(): - self._fields[ofs] = self._fields[ofs].force_at_end_of_preamble(already_forced) + if self._fields: + for ofs in self._fields.keys(): + self._fields[ofs] = self._fields[ofs].force_at_end_of_preamble(already_forced) return self def _really_force(self): @@ -252,8 +253,13 @@ assert isinstance(itemvalue, optimizer.OptValue) self._items[index] = itemvalue - def force_at_end_of_preamble(self): - raise NotImplementedError + def force_at_end_of_preamble(self, already_forced): + if self in already_forced: + return self + already_forced[self] = self + for index in range(len(self._items)): + self._items[index] = self._items[index].force_at_end_of_preamble(already_forced) + return self def _really_force(self): assert self.source_op is not None From commits-noreply at bitbucket.org Mon Apr 25 10:37:30 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:30 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: translation fix Message-ID: <20110425083730.40766282B90@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43550:9e838c990995 Date: 2011-04-24 22:43 +0200 http://bitbucket.org/pypy/pypy/changeset/9e838c990995/ Log: translation fix diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -287,7 +287,7 @@ assert jumpop values = [self.getvalue(arg) for arg 
in jumpop.getarglist()] jumpargs = virtual_state.make_inputargs(values) - newop.initarglist(jumpargs) + jumpop.initarglist(jumpargs) jmp_to_short_args = virtual_state.make_inputargs(values, keyboxes=True) self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) From commits-noreply at bitbucket.org Mon Apr 25 10:37:33 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 10:37:33 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: translation fixes Message-ID: <20110425083733.3821B282C2D@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43551:a4048937b8c6 Date: 2011-04-25 10:36 +0200 http://bitbucket.org/pypy/pypy/changeset/a4048937b8c6/ Log: translation fixes diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py --- a/pypy/jit/metainterp/optimizeopt/unroll.py +++ b/pypy/jit/metainterp/optimizeopt/unroll.py @@ -214,8 +214,10 @@ # a lot of tests to be fixed... loop.preamble.operations = short[:] + # FIXME: combine with snapshot loop above short_resumedescr = start_resumedescr.clone_if_mutable() - self.inliner.inline_descr_inplace(short_resumedescr) + assert isinstance(short_resumedescr, ResumeGuardDescr) + self.inliner.inline_descr_inplace(short_resumedescr) snapshot = short_resumedescr.rd_snapshot while snapshot: snapshot.boxes = [self.getvalue(b).get_key_box() From commits-noreply at bitbucket.org Mon Apr 25 11:02:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 11:02:15 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: Make a branch for random checkins not part of the release 1.5. Message-ID: <20110425090215.A9537282B90@codespeak.net> Author: Armin Rigo Branch: post-release-1.5 Changeset: r43552:c611e41b99f4 Date: 2011-04-25 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/c611e41b99f4/ Log: Make a branch for random checkins not part of the release 1.5. 
From commits-noreply at bitbucket.org Mon Apr 25 11:14:04 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 11:14:04 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: Improve the tests. Needs an obscure workaround, but it's only Message-ID: <20110425091404.0309736C20F@codespeak.net> Author: Armin Rigo Branch: post-release-1.5 Changeset: r43553:240957c0ff30 Date: 2011-04-23 11:43 +0200 http://bitbucket.org/pypy/pypy/changeset/240957c0ff30/ Log: Improve the tests. Needs an obscure workaround, but it's only a problem with the test, not in real code. diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -22,11 +22,39 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_compiled_isnan(self): + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isnan(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(1e200, 1e200) # nan + assert not f(1e200, 1.0) # +inf + assert not f(1e200, -1.0) # -inf + assert not f(42.5, 2.3) # +finite + assert not f(42.5, -2.3) # -finite + def test_compiled_isinf(self): - def f(x): - return ll_math.ll_math_isinf(1. 
/ x) - f = compile(f, [float], backendopt=False) - assert f(5.5e-309) + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isinf(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(1e200, 1.0) # +inf + assert f(1e200, -1.0) # -inf + assert not f(1e200, 1e200) # nan + assert not f(42.5, 2.3) # +finite + assert not f(42.5, -2.3) # -finite + + +from pypy.rpython.lltypesystem import lltype +_A = lltype.GcArray(lltype.Float) +def normalize(x): + # workaround: force the C compiler to cast to a double + a = lltype.malloc(_A, 1) + a[0] = x + import time; time.time() + return a[0] def make_test_case((fnname, args, expected), dict): diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -157,9 +157,9 @@ self.interpret(fn, [1.0, 2.0, 3.0]) def test_copysign(self): - import math + from pypy.rlib import rfloat def fn(x, y): - return math.copysign(x, y) + return rfloat.copysign(x, y) assert self.interpret(fn, [42, -1]) == -42 assert self.interpret(fn, [42, -0.0]) == -42 assert self.interpret(fn, [42, 0.0]) == 42 @@ -172,21 +172,30 @@ assert self.interpret(fn, [0]) == 42.3 def test_isnan(self): - import math - def fn(x): - inf = x * x - nan = inf / inf - return math.isnan(nan) - assert self.interpret(fn, [1e200]) + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isnan(n1 / n2) + assert self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [1e200, 1.0]) # +inf + assert not self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [42.5, 2.3]) # +finite + assert not self.interpret(fn, [42.5, -2.3]) # -finite def test_isinf(self): - import math - def fn(x): - inf = x * x - return math.isinf(inf) - assert self.interpret(fn, [1e200]) + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isinf(n1 / n2) + 
assert self.interpret(fn, [1e200, 1.0]) # +inf + assert self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [42.5, 2.3]) # +finite + assert not self.interpret(fn, [42.5, -2.3]) # -finite - + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -91,8 +91,8 @@ # Custom implementations def ll_math_isnan(y): - # By not calling into the extenal function the JIT can inline this. Floats - # are awesome. + # By not calling into the external function the JIT can inline this. + # Floats are awesome. return y != y def ll_math_isinf(y): From commits-noreply at bitbucket.org Mon Apr 25 11:14:09 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 11:14:09 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: Introduce isfinite(). Message-ID: <20110425091409.CC8B2282BEA@codespeak.net> Author: Armin Rigo Branch: post-release-1.5 Changeset: r43554:27ae850d9aff Date: 2011-04-23 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/27ae850d9aff/ Log: Introduce isfinite(). 
diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -195,6 +195,18 @@ assert not self.interpret(fn, [42.5, 2.3]) # +finite assert not self.interpret(fn, [42.5, -2.3]) # -finite + def test_isfinite(self): + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isfinite(n1 / n2) + assert self.interpret(fn, [42.5, 2.3]) # +finite + assert self.interpret(fn, [42.5, -2.3]) # -finite + assert not self.interpret(fn, [1e200, 1.0]) # +inf + assert not self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [1e200, 1e200]) # nan + class TestLLtype(BaseTestRfloat, LLRtypeMixin): diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -22,6 +22,15 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_isfinite(self): + inf = 1e200 * 1e200 + nan = inf / inf + assert ll_math.ll_math_isfinite(0.0) + assert ll_math.ll_math_isfinite(-42.0) + assert not ll_math.ll_math_isfinite(nan) + assert not ll_math.ll_math_isnan(inf) + assert not ll_math.ll_math_isnan(-inf) + def test_compiled_isnan(self): def f(x, y): n1 = normalize(x * x) @@ -46,6 +55,18 @@ assert not f(42.5, 2.3) # +finite assert not f(42.5, -2.3) # -finite + def test_compiled_isfinite(self): + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isfinite(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(42.5, 2.3) # +finite + assert f(42.5, -2.3) # -finite + assert not f(1e200, 1.0) # +inf + assert not f(1e200, -1.0) # -inf + assert not f(1e200, 1e200) # nan + from pypy.rpython.lltypesystem import lltype _A = lltype.GcArray(lltype.Float) diff --git a/pypy/rpython/extfuncregistry.py 
b/pypy/rpython/extfuncregistry.py --- a/pypy/rpython/extfuncregistry.py +++ b/pypy/rpython/extfuncregistry.py @@ -36,6 +36,9 @@ register_external(rfloat.isnan, [float], bool, export_name="ll_math.ll_math_isnan", sandboxsafe=True, llimpl=ll_math.ll_math_isnan) +register_external(rfloat.isfinite, [float], bool, + export_name="ll_math.ll_math_isfinite", sandboxsafe=True, + llimpl=ll_math.ll_math_isfinite) register_external(rfloat.copysign, [float, float], float, export_name="ll_math.ll_math_copysign", sandboxsafe=True, llimpl=ll_math.ll_math_copysign) diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -8,7 +8,7 @@ from pypy.tool.autopath import pypydir from pypy.rlib import jit, rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN +from pypy.rlib.rfloat import isinf, isnan, isfinite, INFINITY, NAN if sys.platform == "win32": eci = ExternalCompilationInfo() @@ -99,6 +99,12 @@ # Use a bitwise OR so the JIT doesn't produce 2 different guards. return (y == INFINITY) | (y == -INFINITY) +def ll_math_isfinite(y): + # Use a custom hack that is reasonably well-suited to the JIT. + # Floats are awesome (bis). 
+ z = 0.0 * y + return z == z # i.e.: z is not a NaN + ll_math_floor = math_floor diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -158,12 +158,12 @@ return _formatd(x, code, precision, flags) def double_to_string(value, tp, precision, flags): - if isnan(value): - special = DIST_NAN + if isfinite(value): + special = DIST_FINITE elif isinf(value): special = DIST_INFINITY - else: - special = DIST_FINITE + else: #isnan(value): + special = DIST_NAN result = formatd(value, tp, precision, flags) return result, special @@ -344,7 +344,7 @@ def asinh(x): "NOT_RPYTHON" absx = abs(x) - if isnan(x) or isinf(x): + if not isfinite(x): return x if absx < _2_to_m28: return x @@ -405,3 +405,6 @@ r = math.floor(absx) return copysign(r, x) +def isfinite(x): + "NOT_RPYTHON" + return not isinf(x) and not isnan(x) From commits-noreply at bitbucket.org Mon Apr 25 11:14:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 11:14:15 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: In-progress. Message-ID: <20110425091415.3D7A6282B90@codespeak.net> Author: Armin Rigo Branch: post-release-1.5 Changeset: r43555:1fdb135e7cb0 Date: 2011-04-25 10:50 +0200 http://bitbucket.org/pypy/pypy/changeset/1fdb135e7cb0/ Log: In-progress. 
diff --git a/pypy/translator/c/node.py b/pypy/translator/c/node.py --- a/pypy/translator/c/node.py +++ b/pypy/translator/c/node.py @@ -11,7 +11,7 @@ from pypy.translator.c.support import c_char_array_constant, barebonearray from pypy.translator.c.primitive import PrimitiveType, name_signed from pypy.rlib import exports -from pypy.rlib.rfloat import isinf, isnan +from pypy.rlib.rfloat import isfinite from pypy.rlib.rstackovf import _StackOverflow from pypy.translator.c import extfunc from pypy.translator.tool.cbuild import ExternalCompilationInfo @@ -793,7 +793,7 @@ node = db.getcontainernode(value._obj) expr = 'NULL /*%s*/' % node.name node.where_to_copy_me.append('&%s' % access_expr) - elif typeOf(value) == Float and (isinf(value) or isnan(value)): + elif typeOf(value) == Float and not isfinite(value): db.late_initializations.append(('%s' % access_expr, db.get(value))) expr = '0.0 /* patched later by %sinfinity */' % ( '-+'[value > 0]) diff --git a/pypy/translator/c/test/test_genc.py b/pypy/translator/c/test/test_genc.py --- a/pypy/translator/c/test/test_genc.py +++ b/pypy/translator/c/test/test_genc.py @@ -273,7 +273,7 @@ assert res == 1.5 def test_nan_and_special_values(): - from pypy.rlib.rfloat import isnan, isinf, copysign + from pypy.rlib.rfloat import isnan, isinf, isfinite, copysign inf = 1e300 * 1e300 assert isinf(inf) nan = inf/inf @@ -283,6 +283,7 @@ (inf, lambda x: isinf(x) and x > 0.0), (-inf, lambda x: isinf(x) and x < 0.0), (nan, isnan), + (42.0, isfinite), (0.0, lambda x: not x and copysign(1., x) == 1.), (-0.0, lambda x: not x and copysign(1., x) == -1.), ]: diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -1,7 +1,7 @@ from pypy.rlib.rarithmetic import LONG_BIT, intmask, r_uint, r_ulonglong from pypy.rlib.rarithmetic import ovfcheck, r_longlong, widen from pypy.rlib.rarithmetic import most_neg_value_of_same_type -from pypy.rlib.rfloat import isinf, isnan +from pypy.rlib.rfloat import 
isfinite from pypy.rlib.debug import make_sure_not_resized, check_regular_int from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib import jit @@ -173,9 +173,15 @@ def fromfloat(dval): """ Create a new bigint object from a float """ # This function is not marked as pure because it can raise + if isfinite(dval): + return rbigint._fromfloat_finite(dval) + else: + raise OverflowError + + @staticmethod + @jit.purefunction + def _fromfloat_finite(dval): sign = 1 - if isinf(dval) or isnan(dval): - raise OverflowError if dval < 0.0: sign = -1 dval = -dval diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -267,14 +267,15 @@ In RPython, floats cannot be used with ints in dicts, anyway. """ from pypy.rlib.rarithmetic import intmask - from pypy.rlib.rfloat import isinf, isnan - if isinf(f): - if f < 0.0: - return -271828 - else: - return 314159 - elif isnan(f): - return 0 + from pypy.rlib.rfloat import isfinite, isinf + if not isfinite(f): + if isinf(f): + if f < 0.0: + return -271828 + else: + return 314159 + else: #isnan(f): + return 0 v, expo = math.frexp(f) v *= TAKE_NEXT hipart = int(v) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from pypy.rlib.rfloat import ( - isinf, isnan, INFINITY, NAN, copysign, formatd, + isinf, isnan, isfinite, INFINITY, NAN, copysign, formatd, DTSF_ADD_DOT_0, DTSF_STR_PRECISION) from pypy.rlib.rbigint import rbigint from pypy.rlib.objectmodel import we_are_translated @@ -102,7 +102,7 @@ def float_hex__Float(space, w_float): value = w_float.floatval - if isinf(value) or isnan(value): + if not isfinite(value): return str__Float(space, w_float) if value == 0.0: if copysign(1., value) == 
-1.: @@ -136,15 +136,15 @@ def float2string(space, w_float, code, precision): x = w_float.floatval # we special-case explicitly inf and nan here - if isinf(x): + if isfinite(x): + s = formatd(x, code, precision, DTSF_ADD_DOT_0) + elif isinf(x): if x > 0.0: s = "inf" else: s = "-inf" - elif isnan(x): + else: # isnan(x): s = "nan" - else: - s = formatd(x, code, precision, DTSF_ADD_DOT_0) return space.wrap(s) def repr__Float(space, w_float): @@ -179,7 +179,7 @@ if opname == 'eq' or opname == 'ne': def do_compare_bigint(f1, b2): """f1 is a float. b2 is a bigint.""" - if isinf(f1) or isnan(f1) or math.floor(f1) != f1: + if not isfinite(f1) or math.floor(f1) != f1: return opname == 'ne' b1 = rbigint.fromfloat(f1) res = b1.eq(b2) @@ -189,7 +189,7 @@ else: def do_compare_bigint(f1, b2): """f1 is a float. b2 is a bigint.""" - if isinf(f1) or isnan(f1): + if not isfinite(f1): return op(f1, 0.0) if opname == 'gt' or opname == 'le': # 'float > long' <==> 'ceil(float) > long' @@ -457,8 +457,6 @@ if x == 0.0: if y < 0.0: - if isinf(y): - return space.wrap(INFINITY) raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 cannot be raised to " "a negative power")) diff --git a/pypy/rlib/rstruct/ieee.py b/pypy/rlib/rstruct/ieee.py --- a/pypy/rlib/rstruct/ieee.py +++ b/pypy/rlib/rstruct/ieee.py @@ -87,12 +87,13 @@ raise ValueError("invalid size value") sign = rfloat.copysign(1.0, x) < 0.0 - if rfloat.isinf(x): - mant = r_ulonglong(0) - exp = MAX_EXP - MIN_EXP + 2 - elif rfloat.isnan(x): - mant = r_ulonglong(1) << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 + if not rfloat.isfinite(x): + if rfloat.isinf(x): + mant = r_ulonglong(0) + exp = MAX_EXP - MIN_EXP + 2 + else: # rfloat.isnan(x): + mant = r_ulonglong(1) << (MANT_DIG-2) # other values possible + exp = MAX_EXP - MIN_EXP + 2 elif x == 0.0: mant = r_ulonglong(0) exp = 0 From commits-noreply at bitbucket.org Mon Apr 25 11:25:03 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 
2011 11:25:03 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: look at the tracker before the release Message-ID: <20110425092503.B27A0282B90@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3532:d95b06dec89b Date: 2011-04-25 11:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/d95b06dec89b/ Log: look at the tracker before the release diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -19,6 +19,7 @@ - merge exarkun's branches, after review (Armin, Romain) - merge jitypes2? - documentation (Laura, Carl Friedrich) + - look at the tracker - branches to be integrated/finished afterwards - 32-on-64 From commits-noreply at bitbucket.org Mon Apr 25 11:40:48 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 11:40:48 +0200 (CEST) Subject: [pypy-svn] pypy default: cleanup non intended checkin Message-ID: <20110425094048.9B661282B90@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43556:6d5b7ba2e05e Date: 2011-04-25 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/6d5b7ba2e05e/ Log: cleanup non intended checkin diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2074,34 +2074,6 @@ assert self.meta_interp(f, [-bigval, 5]) == 0 self.check_loops(int_rshift=1, everywhere=True) - def notest_overflowing_shift2(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) - def f(a, b): - n = sa = 0 - while n < 10: - myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if 0 < a < hint(sys.maxint/2, promote=True): pass - if 0 < b < 100: pass - sa += (a << b) >> b - n += 1 - return sa - - assert self.meta_interp(f, [5, 5]) == 50 - self.check_loops(int_rshift=0, everywhere=True) - - assert self.meta_interp(f, [5, 10]) == 50 - 
self.check_loops(int_rshift=1, everywhere=True) - - assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=1, everywhere=True) - - assert self.meta_interp(f, [10, 10]) == 100 - self.check_loops(int_rshift=1, everywhere=True) - - assert self.meta_interp(f, [5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) - - class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): From commits-noreply at bitbucket.org Mon Apr 25 11:40:50 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 11:40:50 +0200 (CEST) Subject: [pypy-svn] pypy default: only use shiftcounts <=31 as the backend only will look at the lower 32/64 bits of it Message-ID: <20110425094050.65DB8282B90@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43557:08e1b91e6a36 Date: 2011-04-25 11:40 +0200 http://bitbucket.org/pypy/pypy/changeset/08e1b91e6a36/ Log: only use shiftcounts <=31 as the backend only will look at the lower 32/64 bits of it diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1990,9 +1990,9 @@ n = sa = 0 while n < 10: myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if 0 < a < 10: pass - if 0 < b < 10: pass - sa += (a << b) >> b + if 0 < a <= 5: pass + if 0 < b <= 5: pass + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2002,7 +2002,7 @@ myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) if 0 < a < hint(sys.maxint/2, promote=True): pass if 0 < b < 100: pass - sa += (a << b) >> b + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2010,24 +2010,24 @@ self.check_loops(int_rshift=0, everywhere=True) for f in (f1, f2): - assert self.meta_interp(f, [5, 10]) == 50 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [5, 6]) == 50 + self.check_loops(int_rshift=3, everywhere=True) assert self.meta_interp(f, [10, 5]) == 100 - 
self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [10, 10]) == 100 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [10, 6]) == 100 + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [5, 31]) == 0 + self.check_loops(int_rshift=3, everywhere=True) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) def test_overflowing_shift_neg(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2035,9 +2035,9 @@ n = sa = 0 while n < 10: myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if -10 < a < 0: pass - if 0 < b < 10: pass - sa += (a << b) >> b + if -5 <= a < 0: pass + if 0 < b <= 5: pass + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2047,7 +2047,7 @@ myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) if -hint(sys.maxint/2, promote=True) < a < 0: pass if 0 < b < 100: pass - sa += (a << b) >> b + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2055,24 +2055,24 @@ self.check_loops(int_rshift=0, everywhere=True) for f in (f1, f2): - assert self.meta_interp(f, [-5, 10]) == -50 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-5, 6]) == -50 + self.check_loops(int_rshift=3, everywhere=True) assert self.meta_interp(f, [-10, 5]) == -100 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [-10, 10]) == -100 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-10, 6]) == -100 + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [-5, 100]) == 0 - 
self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-5, 31]) == 0 + self.check_loops(int_rshift=3, everywhere=True) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 - assert self.meta_interp(f, [-bigval, 5]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [bigval, 5]) == 0 + self.check_loops(int_rshift=3, everywhere=True) class TestOOtype(BasicTests, OOJitMixin): From commits-noreply at bitbucket.org Mon Apr 25 12:00:53 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:00:53 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): remove description of dead feature. the rest of the doc is fine. Message-ID: <20110425100053.9182A36C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43558:350a66c6c85a Date: 2011-04-25 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/350a66c6c85a/ Log: (lac, cfbolz): remove description of dead feature. the rest of the doc is fine. diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -7,10 +7,6 @@ Introduction ================ -.. include:: crufty.rst - - .. apparently this still works; needs JIT integration; hasn't been maintained for years - PyPy can expose to its user language features similar to the ones present in `Stackless Python`_: **no recursion depth limit**, and the ability to write code in a **massively concurrent style**. It actually @@ -430,32 +426,6 @@ These cases are not supported yet. -Coroutine Cloning -+++++++++++++++++ - -In theory, coroutine pickling is general enough to allow coroutines to -be *cloned* within a process; i.e. from one suspended coroutine, a copy -can be made - simply by pickling and immediately unpickling it. Both -the original and the copy can then continue execution from the same -point on. Cloning gives much of the expressive power of full -*continuations*. 
- -However, pickling has several problems in practice (besides a relatively -high overhead). It is not a completely general solution because not all -kinds of objects can be pickled; moreover, which objects are pickled by -value or by reference only depends on the type of the object. For the -purpose of cloning, this means that coroutines cannot be -pickled/unpickled in all situations, and even when they can, the user -does not have full control over which of the objects currently reachable -from a coroutine will be duplicated, and which will be shared with the -original coroutine. - -For this reason, we implemented a direct cloning operation. It has been -deprecated for some time, however, as it was slightly buggy and relied -on a specific (and deprecated) garbage collector. It is not available -out of the box right now, so we will not talk any more about this. - - Composability +++++++++++++ From commits-noreply at bitbucket.org Mon Apr 25 12:00:58 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:00:58 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): those should go, we need to check whether they are referenced. Message-ID: <20110425100058.DF5B0282B90@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43559:ff3f8495a564 Date: 2011-04-25 11:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ff3f8495a564/ Log: (lac, cfbolz): those should go, we need to check whether they are referenced. diff --git a/pypy/doc/geninterp.rst b/pypy/doc/geninterp.rst --- a/pypy/doc/geninterp.rst +++ b/pypy/doc/geninterp.rst @@ -1,6 +1,4 @@ -.. include:: crufty.rst - - .. ^^ apparently dead +.. include:: throwaway.rst The Interpreter-Level backend ----------------------------- diff --git a/pypy/doc/buildtool.rst b/pypy/doc/buildtool.rst --- a/pypy/doc/buildtool.rst +++ b/pypy/doc/buildtool.rst @@ -2,7 +2,7 @@ PyPyBuilder ============ -.. include:: crufty.rst +.. 
include:: throwaway.rst What is this? ============= diff --git a/pypy/doc/low-level-encapsulation.rst b/pypy/doc/low-level-encapsulation.rst --- a/pypy/doc/low-level-encapsulation.rst +++ b/pypy/doc/low-level-encapsulation.rst @@ -1,3 +1,5 @@ +.. include:: throwaway.rst + ============================================================ Encapsulating low-level implementation aspects ============================================================ diff --git a/pypy/doc/externaltools.rst b/pypy/doc/externaltools.rst --- a/pypy/doc/externaltools.rst +++ b/pypy/doc/externaltools.rst @@ -1,6 +1,4 @@ -.. include:: crufty.rst - - .. ^^ Incomplete and wrong, superceded elsewhere +.. include:: throwaway.rst External tools&programs needed by PyPy ====================================== diff --git a/pypy/doc/throwaway.rst b/pypy/doc/throwaway.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/throwaway.rst @@ -0,0 +1,3 @@ +.. warning:: + + This documentation should be removed (as discussed during the Gothenburg sprint in 2011) diff --git a/pypy/doc/translation-aspects.rst b/pypy/doc/translation-aspects.rst --- a/pypy/doc/translation-aspects.rst +++ b/pypy/doc/translation-aspects.rst @@ -1,5 +1,4 @@ -.. include:: crufty.rst -.. ^^ old and needs updating +.. 
include:: throwaway.rst ========================================================================================== Memory management and threading models as translation aspects -- solutions and challenges From commits-noreply at bitbucket.org Mon Apr 25 12:01:03 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:01:03 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): those need work, but we want to keep them Message-ID: <20110425100103.667BE282BE9@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43560:a7386abcdfc0 Date: 2011-04-25 11:55 +0200 http://bitbucket.org/pypy/pypy/changeset/a7386abcdfc0/ Log: (lac, cfbolz): those need work, but we want to keep them diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,10 +1,8 @@ -.. include:: crufty.rst +.. include:: needswork.rst - .. ^^ Incomplete, superceded elsewhere - -======================== -lib/distributed features -======================== +============================= +lib_pypy/distributed features +============================= The 'distributed' library is an attempt to provide transparent, lazy access to remote objects. This is accomplished using diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,4 +1,6 @@ -.. include:: crufty.rst +.. include:: needswork.rst + +.. needs work, it talks about svn. also, it is not really user documentation Making a PyPy Release ======================= diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,3 +1,5 @@ +.. include:: needswork.rst + .. _glossary: ******** @@ -12,12 +14,6 @@ .. 
glossary:: -**abstract interpretation** - The technique of interpreting the bytecode of a user program with - an interpreter that handles abstract objects instead of concrete ones. - It can be used to check the bytecode or see what it does, without - actually executing it with concrete values. See Theory_. - .. _annotator: **annotator** @@ -107,11 +103,6 @@ .. _`object space`: -**multimethod** - A callable object that invokes a different Python function based - on the type of all its arguments (instead of just the class of the - first argument, as with normal methods). See Theory_. - **object space** The `object space `__ (often abbreviated to "objspace") creates all objects and knows how to perform operations @@ -242,6 +233,5 @@ .. _Python: http://www.python.org .. _`RPython Typer`: rtyper.html .. _`subsystem implementing the Python language`: architecture.html#standard-interpreter -.. _Theory: theory.html .. include:: _ref.rst diff --git a/pypy/doc/needswork.rst b/pypy/doc/needswork.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/needswork.rst @@ -0,0 +1,3 @@ +.. warning:: + + This documentation needs work (as discussed during the Gothenburg sprint in 2011) From commits-noreply at bitbucket.org Mon Apr 25 12:01:05 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:01:05 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): kill some old stuff, the rest is fine Message-ID: <20110425100105.2E52C282BE9@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43561:cba929f705e5 Date: 2011-04-25 11:56 +0200 http://bitbucket.org/pypy/pypy/changeset/cba929f705e5/ Log: (lac, cfbolz): kill some old stuff, the rest is fine diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -1,5 +1,3 @@ -.. 
include:: crufty.rst - ============================= PyPy's ctypes implementation ============================= @@ -71,8 +69,6 @@ interface require more object allocations and copying than strictly necessary; this too could be improved. -The implementation was developed and has only been tested on x86-32 Linux. - Here is a list of the limitations and missing features of the current implementation: @@ -94,41 +90,6 @@ between its primitive types and user subclasses of its primitive types -Getting the code and test suites -================================= - -A stable revision of PyPy containing the ctypes implementation can be checked out with subversion from the tag: - -http://codespeak.net/svn/pypy/tag/ctypes-stable - -The various tests and later examples can be run on x86-32 Linux. We tried them -on an up-to-date Ubuntu 7.10 x86-32 system. - -If one goes inside the checkout it is possible to run ``_rawffi`` tests with:: - - $ cd pypy - $ python test_all.py module/_rawffi/ - -The ctypes implementation test suite is derived from the tests for -ctypes 1.0.2, we have skipped some tests corresponding to not -implemented features or implementation details, we have also added -some tests. - -To run the test suite a compiled pypy-c is required with the proper configuration. To build the required pypy-c one should inside the checkout:: - - $ cd pypy/translator/goal - $ ./translate.py --text --batch --gc=generation targetpypystandalone.py - --withmod-_rawffi --allworkingmodules - -this should produce a pypy-c executable in the ``goal`` directory. - -To run the tests then:: - - $ cd ../../.. # back to pypy-trunk - $ ./pypy/translator/goal/pypy-c pypy/test_all.py lib/pypy1.2/lib_pypy/pypy_test/ctypes_tests - -There should be 36 skipped tests and all other tests should pass. 
- Running application examples ============================== From commits-noreply at bitbucket.org Mon Apr 25 12:01:07 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:01:07 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): this is fine, if unmaintained Message-ID: <20110425100107.BDB7936C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43562:cbd9a1f53987 Date: 2011-04-25 11:56 +0200 http://bitbucket.org/pypy/pypy/changeset/cbd9a1f53987/ Log: (lac, cfbolz): this is fine, if unmaintained diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -1,7 +1,3 @@ -.. include:: crufty.rst - - .. ^^ it continues to work, but is unmaintained - PyPy's sandboxing features ========================== From commits-noreply at bitbucket.org Mon Apr 25 12:01:10 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:01:10 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac): some of the reports are out-of-date, add a warning Message-ID: <20110425100110.297D4282BEA@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43563:2b8907633bab Date: 2011-04-25 11:57 +0200 http://bitbucket.org/pypy/pypy/changeset/2b8907633bab/ Log: (cfbolz, lac): some of the reports are out-of-date, add a warning diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -1,4 +1,7 @@ -.. include:: crufty.rst +.. warning:: + + Some of these reports are interesting for historical reasons only. + ============================================ PyPy - Overview over the EU-reports @@ -9,7 +12,7 @@ They also are very good documentation if you'd like to know in more detail about motivation and implementation of the various parts and aspects of PyPy. 
Feel free to send questions or comments -to `pypy-dev`_, the development list. +to `pypy-dev`_, the development list. Reports of 2007 =============== From commits-noreply at bitbucket.org Mon Apr 25 12:01:15 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:01:15 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): kill theory.rst, which is very high-level and thus a bit useless. replace by wikipedia links. Message-ID: <20110425100115.48F2C36C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43564:f11a3a95cc27 Date: 2011-04-25 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/f11a3a95cc27/ Log: (lac, cfbolz): kill theory.rst, which is very high-level and thus a bit useless. replace by wikipedia links. diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -469,7 +469,7 @@ resulting pair of basic strings. This is similar to the C++ method overloading resolution mechanism (but occurs at runtime). -.. _multimethods: theory.html#multimethods +.. _multimethods: http://en.wikipedia.org/wiki/Multimethods Multimethod slicing @@ -556,7 +556,7 @@ .. _`found here` : getting-started-dev.html#tracing-bytecode-and-operations-on-objects -.. _`Abstract Interpretation`: theory.html#abstract-interpretation +.. _`Abstract Interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`traceconfig.py`: ../tool/traceconfig.py @@ -588,7 +588,7 @@ appear in some next operation. This technique is an example of `Abstract Interpretation`_. -.. _`Abstract Interpretation`: theory.html#abstract-interpretation +.. 
_`Abstract Interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation For example, if the placeholder ``v1`` is given as the argument to the above function, the bytecode interpreter will call ``v2 = space.mul(space.wrap(3), diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -29,8 +29,6 @@ statistic/index.rst - theory.rst - translation-aspects.rst docindex.rst diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -165,7 +165,6 @@ .. _`coding guide`: coding-guide.html .. _`architecture`: architecture.html .. _`getting started`: getting-started.html -.. _`theory`: theory.html .. _`bytecode interpreter`: interpreter.html .. _`EU reports`: index-report.html .. _`Technical reports`: index-report.html @@ -223,7 +222,7 @@ `objspace/thunk.py`_ the `thunk object space`_, providing unique object features -`objspace/flow/`_ the FlowObjSpace_ implementing `abstract interpretation` +`objspace/flow/`_ the FlowObjSpace_ implementing `abstract interpretation`_ `objspace/std/`_ the StdObjSpace_ implementing CPython's objects and types @@ -285,7 +284,7 @@ .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Stackless and coroutines`: stackless.html .. _StdObjSpace: objspace.html#the-standard-object-space -.. _`abstract interpretation`: theory.html#abstract-interpretation +.. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`rpython`: coding-guide.html#rpython .. _`type inferencing code`: translation.html#the-annotation-pass .. _`RPython Typer`: translation.html#rpython-typer diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -104,7 +104,7 @@ .. _`PDF color version`: image/translation.pdf .. _`bytecode evaluator`: interpreter.html -.. _`abstract interpretation`: theory.html#abstract-interpretation +.. 
_`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`Flow Object Space`: objspace.html#the-flow-object-space .. _`interactive interface`: getting-started-dev.html#try-out-the-translator .. _`translatorshell.py`: ../../../../pypy/bin/translatorshell.py diff --git a/pypy/doc/theory.rst b/pypy/doc/theory.rst deleted file mode 100644 --- a/pypy/doc/theory.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. include:: crufty.rst - - .. ^^ old ideas; we're not doing it this way any more - -=================================== -Techniques used in PyPy -=================================== - -.. contents:: - - -.. _`abstract interpretation`: - -Abstract Interpretation -======================= - -Abstract Interpretation is a general technique which consists of an -interpreter that follows the bytecode instructions of a user program, just -like a normal interpreter does, but with abstract objects instead of concrete -ones. Remember that in PyPy this is done by using alternate object spaces with -the same bytecode interpreter main loop. - -As a theoretical example, the most abstract object space would be the one manipulating the most abstract objects that you could imagine: they are all equivalent, because we have abstracted away any information about the object. There is actually only one of them left, and we could call it "the object". In Python terms, an AbstractObjectSpace could use None for all its wrapped objects. Any operation between wrapped objects gives None again as the wrapped result -- there is nothing else it could give anyway. So when you have said that the add method of AbstractObjectSpace takes None and None and returns None you have said everything. - -The point of such an object space is for example to check the bytecode. The -bytecode interpreter will really run your bytecode, just with completely -abstract arguments. If there is no problem then you are sure that the bytecode -is valid. 
You could also record, during this abstract interpretation, how much -the stack ever grows; that would give you a fool-proof method of computing or -checking the co_stacksize argument of a code object. (There are subtleties -which I won't describe here, but that's the basic idea.) - -Typically, however, abstract object spaces are a (little) bit less abstract, still maintaining a minimal amount of information about the objects. For example, a wrapped object could be represented by its type. You then define the object space's add to return int when the two arguments are int and int. That way, you abstractedly call a function with the input argument's types and what the interpreter will do is a type inference. (Here also there are subtle problems, even besides the remark that integer operations can overflow and actually return longs in a real Python implementation.) - -As an example of more abstract object spaces you have the ones with finite domain, i.e. with a finite number of different possible wrapped objects. For example, you can use True and False as wrapped values to denote the fact that the object is, respectively, a non-negative integer or anything else. In this way you are doing another kind of type inference that just tells you which variables will only ever contain non-negative integers. - -In PyPy, the FlowObjSpace_ uses the abstract interpretation technique to generate a control flow graph of the functions of RPython_ programs. - -In its `more formal definition`_, Abstract Interpretation typically -considers abstract objects that are organized in a lattice_: some of -these objects are more (or less) abstract than others, in the sense that -they represent less (or more) known information; to say that this forms -a lattice essentially means that any two abstract objects have -well-defined unions and intersections (which are again abstract -objects). - -.. _FlowObjSpace: objspace.html#the-flow-object-space -.. 
_RPython: coding-guide.html#restricted-python -.. _`more formal definition`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _lattice: http://en.wikipedia.org/wiki/Lattice_%28order%29 - - -Multimethods -============ - -A "multimethod" is the generalization of the OOP notion of "method". -Theoretically, a method is a "message name" and signature attached to a -particular base class, which is implemented in the class or its subclasses. -To do a "method call" means to send a message to an object, using a message -name and actual arguments. We call "message dispatch" the operation of -finding which actual implementation is suitable for a particular call. For -methods, a message is dispatched by looking up the class of the "self" object, -and finding an implementation in that class, or in its base classes, in a -certain order. - -A multimethod is a message name and signature that can have implementations -that depend not only on the class of the first "self" argument, but on the -class of several arguments. Because of this we cannot use Python's nice model -of storing method implementations as functions, in the attributes of the -class. - -Here is a common implementation of multimethods: they are instances of a -specific MultiMethod class, and the instances are callable (there is a -__call__ operator on MultiMethod). When a MultiMethod is called, a dispatch -algorithm is used to find which, among the registered implementations, is the -one that should be called; this implementation is then immediately called. The -most important difference with normal methods is that the MultiMethod object -to call is no longer syntactically attached to classes. In other words, -whereas a method is called with ``obj.somemethod(args)``, a multimethod is -called much like a function, e.g. ``dosomething(obj1, obj2, obj3...)``. You -have to find the MultiMethod object ``dosomething`` in some namespace; it is -no longer implicitly looked up in the namespace of the "self" object. 
- -PyPy contains two different implementations of multimethods: a `quite general -one`_ written in RPython_ for the purposes of the StdObjSpace_, and a `short -two-arguments-dispatching one`_ used internally by the annotator_. - -.. _`quite general one`: http://codespeak.net/svn/pypy/dist/pypy/objspace/std/multimethod.py -.. _StdObjSpace: objspace.html#the-standard-object-space -.. _`short two-arguments-dispatching one`: http://codespeak.net/svn/pypy/dist/pypy/tool/pairtype.py -.. _annotator: translation.html#annotator From commits-noreply at bitbucket.org Mon Apr 25 12:02:20 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:02:20 +0200 (CEST) Subject: [pypy-svn] pypy default: there *is* a setitem_str shortcut Message-ID: <20110425100220.5768936C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43565:6534e23ee4cd Date: 2011-04-17 10:46 +0200 http://bitbucket.org/pypy/pypy/changeset/6534e23ee4cd/ Log: there *is* a setitem_str shortcut diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -41,9 +41,7 @@ def PyDict_SetItemString(space, w_dict, key_ptr, w_obj): if PyDict_Check(space, w_dict): key = rffi.charp2str(key_ptr) - # our dicts dont have a standardized interface, so we need - # to go through the space - space.setitem(w_dict, space.wrap(key), w_obj) + space.setitem_str(w_dict, key, w_obj) return 0 else: PyErr_BadInternalCall(space) From commits-noreply at bitbucket.org Mon Apr 25 12:02:22 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:02:22 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110425100222.207C436C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43566:d537e6abed09 Date: 2011-04-25 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/d537e6abed09/ Log: merge diff --git a/pypy/module/cpyext/dictobject.py 
b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -41,9 +41,7 @@ def PyDict_SetItemString(space, w_dict, key_ptr, w_obj): if PyDict_Check(space, w_dict): key = rffi.charp2str(key_ptr) - # our dicts dont have a standardized interface, so we need - # to go through the space - space.setitem(w_dict, space.wrap(key), w_obj) + space.setitem_str(w_dict, key, w_obj) return 0 else: PyErr_BadInternalCall(space) From commits-noreply at bitbucket.org Mon Apr 25 12:11:34 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:11:34 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: yay, most important task is done Message-ID: <20110425101134.5AE8336C20B@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3533:a2b90da90ced Date: 2011-04-25 12:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/a2b90da90ced/ Log: yay, most important task is done diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -10,7 +10,7 @@ tasks: -- fix the projector +- fix the projector DONE (Jacob) - release 1.5 - fix the import problem (Armin, Romain) - fix the jit tests (Håkan, Armin around From commits-noreply at bitbucket.org Mon Apr 25 12:23:29 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 12:23:29 +0200 (CEST) Subject: [pypy-svn] pypy default: have the llbackend raise on ValueError on bad shift counts Message-ID: <20110425102329.4EC16282B90@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43567:4c695a1b8a86 Date: 2011-04-25 12:21 +0200 http://bitbucket.org/pypy/pypy/changeset/4c695a1b8a86/ Log: have the llbackend raise on ValueError on bad shift counts diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -238,6 
+238,10 @@ interp.cleanup_registers() self.blackholeinterps.append(interp) +def check_shift_count(b): + if not we_are_translated(): + if b < 0 or b >= LONG_BIT: + raise ValueError("Shift count, %d, not in valid range, 0 .. %d." % (b, LONG_BIT-1)) class BlackholeInterpreter(object): @@ -420,14 +424,17 @@ @arguments("i", "i", returns="i") def bhimpl_int_rshift(a, b): + check_shift_count(b) return a >> b @arguments("i", "i", returns="i") def bhimpl_int_lshift(a, b): + check_shift_count(b) return intmask(a << b) @arguments("i", "i", returns="i") def bhimpl_uint_rshift(a, b): + check_shift_count(b) c = r_uint(a) >> r_uint(b) return intmask(c) diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -217,3 +217,16 @@ for x in range(1, 8)]) builder = pyjitpl._warmrunnerdesc.metainterp_sd.blackholeinterpbuilder assert builder.num_interpreters == 2 + +def test_bad_shift(): + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, -1) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, -1) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, -1) + + assert BlackholeInterpreter.bhimpl_int_lshift.im_func(100, 3) == 100<<3 + assert BlackholeInterpreter.bhimpl_int_rshift.im_func(100, 3) == 100>>3 + assert BlackholeInterpreter.bhimpl_uint_rshift.im_func(100, 3) == 100>>3 + From commits-noreply at bitbucket.org Mon Apr 25 12:23:31 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 12:23:31 +0200 (CEST) Subject: [pypy-svn] pypy default: hg merge Message-ID: 
<20110425102331.088A5282B90@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43568:33a21c931703 Date: 2011-04-25 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/33a21c931703/ Log: hg merge diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -41,9 +41,7 @@ def PyDict_SetItemString(space, w_dict, key_ptr, w_obj): if PyDict_Check(space, w_dict): key = rffi.charp2str(key_ptr) - # our dicts dont have a standardized interface, so we need - # to go through the space - space.setitem(w_dict, space.wrap(key), w_obj) + space.setitem_str(w_dict, key, w_obj) return 0 else: PyErr_BadInternalCall(space) From commits-noreply at bitbucket.org Mon Apr 25 12:26:27 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 12:26:27 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: Fix the timer size when on 64bit Message-ID: <20110425102627.0F771282B90@codespeak.net> Author: Dario Bertini Branch: jit-lsprofile Changeset: r43569:29c6a9a2c564 Date: 2011-04-25 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/29c6a9a2c564/ Log: Fix the timer size when on 64bit diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -8,7 +8,7 @@ interp_attrproperty) from pypy.rlib import jit from pypy.rlib.objectmodel import we_are_translated -from pypy.rlib.rtimer import read_timestamp +from pypy.rlib.rtimer import read_timestamp, _is_64_bit from pypy.rpython.lltypesystem import rffi, lltype from pypy.translator.tool.cbuild import ExternalCompilationInfo from pypy.tool.autopath import pypydir @@ -29,6 +29,11 @@ [], lltype.Void, compilation_info = eci) +if _is_64_bit: + timer_size_int = int +else: + timer_size_int = r_longlong + class W_StatsEntry(Wrappable): def __init__(self, space, frame, callcount, reccallcount, tt, it, w_sublist): @@ 
-117,8 +122,8 @@ def _stop(self, tt, it): if not we_are_translated(): - assert type(tt) is r_longlong - assert type(it) is r_longlong + assert type(tt) is timer_size_int + assert type(it) is timer_size_int self.recursionLevel -= 1 if self.recursionLevel == 0: self.ll_tt += tt @@ -158,7 +163,7 @@ class ProfilerContext(object): def __init__(self, profobj, entry): self.entry = entry - self.ll_subt = r_longlong(0) + self.ll_subt = timer_size_int(0) self.previous = profobj.current_context entry.recursionLevel += 1 if profobj.subcalls and self.previous: @@ -249,11 +254,14 @@ if self.w_callable: space = self.space try: - return space.r_longlong_w(space.call_function(self.w_callable)) + if _is_64_bit: + return space.int_w(space.call_function(self.w_callable)) + else: + return space.r_longlong_w(space.call_function(self.w_callable)) except OperationError, e: e.write_unraisable(space, "timer function ", self.w_callable) - return r_longlong(0) + return timer_size_int(0) return read_timestamp() def enable(self, space, w_subcalls=NoneNotWrapped, From commits-noreply at bitbucket.org Mon Apr 25 12:27:36 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 12:27:36 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): kill lots of useless discussion files, add XXXs about more that could be killed. Message-ID: <20110425102736.569E5282B90@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43570:77352a4c722f Date: 2011-04-25 12:27 +0200 http://bitbucket.org/pypy/pypy/changeset/77352a4c722f/ Log: (lac, cfbolz): kill lots of useless discussion files, add XXXs about more that could be killed. diff --git a/pypy/doc/discussion/thoughts_string_interning.rst b/pypy/doc/discussion/thoughts_string_interning.rst deleted file mode 100644 --- a/pypy/doc/discussion/thoughts_string_interning.rst +++ /dev/null @@ -1,211 +0,0 @@ -String Interning in PyPy -======================== - -A few thoughts about string interning. 
CPython gets a remarkable -speed-up by interning strings. Interned are all builtin string -objects and all strings used as names. The effect is that when -a string lookup is done during instance attribute access, -the dict lookup method will find the string always by identity, -saving the need to do a string comparison. - -Interned Strings in CPython ---------------------------- - -CPython keeps an internal dictionary named ``interned`` for all of these -strings. It contains the string both as key and as value, which means -there are two extra references in principle. Upto Version 2.2, interned -strings were considered immortal. Once they entered the ``interned`` dict, -nothing could revert this memory usage. - -Starting with Python 2.3, interned strings became mortal by default. -The reason was less memory usage for strings that have no external -reference any longer. This seems to be a worthwhile enhancement. -Interned strings that are really needed always have a real reference. -Strings which are interned for temporary reasons get a big speed up -and can be freed after they are no longer in use. - -This was implemented by making the ``interned`` dictionary a weak dict, -by lowering the refcount of interned strings by 2. The string deallocator -got extra handling to look into the ``interned`` dict when a string is deallocated. -This is supported by the state variable on string objects which tells -whether the string is not interned, immortal or mortal. - -Implementation problems for PyPy --------------------------------- - -- The CPython implementation makes explicit use of the refcount to handle - the weak-dict behavior of ``interned``. PyPy does not expose the implementation - of object aliveness. Special handling would be needed to simulate mortal - behavior. A possible but expensive solution would be to use a real - weak dictionary. 
Another way is to add a special interface to the backend - that allows either the two extra references to be reset, or for the - boehm collector to exclude the ``interned`` dict from reference tracking. - -- PyPy implements quite complete internal strings, as opposed to CPython - which always uses its "applevel" strings. It also supports low-level - dictionaries. This adds some complication to the issue of interning. - Additionally, the interpreter currently handles attribute access - by calling wrap(str) on the low-level attribute string when executing - frames. This implies that we have to primarily intern low-level strings - and cache the created string objects on top of them. - A possible implementation would use a dict with ll string keys and the - string objects as values. In order to save the extra dict lookup, we also - could consider to cache the string object directly on a field of the rstr, - which of course adds some extra cost. Alternatively, a fast id-indexed - extra dictionary can provide the mapping from rstr to interned string object. - But for efficiency reasons, it is anyway necessary to put an extra flag about - interning on the strings. Flagging this by putting the string object itself - as the flag might be acceptable. A dummyobject can be used if the interned - rstr is not exposed as an interned string object. - -Update: a reasonably simple implementation -------------------------------------------- - -Instead of the complications using the stringobject as a property of an rstr -instance, I propose to special case this kind of dictionary (mapping rstr -to stringobject) and to put an integer ``interned`` field into the rstr. The -default is -1 for not interned. Non-negative values are the direct index -of this string into the interning dict. That is, we grow an extra function -that indexes the dict by slot number of the dict table and gives direct -access to its value. 
The dictionary gets special handling on dict_resize, -to recompute the slot numbers of the interned strings. ATM I'd say we leave -the strings immortal and support mortality later when we have a cheap -way to express this (less refcount, exclusion from Boehm, whatever). - -A prototype brute-force patch ------------------------------ - -In order to get some idea how efficient string interning is at the moment, -I implemented a quite crude version of interning. I patched space.wrap -to call this intern_string instead of W_StringObject:: - - def intern_string(space, str): - if we_are_translated(): - _intern_ids = W_StringObject._intern_ids - str_id = id(str) - w_ret = _intern_ids.get(str_id, None) - if w_ret is not None: - return w_ret - _intern = W_StringObject._intern - if str not in _intern: - _intern[str] = W_StringObject(space, str) - W_StringObject._intern_keep[str_id] = str - _intern_ids[str_id] = w_ret = _intern[str] - return w_ret - else: - return W_StringObject(space, str) - -This is no general solution at all, since it a) does not provide -interning of rstr and b) interns every app-level string. The -implementation is also by far not as efficient as it could be, -because it utilizes an extra dict _intern_ids which maps the -id of the rstr to the string object, and a dict _intern_keep to -keep these ids alive. - -With just a single _intern dict from rstr to string object, the -overall performance degraded slightly instead of an advantage. -The triple dict patch accelerates richards by about 12 percent. -Since it still has the overhead of handling the extra dicts, -I guess we can expect twice the acceleration if we add proper -interning support. - -The resulting estimated 24 % acceleration is still not enough -to justify an implementation right now. 
- -Here the results of the richards benchmark:: - - D:\pypy\dist\pypy\translator\goal>pypy-c-17516.exe -c "from richards import *;Richards.iterations=1;main()" - debug: entry point starting - debug: argv -> pypy-c-17516.exe - debug: argv -> -c - debug: argv -> from richards import *;Richards.iterations=1;main() - Richards benchmark (Python) starting... [] - finished. - Total time for 1 iterations: 38 secs - Average time for iterations: 38885 ms - - D:\pypy\dist\pypy\translator\goal>pypy-c.exe -c "from richards import *;Richards.iterations=1;main()" - debug: entry point starting - debug: argv -> pypy-c.exe - debug: argv -> -c - debug: argv -> from richards import *;Richards.iterations=1;main() - Richards benchmark (Python) starting... [] - finished. - Total time for 1 iterations: 34 secs - Average time for iterations: 34388 ms - - D:\pypy\dist\pypy\translator\goal> - - -This was just an exercise to get an idea. For sure this is not to be checked in. -Instead, I'm attaching the simple patch here for reference. 
-:: - - Index: objspace/std/objspace.py - =================================================================== - --- objspace/std/objspace.py (revision 17526) - +++ objspace/std/objspace.py (working copy) - @@ -243,6 +243,9 @@ - return self.newbool(x) - return W_IntObject(self, x) - if isinstance(x, str): - + # XXX quick speed testing hack - + from pypy.objspace.std.stringobject import intern_string - + return intern_string(self, x) - return W_StringObject(self, x) - if isinstance(x, unicode): - return W_UnicodeObject(self, [unichr(ord(u)) for u in x]) # xxx - Index: objspace/std/stringobject.py - =================================================================== - --- objspace/std/stringobject.py (revision 17526) - +++ objspace/std/stringobject.py (working copy) - @@ -18,6 +18,10 @@ - class W_StringObject(W_Object): - from pypy.objspace.std.stringtype import str_typedef as typedef - - + _intern_ids = {} - + _intern_keep = {} - + _intern = {} - + - def __init__(w_self, space, str): - W_Object.__init__(w_self, space) - w_self._value = str - @@ -32,6 +36,21 @@ - - registerimplementation(W_StringObject) - - +def intern_string(space, str): - + if we_are_translated(): - + _intern_ids = W_StringObject._intern_ids - + str_id = id(str) - + w_ret = _intern_ids.get(str_id, None) - + if w_ret is not None: - + return w_ret - + _intern = W_StringObject._intern - + if str not in _intern: - + _intern[str] = W_StringObject(space, str) - + W_StringObject._intern_keep[str_id] = str - + _intern_ids[str_id] = w_ret = _intern[str] - + return w_ret - + else: - + return W_StringObject(space, str) - - def _isspace(ch): - return ord(ch) in (9, 10, 11, 12, 13, 32) - Index: objspace/std/stringtype.py - =================================================================== - --- objspace/std/stringtype.py (revision 17526) - +++ objspace/std/stringtype.py (working copy) - @@ -47,6 +47,10 @@ - if space.is_true(space.is_(w_stringtype, space.w_str)): - return w_obj # XXX might be reworked when 
space.str() typechecks - value = space.str_w(w_obj) - + # XXX quick hack to check interning effect - + w_obj = W_StringObject._intern.get(value, None) - + if w_obj is not None: - + return w_obj - w_obj = space.allocate_instance(W_StringObject, w_stringtype) - W_StringObject.__init__(w_obj, space, value) - return w_obj - -ciao - chris diff --git a/pypy/doc/discussion/howtoimplementpickling.rst b/pypy/doc/discussion/howtoimplementpickling.rst --- a/pypy/doc/discussion/howtoimplementpickling.rst +++ b/pypy/doc/discussion/howtoimplementpickling.rst @@ -1,3 +1,5 @@ +.. XXX think more, some of this might be useful + Designing thread pickling or "the Essence of Stackless Python" -------------------------------------------------------------- diff --git a/pypy/doc/discussion/testing-zope.rst b/pypy/doc/discussion/testing-zope.rst deleted file mode 100644 --- a/pypy/doc/discussion/testing-zope.rst +++ /dev/null @@ -1,45 +0,0 @@ -Testing Zope on top of pypy-c -============================= - -Getting Zope packages ---------------------- - -If you don't have a full Zope installation, you can pick a Zope package, -check it out via Subversion, and get all its dependencies (replace -``$PKG`` with, for example, ``zope.interface``):: - - svn co svn://svn.zope.org/repos/main/$PKG/trunk $PKG - cd $PKG - python bootstrap.py - bin/buildout - bin/test - -Required pypy-c version ------------------------ - -You probably need a pypy-c built with --allworkingmodules, at least:: - - cd pypy/translator/goal - ./translate.py targetpypystandalone.py --allworkingmodules - -Workarounds ------------ - -At the moment, our ``gc`` module is incomplete, making the Zope test -runner unhappy. 
Quick workaround: go to the -``lib-python/modified-2.4.1`` directory and create a -``sitecustomize.py`` with the following content:: - - print "" - import gc - gc.get_threshold = lambda : (0, 0, 0) - gc.get_debug = lambda : 0 - gc.garbage = [] - -Running the tests ------------------ - -To run the tests we need the --oldstyle option, as follows:: - - cd $PKG - pypy-c --oldstyle bin/test diff --git a/pypy/doc/discussion/compiled-swamp.rst b/pypy/doc/discussion/compiled-swamp.rst deleted file mode 100644 --- a/pypy/doc/discussion/compiled-swamp.rst +++ /dev/null @@ -1,14 +0,0 @@ - -We've got huge swamp of compiled pypy-c's used for: - -* benchmarks -* tests -* compliance tests -* play1 -* downloads -* ... - -We've got build tool, which we don't use, etc. etc. - -Idea is to formalize it more or less, so we'll have single script -to make all of this work, upload builds to the web page etc. diff --git a/pypy/doc/discussion/ctypes_modules.rst b/pypy/doc/discussion/ctypes_modules.rst deleted file mode 100644 --- a/pypy/doc/discussion/ctypes_modules.rst +++ /dev/null @@ -1,65 +0,0 @@ -what is needed for various ctypes-based modules and how feasible they are -========================================================================== - -Quick recap for module evaluation: - -1. does the module use callbacks? - -2. how sophisticated ctypes usage is (accessing of _objects?) - -3. any specific tricks - -4. does it have tests? - -5. dependencies - -6. does it depend on cpython c-api over ctypes? - -Pygame -====== - -1. yes, for various things, but basic functionality can be achieved without - -2. probably not - -3. not that I know of - -4. yes for tests, no for unittests - -5. numpy, but can live without, besides only C-level dependencies. On OS/X - it requires PyObjC. - -6. no - - -PyOpenGL -======== - -1. yes, for GLX, but not for the core functionality - -2. probably not - -3. all the code is auto-generated - -4. it has example programs, no tests - -5. 
numpy, but can live without it. can use various surfaces (including pygame) to draw on - -6. no - - -Sqlite -====== - -1. yes, but I think it's not necessary - -2. no - -3. no - -4. yes - -5. datetime - -6. it passes py_object around in few places, not sure why (probably as an - opaque argument). diff --git a/pypy/doc/discussion/oz-thread-api.rst b/pypy/doc/discussion/oz-thread-api.rst deleted file mode 100644 --- a/pypy/doc/discussion/oz-thread-api.rst +++ /dev/null @@ -1,49 +0,0 @@ -Some rough notes about the Oz threading model -============================================= - -(almost verbatim from CTM) - -Scheduling ----------- - -Fair scheduling through round-robin. - -With priority levels : three queues exist, which manage high, medium, -low priority threads. The time slice ratio for these is -100:10:1. Threads inherit the priority of their parent. - -Mozart uses an external timer approach to implement thread preemption. - -Thread ops ----------- - -All these ops are defined in a Thread namespace/module. - -this() -> current thread's name (*not* another thread's name) -state(t) -> return state of t in {runnable, blocked, terminated} -suspend(t) : suspend t -resume(t) : resume execution of t -preempt(t) : preempt t -terminate(t) : terminate t immediately -injectException(t, e) : raise exception e in t -setPriority(t, p) : set t's priority to p - -Interestingly, coroutines can be build upon this thread -API. Coroutines have two ops : spawn and resume. 
- -spawn(p) -> creates a coroutine with procedure p, returns pid -resume(c) : transfers control from current coroutine to c - -The implementation of these ops in terms of the threads API is as -follows : - -def spawn(p): - in_thread: - pid = Thread.this() - Thread.suspend(pid) - p() - -def resume(cid): - Thread.resume cid - Thread.suspend(Thread.this()) - diff --git a/pypy/doc/discussion/translation-swamp.rst b/pypy/doc/discussion/translation-swamp.rst deleted file mode 100644 --- a/pypy/doc/discussion/translation-swamp.rst +++ /dev/null @@ -1,30 +0,0 @@ -=================================================================== -List of things that need to be improved for translation to be saner -=================================================================== - - - * understand nondeterminism after rtyping - - * experiment with different heuristics: - - * weigh backedges more (TESTING) - * consider size of outer function - * consider number of arguments (TESTING) - - * find a more deterministic inlining order (TESTING using number of callers) - - * experiment with using a base inlining threshold and then drive inlining by - malloc removal possibilities (using escape analysis) - - * move the inlining of gc helpers just before emitting the code. - throw the graph away (TESTING, need to do a new framework translation) - - * for gcc: use just one implement file (TRIED: turns out to be a bad idea, - because gcc uses too much ram). Need to experiment more now that - inlining should at least be more deterministic! 
- -things to improve the framework gc -================================== - - * find out whether a function can collect - diff --git a/pypy/doc/discussion/cmd-prompt-translation.rst b/pypy/doc/discussion/cmd-prompt-translation.rst deleted file mode 100644 --- a/pypy/doc/discussion/cmd-prompt-translation.rst +++ /dev/null @@ -1,18 +0,0 @@ - -t = Translation(entry_point[,]) -t.annotate([]) -t.rtype([]) -t.backendopt[_]([]) -t.source[_]([]) -f = t.compile[_]([]) - -and t.view(), t.viewcg() - - = c|llvm (for now) -you can skip steps - - = argtypes (for annotation) plus - keyword args: gc=...|policy= etc - - - diff --git a/pypy/doc/discussion/gc.rst b/pypy/doc/discussion/gc.rst deleted file mode 100644 --- a/pypy/doc/discussion/gc.rst +++ /dev/null @@ -1,77 +0,0 @@ - -*Note: this things are experimental and are being implemented on the -`io-improvements`_ branch* - -.. _`io-improvements`: http://codespeak.net/svn/pypy/branch/io-improvements - -============= -GC operations -============= - -This document tries to gather gc-related issues which are very recent -or in-development. Also, it tries to document needed gc refactorings -and expected performance of certain gc-related operations. - -Problem area -============ - -Since some of our gcs are moving, we at some point decided to simplify -the issue of having care of it by always copying the contents of -data that goes to C level. This yields a performance penalty, also -because some gcs does not move data around anyway. - -So we decided to introduce new operations which will simplify issues -regarding this. - -Pure gc operations -================== - -(All available from rlib.rgc) - -* can_move(p) - returns a flag telling whether pointer p will move. - useful for example when you want to know whether memcopy is safe. - -* malloc_nonmovable(TP, n=None) - tries to allocate non-moving object. - if it succeeds, it return an object, otherwise (for whatever reasons) - returns null pointer. Does not raise! 
(never) - -Usage patterns -============== - -Usually those functions are used via helpers located in rffi. For things like -os.write - first get_nonmovingbuffer(data) that will give you a pointer -suitable of passing to C and finally free_nonmovingbuffer. - -For os.read like usage - you first call alloc_buffer (that will allocate a -buffer of desired size passable to C) and afterwards create str_from_buffer, -finally calling keep_buffer_alive_until_here. - -String builder -============== - -In Python strings are immutable by design. In RPython this still yields true, -but since we cooperate with lower (C/POSIX) level, which has no notion of -strings, we use buffers. Typical use case is to use list of characters l and -than ''.join(l) in order to get string. This requires a lot of unnecessary -copying, which yields performance penalty for such operations as string -formatting. Hence the idea of string builder. String builder would be an -object to which you can append strings or characters and afterwards build it -to a string. Ideally, this set of operations would not contain any copying -whatsoever. - -Low level gc operations for string builder ------------------------------------------- - -* alloc_buffer(T, size) - allocates Array(nolength=True) with possibility - of later becoming of shape T - -* realloc_buffer(buf, newsize) - tries to shrink or enlarge buffer buf. Returns - new pointer (since it might involve copying) - -* build_buffer(T, buf) - creates a type T (previously passed to alloc_buffer) - from buffer. - -Depending on a gc, those might be implemented dumb (realloc always copies) -or using C-level realloc. Might be implemented also in whatever clever way -comes to mind. - diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst --- a/pypy/doc/discussion/VM-integration.rst +++ b/pypy/doc/discussion/VM-integration.rst @@ -1,3 +1,5 @@ +.. XXX anto, do we still need this? 
+ ============================================== Integration of PyPy with host Virtual Machines ============================================== diff --git a/pypy/doc/discussion/chained_getattr.rst b/pypy/doc/discussion/chained_getattr.rst deleted file mode 100644 --- a/pypy/doc/discussion/chained_getattr.rst +++ /dev/null @@ -1,70 +0,0 @@ - - -"chained getattr/module global lookup" optimization -(discussion during trillke-sprint 2007, anto/holger, -a bit of samuele and cf earlier on) - -random example: - - code: - import os.path - normed = [os.path.normpath(p) for p in somelist] - bytecode: - [...] - LOAD_GLOBAL (os) - LOAD_ATTR (path) - LOAD_ATTR (normpath) - LOAD_FAST (p) - CALL_FUNCTION 1 - - would be turned by pypy-compiler into: - - LOAD_CHAINED_GLOBAL (os,path,normpath) - LOAD_FAST (p) - CALL_FUNCTION 1 - - now for the LOAD_CHAINED_GLOBAL bytecode implementation: - - Module dicts have a special implementation, providing: - - - an extra "fastlookup" rpython-dict serving as a cache for - LOAD_CHAINED_GLOBAL places within the modules: - - * keys are e.g. 
('os', 'path', 'normpath') - - * values are tuples of the form: - ([obj1, obj2, obj3], [ver1, ver2]) - - "ver1" refer to the version of the globals of "os" - "ver2" refer to the version of the globals of "os.path" - "obj3" is the resulting "normpath" function - - - upon changes to the global dict, "fastlookup.clear()" is called - - - after the fastlookup entry is filled for a given - LOAD_CHAINED_GLOBAL index, the following checks need - to be performed in the bytecode implementation:: - - value = f_globals.fastlookup.get(key, None) - if value is None: - # fill entry - else: - # check that our cached lookups are still valid - assert isinstance(value, tuple) - objects, versions = value - i = 0 - while i < len(versions): - lastversion = versions[i] - ver = getver_for_obj(objects[i]) - if ver == -1 or ver != lastversion: - name = key[i] - objects[i] = space.getattr(curobj, name) - versions[i] = ver - curobj = objects[i] - i += 1 - return objects[i] - - def getver_for_obj(obj): - if "obj is not Module": - return -1 - return obj.w_dict.version diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst --- a/pypy/doc/discussion/finalizer-order.rst +++ b/pypy/doc/discussion/finalizer-order.rst @@ -1,3 +1,6 @@ +.. XXX armin, what do we do with this? + + Ordering finalizers in the SemiSpace GC ======================================= diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst --- a/pypy/doc/discussion/outline-external-ootype.rst +++ b/pypy/doc/discussion/outline-external-ootype.rst @@ -1,3 +1,5 @@ +.. XXX, anto, can this be killed? + Some discussion about external objects in ootype ================================================ diff --git a/pypy/doc/discussion/distribution.rst b/pypy/doc/discussion/distribution.rst --- a/pypy/doc/discussion/distribution.rst +++ b/pypy/doc/discussion/distribution.rst @@ -1,3 +1,5 @@ +.. XXX fijal, can this be killed? 
+ =================================================== (Semi)-transparent distribution of RPython programs =================================================== diff --git a/pypy/doc/discussion/pypy_metaclasses_in_cl.rst b/pypy/doc/discussion/pypy_metaclasses_in_cl.rst deleted file mode 100644 --- a/pypy/doc/discussion/pypy_metaclasses_in_cl.rst +++ /dev/null @@ -1,139 +0,0 @@ -IRC log -======= - -:: - - [09:41] arigo: is it possible to ask the backendoptimizer to completely remove all the oogetfield('meta', obj)? - [09:42] and at the same time to change all the oogetfield('somefield', meta) into oogetfield('somefield', obj) - [09:42] because then we wouldn't need the metaclass hierarchy anymore - [09:42] (at least in common lisp) - [09:42] as far as I know the idea was indeed to be able to do this kind of things - [09:43] but not necessarily in the existing backendopt - [09:44] uhmmm - [09:44] I have no idea how to do this stuff - [09:44] if I understand it correctly, as a first step you can just tweak gencl to recognize oogetfield('meta', obj) - [09:44] I'll think about it on the plane maybe - [09:44] and produce a same_as equivalent instead - [09:44] (do I make any sense at all?) - [09:44] yes - [09:45] same_as(meta, obj) - [09:45] so that the next oogetfield() will still work on meta which in reality is the obj - [09:45] yes - [09:45] thus you obtained the same thing without removing anything - [09:45] cool - [09:46] dialtone: can you explain me better what are you trying to do? - [09:46] it looks kinda simple - [09:46] am I a fool? 
- [09:46] antocuni: I want to get rid of the metaclass stuff in common lisp - [09:47] since common lisp supports class variables - [09:47] (DEFCLASS foo () ((bar :allocate :class))) - [09:47] cool - [09:47] but to do that I also have to get rid of the opcodes that work on the object model - [09:48] at first I thought about removing the metaclass related operations (or change them) but armin got a great idea about using same_as - [09:48] idnar (i=mithrand at unaffiliated/idnar) left irc: Remote closed the connection - [09:48] there might be a few problems, though - [09:48] and here comes the part I feared - [09:48] I'm not sure if the meta object is used for more than oogetfields - [09:49] and also, let's see if there are name clashes in the fields - [09:49] I can't understand a thing: are you trying to lookup some fields in the obj directly, instead of in the metclass, right? - [09:49] antocuni: yes - [09:50] why an object should have fields that belongs to its metaclass? - [09:50] arigo: uhmmm you can have both a class variable and an instance variable named in the same way? - [09:50] metaclass is not a real metaclass - [09:50] I don't know - [09:50] arigo - r26566 - Support geterrno() from rctypes to genc. - [09:50] dialtone: ah, now I understand - [09:50] I would expect it not to be the case, as the names come from RPython names - [09:51] arigo: indeed - [09:51] but I guess I can set different accessors maybe for class level things and for instance level things - [09:51] let's try - [09:51] no... - [09:52] so a name clash would break stuff - [09:52] but... how do you recognize an access to a class variable and one to an instance variable from RPython? - [09:53] dialtone: I think we don't have name clashes, because there is some mangling anyway - [09:53] cool - [09:53] if I see it correctly, class variable names start with 'pbc' and instance ones with 'o' - [09:53] that's what we've done in gencl yes - [09:54] ? 
that's what the ootyping is doing - [09:54] yes yes - [09:54] :-) - [09:54] I mean that I see the distinction in gencl :) - [09:54] sooooooo - [09:55] if I have a getfield where the first argument is meta and I simply emit the same code that I emit for the same_as I should be safe removing all the meta stuff... maybe - [09:55] seems like a tiny change in gencl - [09:55] dialtone: in RPython, the annotator says that attributes are instance fields as soon as they are written to instances, otherwise they are class attributes - [09:56] yes, it should work - [09:56] Palats (n=Pierre at izumi.palats.com) left irc: Read error: 104 (Connection reset by peer) - [09:56] unless of course metaclasses are used for something else than class variables - [09:56] ideally, you should not look for the name 'meta' but for some other hint - [09:57] I'm not completely at ease with the various levels of ootype - [09:57] neither am I\ - [09:57] all field names other than those defined by ootype (like "meta") will be mangled, so i guess checking for "meta" is good enough - [09:57] and I also have to ignore the setfield opcode that deals with metaclasses - [09:58] or make it a same_as as well - [09:59] apparently, the meta instances are used as the ootype of RPython classes - [10:00] so they can be manipulated by RPython code that passes classes around - [10:01] I guess you can also pass classes around in CL, read attributes from them, and instantiate them - [10:01] yes - [10:01] so a saner approach might be to try to have gencl use CL classes instead of these meta instances - [10:03] uhmmmmm - [10:03] which means: recognize if an ootype.Instance is actually representing an RPython class (by using a hint) - [10:03] I also have to deal with the Class_ - [10:03] but that can probably be set to standard-class - [10:03] yes, I think it's saner to make, basically, oogetfield('class_') be a same_as - [10:04] cool - [10:04] I think I'll save this irc log to put it in the svn tree for sanxiyn - 
[10:04] to recognize RPython class represenations: if the ootype.Instance has the superclass ootypesystem.rclass.CLASSTYPE, then it's a "metaclass" - [10:04] he is thinking about this in the plane (at least this is what he told) - [10:05] :-) - [10:05] nikh: yes - [10:05] ootype is indeed rather complicated, level-wise, to support limited languages like Java - [10:05] unfortunately, yes - [10:05] well, in a way it's very convenient for the backends - [10:05] but if you want to use more native constructs, it gets hairy quickly - [10:05] I dunno - [10:05] depends on the backend - [10:06] hum, there is still an information missing that gencl would need here - [10:06] I think if the language of the backend is powerful enough it could use an higher abstraction - [10:07] dialtone: yes, there is also the (hairly to implement) idea of producing slightly different things for different back-ends too - [10:07] using backendopts? - [10:08] would it make sense to have a kind of backend_supports=['metaclasses', 'classvariables', 'first_class_functions'...] - [10:08] maybe, but I was thinking about doing different things in ootypesystem/rclass already - [10:08] yes, such a backend_supports would be great - [10:09] dialtone: there is still an hour left to sprint, so go go go ;) - [10:09] you can do it, if you want it ;) - [10:09] what is missing is the link from the concrete Instance types, and which Instance corresponds to its meta-instance - [10:10] idnar (i=mithrand at unaffiliated/idnar) joined #pypy. 
- [10:10] dialtone: it's not as simple as making an oogetfield be a same_as - [10:10] KnowledgeUnboundError, Missing documentation in slot brain - [10:10] right now for CL the goal would be to generate for a normal Instance, a DEFCLASS whose :allocate :class attributes are the attributes of the meta-Instance - [10:11] we could optionally have class fields in Instances, and then operations like ooget/setclassfield - [10:11] the reason why I ask is that if we manage to do this then we could also use default Condition as Exception - [10:11] and we could map the Conditions in common lisp to exceptions in python transparently - [10:12] since the object systems will then match (and they are vaguely similar anyway) - [10:12] nice - [10:12] at least I think - [10:18] I'm still rather confused by ootypesystem/rclass - [10:18] although I think that blame would show my name on quite some bits :-) - [10:19] there are no class attributes read through instances - [10:19] they are turned into method calls - [10:19] accessor methods - [10:20] it's a bit organically grown - [10:20] accessor methods were introduced at one point, and the meta-Instance later - [10:21] uhmmm - [10:22] what was the reason for having accessor methods? - [10:22] they seem to be only generated for class vars that are overriden in subclasses. - [10:22] yes - [10:22] before we had the meta-Instance trick, it was the only way to avoid storing the value in all instances - [10:22] aha - [10:23] we could possibly get rid of these accessors - [10:23] now, yes, by storing the values in the meta-Instance - [10:23] they are alway anyway stored in the meta-Instance, I think - [10:23] no, I think that other values are stored in the meta-Instance right now - [10:24] it's the values that are only ever accessed with a syntax 'ClassName.attr', i.e. not through an instance - [10:24] ...more precisely, with 'x = ClassName or OtherClassName; x.attr' - [10:25] hm, i'm still trying to read this out of the code ... 
- [10:28] it's in ClassRepr._setup_repr() - [10:28] there is no clsfields here, just pbcfields - [10:28] # attributes showing up in getattrs done on the class as a PBC - [10:28] i see diff --git a/pypy/doc/discussion/GC-performance.rst b/pypy/doc/discussion/GC-performance.rst deleted file mode 100644 --- a/pypy/doc/discussion/GC-performance.rst +++ /dev/null @@ -1,118 +0,0 @@ -StartHeapsize# is the framework GC as of revision 31586 with initial -bytes_malloced_threshold of 2-512 MB - -NewHeuristics is the framework GC with a new heuristics for adjusting -the bytes_malloced_threshold - -:: - - Pystone - StartHeapsize2: - This machine benchmarks at 5426.92 pystones/second - This machine benchmarks at 5193.91 pystones/second - This machine benchmarks at 5403.46 pystones/second - StartHeapsize8: - This machine benchmarks at 6075.33 pystones/second - This machine benchmarks at 6007.21 pystones/second - This machine benchmarks at 6122.45 pystones/second - StartHeapsize32: - This machine benchmarks at 6643.05 pystones/second - This machine benchmarks at 6590.51 pystones/second - This machine benchmarks at 6593.41 pystones/second - StartHeapsize128: - This machine benchmarks at 7065.47 pystones/second - This machine benchmarks at 7102.27 pystones/second - This machine benchmarks at 7082.15 pystones/second - StartHeapsize512: - This machine benchmarks at 7208.07 pystones/second - This machine benchmarks at 7197.7 pystones/second - This machine benchmarks at 7246.38 pystones/second - NewHeuristics: - This machine benchmarks at 6821.28 pystones/second - This machine benchmarks at 6858.71 pystones/second - This machine benchmarks at 6902.9 pystones/second - - - Richards - StartHeapSize2: - Average time per iteration: 5456.21 ms - Average time per iteration: 5529.31 ms - Average time per iteration: 5398.82 ms - StartHeapsize8: - Average time per iteration: 4775.43 ms - Average time per iteration: 4753.25 ms - Average time per iteration: 4781.37 ms - StartHeapsize32: - Average 
time per iteration: 4554.84 ms - Average time per iteration: 4501.86 ms - Average time per iteration: 4531.59 ms - StartHeapsize128: - Average time per iteration: 4329.42 ms - Average time per iteration: 4360.87 ms - Average time per iteration: 4392.81 ms - StartHeapsize512: - Average time per iteration: 4371.72 ms - Average time per iteration: 4399.70 ms - Average time per iteration: 4354.66 ms - NewHeuristics: - Average time per iteration: 4763.56 ms - Average time per iteration: 4803.49 ms - Average time per iteration: 4840.68 ms - - - translate rpystone - time pypy-c translate --text --batch --backendopt --no-compile targetrpystonedalone.py - StartHeapSize2: - real 1m38.459s - user 1m35.582s - sys 0m0.440s - StartHeapsize8: - real 1m35.398s - user 1m33.878s - sys 0m0.376s - StartHeapsize32: - real 1m5.475s - user 1m5.108s - sys 0m0.180s - StartHeapsize128: - real 0m52.941s - user 0m52.395s - sys 0m0.328s - StartHeapsize512: - real 1m3.727s - user 0m50.031s - sys 0m1.240s - NewHeuristics: - real 0m53.449s - user 0m52.771s - sys 0m0.356s - - - docutils - time pypy-c rst2html doc/coding-guide.txt - StartHeapSize2: - real 0m36.125s - user 0m35.562s - sys 0m0.088s - StartHeapsize8: - real 0m32.678s - user 0m31.106s - sys 0m0.084s - StartHeapsize32: - real 0m22.041s - user 0m21.085s - sys 0m0.132s - StartHeapsize128: - real 0m19.350s - user 0m18.653s - sys 0m0.324s - StartHeapsize512: - real 0m19.116s - user 0m17.517s - sys 0m0.620s - NewHeuristics: - real 0m20.990s - user 0m20.109s - sys 0m0.196s - - diff --git a/pypy/doc/discussion/use_case_of_logic.rst b/pypy/doc/discussion/use_case_of_logic.rst deleted file mode 100644 --- a/pypy/doc/discussion/use_case_of_logic.rst +++ /dev/null @@ -1,75 +0,0 @@ -Use cases for a combination of Logic and Object Oriented programming approach -------------------------------------------------------------------------------- - -Workflows -========= - -Defining the next state by solving certain constraints. 
The more -general term might be State machines. - -Business Logic -============== - -We define Business Logic as expressing consistency (as an example) on -a set of objects in a business application. - -For example checking the consistency of a calculation before -committing the changes. - -The domain is quite rich in examples of uses of Business Logic. - -Datamining -=========== - -An example is Genetic sequence matching. - -Databases -========= - -Validity constraints for the data can be expressed as constraints. - -Constraints can be used to perform type inference when querying the -database. - -Semantic web -============= - -The use case is like the database case, except the ontology language -itself is born out of Descriptive Logic. - - -User Interfaces -=============== - -We use rules to describe the layout and visibility constraints of -elements that are to be displayed on screen. The rule can also help -describing how an element is to be displayed depending on its state -(for instance, out of bound values can be displayed in a different -colour). - -Configuration -============== - -User configuration can use information inferred from: the current -user, current platforms, version requirements, ... - -The validity of the configuration can be checked with the constraints. - - -Scheduling and planning -======================== - -Timetables, process scheduling, task scheduling. - -Use rules to determine when to execute tasks (only start batch, if load -is low, and previous batch is finished). - -Load sharing. - -Route optimization.
Planning the routes of a technician based on tools -needed and such - -An example is scheduling a conference like Europython see: - -http://lists.logilab.org/pipermail/python-logic/2005-May/000107.html - diff --git a/pypy/doc/discussion/ctypes_todo.rst b/pypy/doc/discussion/ctypes_todo.rst deleted file mode 100644 --- a/pypy/doc/discussion/ctypes_todo.rst +++ /dev/null @@ -1,34 +0,0 @@ -Few ctypes-related todo points: - -* Write down missing parts and port all tests, eventually adding - additional tests. - - - for unions and structs, late assignment of _fields_ is somewhat buggy. - Tests about behavior of getattr working properly on instances - are missing or not comprehensive. Some tests are skipped because I didn't - understand the details. - - - _fields_ can be tuples too as well as lists - - - restype being a function is not working. - - - there are features, which we don't support like buffer() and - array() protocols. - - - are the _CData_value return lifetime/gc semantics correct? - - - for some ABIs we will need completely filled ffitypes to do the - right thing for passing structures by value, we are now passing enough - information to rawffi that it should be possible to construct such precise - ffitypes in most cases - - - bitfields are not implemented - - - byteorder is not implemented - -* as all stuff is applevel, we cannot have it really fast right now. - -* we shall at least try to approach ctypes from the point of the jit - backends (at least on platforms that we support). The thing is that - we need a lot broader support of jit backends for different argument - passing in order to do it. 
diff --git a/pypy/doc/discussion/security-ideas.rst b/pypy/doc/discussion/security-ideas.rst deleted file mode 100644 --- a/pypy/doc/discussion/security-ideas.rst +++ /dev/null @@ -1,312 +0,0 @@ -============== -Security ideas -============== - -These are some notes I (Armin) took after a talk at Chalmers by Steve -Zdancewic: "Encoding Information Flow in Haskell". That talk was -presenting a pure Haskell approach with monad-like constructions; I -think that the approach translates well to PyPy at the level of RPython. - - -The problem ------------ - -The problem that we try to solve here is: how to give the programmer a -way to write programs that are easily checked to be "secure", in the -sense that bugs shouldn't allow confidential information to be -unexpectedly leaked. This is not security as in defeating actively -malicious attackers. - - -Example -------- - -Let's suppose that we want to write a telnet-based application for a -bidding system. We want normal users to be able to log in with their -username and password, and place bids (i.e. type in an amount of money). -The server should record the highest bid so far but not allow users to -see that number. Additionally, the administrator should be able to log -in with his own password and see the highest bid. 
The basic program:: - - def mainloop(): - while True: - username = raw_input() - password = raw_input() - user = authenticate(username, password) - if user == 'guest': - serve_guest() - elif user == 'admin': - serve_admin() - - def serve_guest(): - global highest_bid - print "Enter your bid:" - n = int(raw_input()) - if n > highest_bid: # - highest_bid = n # - print "Thank you" - - def serve_admin(): - print "Highest big is:", highest_bid - -The goal is to make this program more secure by declaring and enforcing -the following properties: first, the guest code is allowed to manipulate -the highest_bid, as in the lines marked with ``#``, but these lines must -not leak back the highest_bid in a form visible to the guest user; -second, the printing in serve_admin() must only be allowed if the user -that logged in is really the administrator (e.g. catch bugs like -accidentally swapping the serve_guest() and serve_admin() calls in -mainloop()). - - -Preventing leak of information in guest code: 1st try ------------------------------------------------------ - -The basic technique to prevent leaks is to attach "confidentiality -level" tags to objects. In this example, the highest_bid int object -would be tagged with label="secret", e.g. by being initialized as:: - - highest_bid = tag(0, label="secret") - -At first, we can think about an object space where all objects have such -a label, and the label propagates to operations between objects: for -example, code like ``highest_bid += 1`` would produce a new int object -with again label="secret". - -Where this approach doesn't work is with if/else or loops. In the above -example, we do:: - - if n > highest_bid: - ... - -However, by the object space rules introduced above, the result of the -comparison is a "secret" bool object. This means that the guest code -cannot know if it is True or False, and so the PyPy interpreter has no -clue if it must follow the ``then`` or ``else`` branch of the ``if``.
-So the guest code could do ``highest_bid += 1`` and probably even -``highest_bid = max(highest_bid, n)`` if max() is a clever enough -built-in function, but clearly this approach doesn't work well for more -complicated computations that we would like to perform at this point. - -There might be very cool possible ideas to solve this with doing some -kind of just-in-time flow object space analysis. However, here is a -possibly more practical approach. Let's forget about the object space -tricks and start again. (See `Related work`_ for why the object space -approach doesn't work too well.) - - -Preventing leak of information in guest code with the annotator instead ------------------------------------------------------------------------ - -Suppose that the program runs on top of CPython and not necessarily -PyPy. We will only need PyPy's annotator. The idea is to mark the code -that manipulates highest_bid explicitly, and make it RPython in the -sense that we can take its flow space and follow the calls (we don't -care about the precise types here -- we will use different annotations). -Note that only the bits that manipulates the secret values needs to be -RPython. Example:: - - # on top of CPython, 'hidden' is a type that hides a value without - # giving any way to normal programs to access it, so the program - # cannot do anything with 'highest_bid' - - highest_bid = hidden(0, label="secure") - - def enter_bid(n): - if n > highest_bid.value: - highest_bid.value = n - - enter_bid = secure(enter_bid) - - def serve_guest(): - print "Enter your bid:" - n = int(raw_input()) - enter_bid(n) - print "Thank you" - -The point is that the expression ``highest_bid.value`` raises a -SecurityException when run normally: it is not allowed to read this -value. The secure() decorator uses the annotator on the enter_bid() -function, with special annotations that I will describe shortly. Then -secure() returns a "compiled" version of enter_bid. 
The compiled -version is checked to satisfy the security constraints, and it contains -special code that then enables the ``highest_bid.value`` to work. - -The annotations propagated by secure() are ``SomeSecurityLevel`` -annotations. Normal constants are propagated as -SomeSecurityLevel("public"). The ``highest_bid.value`` returns the -annotation SomeSecurityLevel("secret"), which is the label of the -constant ``highest_bid`` hidden object. We define operations between -two SomeSecurityLevels to return a SomeSecurityLevel which is the max of -the secret levels of the operands. - -The key point is that secure() checks that the return value is -SomeSecurityLevel("public"). It also checks that only -SomeSecurityLevel("public") values are stored e.g. in global data -structures. - -In this way, any CPython code like serve_guest() can safely call -``enter_bid(n)``. There is no way to leak information about the current -highest bid back out of the compiled enter_bid(). - - -Declassification ----------------- - -Now there must be a controlled way to leak the highest_bid value, -otherwise it is impossible even for the admin to read it. Note that -serve_admin(), which prints highest_bid, is considered to "leak" this -value because it is an input-output, i.e. it escapes the program. This -is a leak that we actually want -- the terminology is that serve_admin() -must "declassify" the value. - -To do this, there is a capability-like model that is easy to implement -for us. Let us modify the main loop as follows:: - - def mainloop(): - while True: - username = raw_input() - password = raw_input() - user, priviledge_token = authenticate(username, password) - if user == 'guest': - serve_guest() - elif user == 'admin': - serve_admin(priviledge_token) - del priviledge_token # make sure nobody else uses it - -The idea is that the authenticate() function (shown later) also returns -a "token" object.
This is a normal Python object, but it should not be -possible for normal Python code to instantiate such an object manually. -In this example, authenticate() returns a ``priviledge("public")`` for -guests, and a ``priviledge("secret")`` for admins. Now -- and this is -the insecure part of this scheme, but it is relatively easy to control --- the programmer must make sure that these priviledge_token objects -don't go to unexpected places, particularly the "secret" one. They work -like capabilities: having a reference to them allows parts of the -program to see secret information, of a confidentiality level up to the -one corresponding to the token. - -Now we modify serve_admin() as follows: - - def serve_admin(token): - print "Highest big is:", declassify(highest_bid, token=token) - -The declassify() function reads the value if the "token" is privileged -enough, and raises an exception otherwise. - -What are we protecting here? The fact that we need the administrator -token in order to see the highest bid. If by mistake we swap the -serve_guest() and serve_admin() lines in mainloop(), then what occurs is -that serve_admin() would be called with the guest token. Then -declassify() would fail. If we assume that authenticate() is not buggy, -then the rest of the program is safe from leak bugs. - -There are other variants of declassify() that are convenient. For -example, in the RPython parts of the code, declassify() can be used to -control more precisely at which confidentiality levels we want which -values, if there are more than just two such levels. The "token" -argument could also be implicit in RPython parts, meaning "use the -current level"; normal non-RPython code always runs at "public" level, -but RPython functions could run with higher current levels, e.g. if they -are called with a "token=..." argument. - -(Do not confuse this with what enter_bid() does: enter_bid() runs at the -public level all along.
It is ok for it to compute with, and even -modify, the highest_bid.value. The point of enter_bid() was that by -being an RPython function the annotator can make sure that the value, or -even anything that gives a hint about the value, cannot possibly escape -from the function.) - -It is also useful to have "globally trusted" administrator-level RPython -functions that always run at a higher level than the caller, a bit like -Unix programs with the "suid" bit. If we set aside the consideration -that it should not be possible to make new "suid" functions too easily, -then we could define the authenticate() function of our server example -as follows:: - - def authenticate(username, password): - database = {('guest', 'abc'): priviledge("public"), - ('admin', '123'): priviledge("secret")} - token_obj = database[username, password] - return username, declassify(token_obj, target_level="public") - - authenticate = secure(authenticate, suid="secret") - -The "suid" argument makes the compiled function run on level "secret" -even if the caller is "public" or plain CPython code. The declassify() -in the function is allowed because of the current level of "secret". -Note that the function returns a "public" tuple -- the username is -public, and the token_obj is declassified to public. This is the -property that allows CPython code to call it. - -Of course, like a Unix suid program the authenticate() function could be -buggy and leak information, but like suid programs it is small enough -for us to feel that it is secure just by staring at the code. 
- -An alternative to the suid approach is to play with closures, e.g.:: - - def setup(): - #initialize new levels -- this cannot be used to access existing levels - public_level = create_new_priviledge("public") - secret_level = create_new_priviledge("secret") - - database = {('guest', 'abc'): public_level, - ('admin', '123'): secret_level} - - def authenticate(username, password): - token_obj = database[username, password] - return username, declassify(token_obj, target_level="public", - token=secret_level) - - return secure(authenticate) - - authenticate = setup() - -In this approach, declassify() works because it has access to the -secret_level token. We still need to make authenticate() a secure() -compiled function to hide the database and the secret_level more -carefully; otherwise, code could accidentally find them by inspecting -the traceback of the KeyError exception if the username or password is -invalid. Also, secure() will check for us that authenticate() indeed -returns a "public" tuple. - -This basic model is easy to extend in various directions. For example -secure() RPython functions should be allowed to return non-public -results -- but then they have to be called either with an appropriate -"token=..." keyword, or else they return hidden objects again. They -could also be used directly from other RPython functions, in which the -level of what they return is propagated. - - -Related work ------------- - -What I'm describing here is nothing more than an adaptation of existing -techniques to RPython. - -It is noteworthy to mention at this point why the object space approach -doesn't work as well as we could first expect. The distinction between -static checking and dynamic checking (with labels only attached to -values) seems to be well known; also, it seems to be well known that the -latter is too coarse in practice. The problem is about branching and -looping. 
From the object space' point of view it is quite hard to know -what a newly computed value really depends on. Basically, it is -difficult to do better than: after is_true() has been called on a secret -object, then we must assume that all objects created are also secret -because they could depend in some way on the truth-value of the previous -secret object. - -The idea to dynamically use static analysis is the key new idea -presented by Steve Zdancewic in his talk. You can have small controlled -RPython parts of the program that must pass through a static analysis, -and we only need to check dynamically that some input conditions are -satisfied when other parts of the program call the RPython parts. -Previous research was mostly about designing languages that are -completely statically checked at compile-time. The delicate part is to -get the static/dynamic mixture right so that even indirect leaks are not -possible -- e.g. leaks that would occur from calling functions with -strange arguments to provoke exceptions, and where the presence of the -exception or not would be information in itself. This approach seems to -do that reliably. (Of course, at the talk many people including the -speaker were wondering about ways to move more of the checking at -compile-time, but Python people won't have such worries :-) diff --git a/pypy/doc/discussion/removing-stable-compiler.rst b/pypy/doc/discussion/removing-stable-compiler.rst deleted file mode 100644 --- a/pypy/doc/discussion/removing-stable-compiler.rst +++ /dev/null @@ -1,22 +0,0 @@ -February 28th, 2006 - -While implementing conditional expressions from 2.5 we had to change -the stable compiler in order to keep tests from breaking. While using -stable compiler as a baseline made sense when the ast compiler was -new, it is less and less true as new grammar changes are introduced. - -Options include - -1. Freezing the stable compiler at grammar 2.4. - -2. 
Capture AST output from the stable compiler and use that explicitly -in current tests instead of regenerating them every time, primarily -because it allows us to change the grammar without changing the stable -compiler. - - -In either case, AST production tests for new grammar changes could be -written manually, which is less effort than fixing the stable -compiler (which itself isn't really tested anyway). - -Discussion by Arre, Anders L., Stuart Williams diff --git a/pypy/doc/discussion/somepbc-refactoring-plan.rst b/pypy/doc/discussion/somepbc-refactoring-plan.rst deleted file mode 100644 --- a/pypy/doc/discussion/somepbc-refactoring-plan.rst +++ /dev/null @@ -1,161 +0,0 @@ -========================== - Refactoring SomePBCs -========================== - -Motivation -========== - -Some parts of the annotator, and especially specialization, are quite obscure -and hackish. One cause for this is the need to manipulate Python objects like -functions directly. This makes it hard to attach additional information directly -to the objects. It makes specialization messy because it has to create new dummy -function objects just to represent the various specialized versions of the function. - - -Plan -==== - -Let's introduce nice wrapper objects. This refactoring is oriented towards -the following goal: replacing the content of SomePBC() with a plain set of -"description" wrapper objects. We shall probably also remove the possibility -for None to explicitly be in the set and add a can_be_None flag (this is -closer to what the other SomeXxx classes do). - - -XxxDesc classes -=============== - -To be declared in module pypy.annotator.desc, with a mapping -annotator.bookkeeper.descs = {: } -accessed with bookkeeper.getdesc(). - -Maybe later the module should be moved out of pypy.annotation but for now I -suppose that it's the best place. - -The goal is to have a single Desc wrapper even for functions and classes that -are specialized. 
- -FunctionDesc - - Describes (usually) a Python function object. Contains flow graphs: one - in the common case, zero for external functions, more than one if there - are several specialized versions. Also describes the signature of the - function in a nice format (i.e. not by relying on func_code inspection). - -ClassDesc - - Describes a Python class object. Generally just maps to a ClassDef, but - could map to more than one in the presence of specialization. So we get - SomePBC({}) annotations for the class, and when it's - instantiated it becomes SomeInstance(classdef=...) for the particular - selected classdef. - -MethodDesc - - Describes a bound method. Just references a FunctionDesc and a ClassDef - (not a ClassDesc, because it's read out of a SomeInstance). - -FrozenDesc - - Describes a frozen pre-built instance. That's also a good place to store - some information currently in dictionaries of the bookkeeper. - -MethodOfFrozenDesc - - Describes a method of a FrozenDesc. Just references a FunctionDesc and a - FrozenDesc. - -NB: unbound method objects are the same as function for our purposes, so they -become the same FunctionDesc as their im_func. - -These XxxDesc classes should share some common interface, as we'll see during -the refactoring. A common base class might be a good idea (at least I don't -see why it would be a bad idea :-) - - -Implementation plan -=================== - -* make a branch (/branch/somepbc-refactoring/) - -* change the definition of SomePBC, start pypy.annotation.desc - -* fix all places that use SomePBC :-) - -* turn Translator.flowgraphs into a plain list of flow graphs, - and make the FunctionDescs responsible for computing their own flow graphs - -* move external function functionality into the FunctionDescs too - - -Status -====== - -Done, branch merged. - - -RTyping PBCs of functions -========================= - -The FuncDesc.specialize() method takes an args_s and return a -corresponding graph. 
The caller of specialize() parses the actual -arguments provided by the simple_call or call_args operation, so that -args_s is a flat parsed list. The returned graph must have the same -number and order of input variables. - -For each call family, we compute a table like this (after annotation -finished):: - - call_shape FuncDesc1 FuncDesc2 FuncDesc3 ... - ---------------------------------------------------------- - call0 shape1 graph1 - call1 shape1 graph1 graph2 - call2 shape1 graph3 graph4 - call3 shape2 graph5 graph6 - - -We then need to merge some of the lines if they look similar enough, -e.g. call0 and call1. Precisely, we can merge two lines if they only -differ in having more or less holes. In theory, the same graph could -appear in two lines that are still not mergeable because of other -graphs. For sanity of implementation, we should check that at the end -each graph only appears once in the table (unless there is only one -*column*, in which case all problems can be dealt with at call sites). - -(Note that before this refactoring, the code was essentially requiring -that the table ended up with either one single row or one single -column.) - -The table is computed when the annotation is complete, in -compute_at_fixpoint(), which calls the FuncDesc's consider_call_site() -for each call site. The latter merges lines as soon as possible. The -table is attached to the call family, grouped by call shape. - -During RTyping, compute_at_fixpoint() is called after each new ll -helper is annotated. Normally, this should not modify existing tables -too much, but in some situations it will. So the rule is that -consider_call_site() should not add new (unmerged) rows to the table -after the table is considered "finished" (again, unless there is only -one column, in which case we should not discover new columns). - -XXX this is now out of date, in the details at least. 
- -RTyping other callable PBCs -=========================== - -The above picture attaches "calltable" information to the call -families containing the function. When it comes to rtyping a call of -another kind of pbc (class, instance-method, frozenpbc-method) we have -two basic choices: - - - associate the calltable information with the funcdesc that - ultimately ends up getting called, or - - - attach the calltable to the callfamily that contains the desc - that's actually being called. - -Neither is totally straightforward: the former is closer to what -happens on the trunk but new families of funcdescs need to be created -at the end of annotation or by normalisation. The latter is more of a -change. The former is also perhaps a bit unnatural for ootyped -backends. diff --git a/pypy/doc/discussion/emptying-the-malloc-zoo.rst b/pypy/doc/discussion/emptying-the-malloc-zoo.rst deleted file mode 100644 --- a/pypy/doc/discussion/emptying-the-malloc-zoo.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. coding: utf-8 - -Emptying the malloc zoo -======================= - -Around the end-of-the-EU-project time there were two major areas of -obscurity in the memory management area: - - 1. The confusing set of operations that the low-level backend are - expected to implement. - - 2. The related, but slightly different, confusion of the various - "flavours" of malloc: what's the difference between - lltype.malloc(T, flavour='raw') and llmemory.raw_malloc(sizeof(T))? - -At the post-ep2007 sprint, Samuele and Michael attacked the first -problem a bit: making the Boehm GC transformer only require three -simple operations of the backend. This could be extending still -further by having the gc transformer use rffi to insert calls to the -relevant Boehm functions^Wmacros, and then the backend wouldn't need -to know anything about Boehm at all (but... LLVM). - -A potential next step is to work out what we want the "llpython" -interface to memory management to be. 
- -There are various use cases: - -**lltype.malloc(T) – T is a fixed-size GC container** - - This is the default case. Non-pointers inside the allocated memory - will not be zeroed. The object will be managed by the GC, no - deallocation required. - -**lltype.malloc(T, zero=True) – T is a GC container** - - As above, but all fields will be cleared. - -**lltype.malloc(U, raw=True) – U is not a GC container** - - Blah. diff --git a/pypy/doc/discussion/paper-wishlist.rst b/pypy/doc/discussion/paper-wishlist.rst deleted file mode 100644 --- a/pypy/doc/discussion/paper-wishlist.rst +++ /dev/null @@ -1,27 +0,0 @@ -Things we would like to write papers about -========================================== - -- object space architecture + reflective space -- stackless transformation -- composable coroutines -- jit: - - overview paper - - putting our jit into the context of classical partial evaluation - - a jit technical paper too, probably - -- sandboxing - -Things about which writing a paper would be nice, which need more work first -============================================================================ - -- taint object space -- logic object space - -- jit - - - with some more work: how to deal in a JIT backend with less-that- - full-function compilation unit - - - work in progress (Anto?): our JIT on the JVM - - (later) removing the overhead of features not used, e.g. thunk space or - another special space diff --git a/pypy/doc/discussion/summer-of-pypy-pytest.rst b/pypy/doc/discussion/summer-of-pypy-pytest.rst deleted file mode 100644 --- a/pypy/doc/discussion/summer-of-pypy-pytest.rst +++ /dev/null @@ -1,56 +0,0 @@ -============================================ -Summer of PyPy proposal: Distributed py.test -============================================ - - -Purpose: -======== - -The main purpose of distributing py.test is to speedup tests -of actual applications (running all pypy tests already takes -ages). 
- -Method: -======= - -Remote imports: ---------------- - -On the beginning of communication, master server sends to client -import hook code, which then can import all needed libraries. - -Libraries are uploaded server -> client if they're needed (when -__import__ is called). Possible extension is to add some kind of -checksum (md5?) and store files in some directory. - -Previous experiments: ---------------------- - -Previous experiments tried to run on the lowest level - when function/ -method is called. This is pretty clear (you run as few code on client -side as possible), but has got some drawbacks: - -- You must simulate *everything* and transform it to server side in - case of need of absolutely anything (tracebacks, short and long, - source code etc.) -- It's sometimes hard to catch exceptions. -- Top level code in testing module does not work at all. - -Possible approach: ------------------- - -On client side (side really running tests) run some kind of cut-down -session, which is imported by remote import at the very beginning and -after that, we run desired tests (probably by importing whole test -file which allows us to have top-level imports). - -Then we transfer output data to server as string, possibly tweaking -file names (which is quite easy). - -Deliverables: -============= - -- better use of testing machines -- cut down test time -- possible extension to run distributed code testing, by running and - controlling several distributed parts on different machines. 
diff --git a/pypy/doc/discussion/parsing-ideas.rst b/pypy/doc/discussion/parsing-ideas.rst deleted file mode 100644 --- a/pypy/doc/discussion/parsing-ideas.rst +++ /dev/null @@ -1,5 +0,0 @@ -add a way to modularize regular expressions: - -_HEXNUM = "..."; -_DECNUM = "..."; -NUM = "{_HEXNUM}|{_DECNUM}"; From commits-noreply at bitbucket.org Mon Apr 25 12:44:43 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 12:44:43 +0200 (CEST) Subject: [pypy-svn] pypy default: fixed encoding tests Message-ID: <20110425104443.81BC1282B90@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43571:8ecddd13dfe3 Date: 2011-04-25 12:44 +0200 http://bitbucket.org/pypy/pypy/changeset/8ecddd13dfe3/ Log: fixed encoding tests diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -21,10 +21,12 @@ assert_encodes_as(cb32, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xB9\x39\x30') # 64-bit - assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x89\xD9') + assert_encodes_as(cb64, "MOV16", (r8, ebx), '\x66\x41\x89\xD8') # 11 011 000 + assert_encodes_as(cb64, "MOV16", (ebx, r8), '\x66\x44\x89\xC3') # 11 000 011 + assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x40\x89\xD9') # XXX: What we are testing for here is actually not the most compact # encoding. 
- assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xC7\xC1\x39\x30') + assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\x40\xC7\xC1\x39\x30') assert_encodes_as(cb64, "MOV16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\xC7\x45\x00\x39\x30') def test_cmp_16(): @@ -33,8 +35,10 @@ assert_encodes_as(cb32, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') # 64-bit - assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x39\xD9') - assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') + assert_encodes_as(cb64, "CMP16", (r8, ebx), '\x66\x41\x39\xD8') # 11 011 000 + assert_encodes_as(cb64, "CMP16", (ebx, r8), '\x66\x44\x39\xC3') # 11 000 011 + assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x40\x39\xD9') + assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x40\x81\xF9\x39\x30') assert_encodes_as(cb64, "CMP16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\x81\x7D\x00\x39\x30') def test_relocation(): From commits-noreply at bitbucket.org Mon Apr 25 13:46:35 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 13:46:35 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Get rid of buildtool.rst. Message-ID: <20110425114635.E8491282B90@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43572:2b2d4b78089e Date: 2011-04-25 13:45 +0200 http://bitbucket.org/pypy/pypy/changeset/2b2d4b78089e/ Log: Get rid of buildtool.rst. diff --git a/pypy/doc/buildtool.rst b/pypy/doc/buildtool.rst deleted file mode 100644 --- a/pypy/doc/buildtool.rst +++ /dev/null @@ -1,251 +0,0 @@ -============ -PyPyBuilder -============ - -.. include:: throwaway.rst - -What is this? -============= - -PyPyBuilder is an application that allows people to build PyPy instances on -demand. 
If you have a nice idle machine connected to the Internet, and don't -mind us 'borrowing' it every once in a while, you can start up the client -script (in bin/client) and have the server send compile jobs to your machine. -If someone requests a build of PyPy that is not already available on the PyPy -website, and your machine is capable of making such a build, the server may ask -your machine to create it. If enough people participate, with diverse enough -machines, a 'build farm' is created. - -Quick usage instructions -======================== - -For the impatient, that just want to get started, some quick instructions. - -First you'll need to have a checkout of the 'buildtool' package, that can -be found here:: - - https://codespeak.net/svn/pypy/build/buildtool - -To start a compilation, run (from the buildtool root directory):: - - $ ./bin/startcompile.py [options] - -where the options can be found by using --help, and the email address will be -used to send mail to once the compilation is finished. - -To start a build server, to participate in the build farm, do:: - - $ ./bin/buildserver.py - -That's it for the compilation script and build server, if you have your own -project and want to set up your own meta server, you'll have to be a bit more -patient and read the details below... - -Components -========== - -The application consists of 3 main components: a meta server component, a -client component that handles compilations (let's call this a 'build server') -and a small client component to start compile jobs (which we'll call -'requesting clients' for now). - -The server waits for build server to register, and for compile job -requests. When participating clients register, they pass the server information -about what compilations the system can handle (system info), and a set of -options to use for compilation (compile info). 
- -When now a requesting client requests a compilation job, the server checks -whether a suitable binary is already available based on the system and compile -info, and if so returns that. If there isn't one, the server walks through a -list of connected participating clients to see if one of them can handle the -job, and if so dispatches the compilation. If there's no participating client -to handle the job, it gets queued until there is. - -If a client crashes during compilation, the build is restarted, or error -information is sent to the logs and requesting client, depending on the type of -error. As long as no compilation error occurs (read: on disconnects, system -errors, etc.) compilation will be retried until a build is available. - -Once a build is available, the server will send an email to all clients waiting -for the build (it could be that more than one person asked for some build at -the same time!). - -Configuration -============= - -There are several aspects to configuration on this system. Of course, for the -meta server, build server and startcompile components there is configuration -for the host and port to connect to, and there is some additional configuration -for things like which mailhost to use (only applies to the server), but also -there is configuration data passed around to determine what client is picked, -and what the client needs to compile exactly. - -Config file ------------ - -The host/port configuration etc. can be found in the file 'config.py' in the -build tool dir. There are several things that can be configured here, mostly -related to what application to build, and where to build it. Please read the -file carefully when setting up a new build network, or when participating for -compilation, because certain items (e.g. the svnpath_to_url function, or the -client_checkers) can make the system a lot less secure when not configured -properly. 
- -Note that all client-related configuration is done from command-line switches, -so the configuration file is supposed to be changed on a per-project basis: -unless you have specific needs, use a test version of the build tool, or are -working on another project than PyPy, you will not want to modify the it. - -System configuration --------------------- - -This information is used by the client and startcompile components. On the -participating clients this information is retrieved by querying the system, on -the requesting clients the system values are used by default, but may be -overridden (so a requesting client running an x86 can still request PPC builds, -for instance). The clients compare their own system config to that of a build -request, and will (should) refuse a build if it can not be executed because -of incompatibilities. - -Compilation configuration -------------------------- - -The third form of configuration is that of the to-be-built application itself, -its compilation arguments. This configuration is only provided by the -requesting clients, build servers can examine the information and refuse a -compilation based on this configuration (just like with the system config, see -'client_checkers' in 'config.py'). Compilation configuration can be controlled -using command-line arguments (use 'bin/startcompile.py --help' for an -overview). - -Build tool options ------------------- - -Yet another part of the configuration are the options that are used by the -startcompile.py script itself: the user can specify what SVN path (relative to -a certain base path) and what Subversion revision is desired. The revision can -either be specified exactly, or as a range of versions. - -Installation -============ - -Build Server ------------- - -Installing the system should not be required: just run './bin/buildserver' to -start. Note that it depends on the `py lib`_ (as does the rest of PyPy). 
- -When starting a build server with PyPy's default configuration, it will connect -to a meta server we have running in codespeak.net. - -Meta Server ------------ - -Also for the server there's no real setup required, and again there's a -dependency on the `py lib`_. Starting it is done by running -'./bin/metaserver'. - -Running a compile job ---------------------- - -Again installation is not required, just run './bin/startcompile.py [options] -' (see --help for the options) to start. Again, you need to have the -`py lib`_ installed. - -Normally the codespeak.net meta server will be used when this script is issued. - -.. _`py lib`: http://codespeak.net/py - -Using the build tool for other projects -======================================= - -The code for the build tool is meant to be generic. Using it for other projects -than PyPy (for which it was originally written) is relatively straight-forward: -just change the configuration, and implement a build client script (probably -highly resembling bin/buildserver.py). - -Note that there is a test project in 'tool/build/testproject' that can serve -as an example. - -Prerequisites --------------- - -Your project can use the build tool if: - - * it can be built from Python - - Of course this is a rather vague requirement: theoretically _anything_ can - be built from Python; it's just a matter of integrating it into the tool - properly... A project that can entirely be built from Python code (like - PyPy) is easier to integrate than something that is built from the command - line, though (although implementing that won't be very hard either, see - the test project for instance). - - * it is located in Subversion - - The build tool makes very little hard-coded assumptions, but having code - in Subversion is one of them. 
There are several locations in the code where - SVN is assumed: the command line options (see `build tool options`_), - the server (which checks SVN urls for validity, and converts HEAD revision - requests to actual revision ids) and and build client (which checks out the - data) all make this assumption, changing to a different revision control - system is currently not easy and unsupported (but who knows what the future - will bring). - - * it uses PyPy's config mechanism - - PyPy has a very nice, generic configuration mechanism (essentially wrapper - OptionParser stuff) that makes dealing with fragmented configuration - and command-line options a lot easier. This mechanism is used by the build - tool: it assumes configuration is provided in this format. If your project - uses this configuration mechanism already, you can provide the root Config - object from config.compile_config; if not it should be fairly straight- - forward to wrap your existing configuration with the PyPy stuff. - -Basically that's it: if your project is stored in SVN, and you don't mind using -Python a bit, it shouldn't be too hard to get things going (note that more -documentation about this subject will follow in the future). - -Web Front-End -============= - -To examine the status of the meta server, connected build servers and build -requests, there is a web server available. This can be started using -'./bin/webserver' and uses port 8080 by default (override in -config.py). - -The web server presents a number of different pages: - - * / and /metaserverstatus - meta server status - - this displays a small list of information about the meta server, such - as the amount of connected build servers, the amount of builds available, - the amount of waiting clients, etc. 
- - * /buildservers - connected build servers - - this page contains a list of all connected build servers, system - information and what build they're currently working on (if any) - - * /builds - a list of builds - - here you'll find a list of all builds, both done and in-progress and - queued ones, with links to the details pages, the date they were - requested and their status - - * /build/ - build details - - the 'build' (virtual) directory contains pages of information for each - build - each of those pages displays status information, time requested, - time started and finished (if appropriate), links to the zip and logs, - and system and compile information - -There's a build tool status web server for the meta server on codespeak.net -available at http://codespeak.net/pypy/buildstatus/. - -More info -========= - -For more information, bug reports, patches, etc., please send an email to -guido at merlinux.de. - diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -10,7 +10,6 @@ .. 
toctree:: - buildtool.rst distribution.rst externaltools.rst From commits-noreply at bitbucket.org Mon Apr 25 13:47:37 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 13:47:37 +0200 (CEST) Subject: [pypy-svn] pypy default: merge jit-lsprofile Message-ID: <20110425114737.8A333282B90@codespeak.net> Author: Dario Bertini Branch: Changeset: r43573:6cfa4ea038ab Date: 2011-04-25 13:41 +0200 http://bitbucket.org/pypy/pypy/changeset/6cfa4ea038ab/ Log: merge jit-lsprofile diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -532,7 +532,10 @@ raise LLFatalError(msg, LLException(ll_exc_type, ll_exc)) def op_debug_llinterpcall(self, pythonfunction, *args_ll): - return pythonfunction(*args_ll) + try: + return pythonfunction(*args_ll) + except: + self.make_llexception() def op_debug_start_traceback(self, *args): pass # xxx write debugging code here? diff --git a/pypy/translator/c/src/debug_print.h b/pypy/translator/c/src/debug_print.h --- a/pypy/translator/c/src/debug_print.h +++ b/pypy/translator/c/src/debug_print.h @@ -20,7 +20,6 @@ Note that 'fname' can be '-' to send the logging data to stderr. */ - /* macros used by the generated code */ #define PYPY_HAVE_DEBUG_PRINTS (pypy_have_debug_prints & 1 ? 
\ (pypy_debug_ensure_opened(), 1) : 0) @@ -40,174 +39,24 @@ extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; +#define OP_LL_READ_TIMESTAMP(val) READ_TIMESTAMP(val) -/* implementations */ +#include "src/asm.h" -#ifndef PYPY_NOT_MAIN_FILE -#include - -#if defined(__GNUC__) && defined(__linux__) -# include - static void pypy_setup_profiling() - { - cpu_set_t set; - CPU_ZERO(&set); - CPU_SET(0, &set); /* restrict to a single cpu */ - sched_setaffinity(0, sizeof(cpu_set_t), &set); - } -#else -static void pypy_setup_profiling() { } -#endif - -long pypy_have_debug_prints = -1; -FILE *pypy_debug_file = NULL; -static bool_t debug_ready = 0; -static bool_t debug_profile = 0; -static char *debug_start_colors_1 = ""; -static char *debug_start_colors_2 = ""; -static char *debug_stop_colors = ""; -static char *debug_prefix = NULL; - -static void pypy_debug_open(void) -{ - char *filename = getenv("PYPYLOG"); - if (filename) -#ifndef MS_WINDOWS - unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ -#else - putenv("PYPYLOG="); /* don't pass it to subprocesses */ -#endif - if (filename && filename[0]) - { - char *colon = strchr(filename, ':'); - if (!colon) - { - /* PYPYLOG=filename --- profiling version */ - debug_profile = 1; - pypy_setup_profiling(); - } - else - { - /* PYPYLOG=prefix:filename --- conditional logging */ - int n = colon - filename; - debug_prefix = malloc(n + 1); - memcpy(debug_prefix, filename, n); - debug_prefix[n] = '\0'; - filename = colon + 1; - } - if (strcmp(filename, "-") != 0) - pypy_debug_file = fopen(filename, "w"); - } - if (!pypy_debug_file) - { - pypy_debug_file = stderr; - if (isatty(2)) - { - debug_start_colors_1 = "\033[1m\033[31m"; - debug_start_colors_2 = "\033[31m"; - debug_stop_colors = "\033[0m"; - } - } - debug_ready = 1; -} - -void pypy_debug_ensure_opened(void) -{ - if (!debug_ready) - pypy_debug_open(); -} - - -#ifndef READ_TIMESTAMP /* asm_xxx.h may contain a specific implementation of READ_TIMESTAMP. 
* This is the default generic timestamp implementation. */ +#ifndef READ_TIMESTAMP + # ifdef _WIN32 # define READ_TIMESTAMP(val) QueryPerformanceCounter((LARGE_INTEGER*)&(val)) # else # include # include + +long long pypy_read_timestamp(); + # define READ_TIMESTAMP(val) (val) = pypy_read_timestamp() - static long long pypy_read_timestamp(void) - { -# ifdef CLOCK_THREAD_CPUTIME_ID - struct timespec tspec; - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec); - return ((long long)tspec.tv_sec) * 1000000000LL + tspec.tv_nsec; -# else - /* argh, we don't seem to have clock_gettime(). Bad OS. */ - struct timeval tv; - gettimeofday(&tv, NULL); - return ((long long)tv.tv_sec) * 1000000LL + tv.tv_usec; -# endif - } # endif #endif - - -static bool_t startswithoneof(const char *str, const char *substr) -{ - const char *p = str; - for (; *substr; substr++) - { - if (*substr != ',') - { - if (p && *p++ != *substr) - p = NULL; /* mismatch */ - } - else if (p != NULL) - return 1; /* match */ - else - p = str; /* mismatched, retry with the next */ - } - return p != NULL; -} - -#if defined(_MSC_VER) || defined(__MINGW32__) -#define PYPY_LONG_LONG_PRINTF_FORMAT "I64" -#else -#define PYPY_LONG_LONG_PRINTF_FORMAT "ll" -#endif - -static void display_startstop(const char *prefix, const char *postfix, - const char *category, const char *colors) -{ - long long timestamp; - READ_TIMESTAMP(timestamp); - fprintf(pypy_debug_file, "%s[%"PYPY_LONG_LONG_PRINTF_FORMAT"x] %s%s%s\n%s", - colors, - timestamp, prefix, category, postfix, - debug_stop_colors); -} - -void pypy_debug_start(const char *category) -{ - pypy_debug_ensure_opened(); - /* Enter a nesting level. Nested debug_prints are disabled by default - because the following left shift introduces a 0 in the last bit. - Note that this logic assumes that we are never going to nest - debug_starts more than 31 levels (63 on 64-bits). 
*/ - pypy_have_debug_prints <<= 1; - if (!debug_profile) - { - /* non-profiling version */ - if (!debug_prefix || !startswithoneof(category, debug_prefix)) - { - /* wrong section name, or no PYPYLOG at all, skip it */ - return; - } - /* else make this subsection active */ - pypy_have_debug_prints |= 1; - } - display_startstop("{", "", category, debug_start_colors_1); -} - -void pypy_debug_stop(const char *category) -{ - if (debug_profile | (pypy_have_debug_prints & 1)) - display_startstop("", "}", category, debug_start_colors_2); - pypy_have_debug_prints >>= 1; -} - -#endif /* PYPY_NOT_MAIN_FILE */ diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2100,6 +2100,23 @@ assert self.meta_interp(f, [5, 100]) == 0 self.check_loops(int_rshift=1, everywhere=True) + + def test_read_timestamp(self): + import time + from pypy.rlib.rtimer import read_timestamp + def busy_loop(): + start = time.time() + while time.time() - start < 0.1: + # busy wait + pass + + def f(): + t1 = read_timestamp() + busy_loop() + t2 = read_timestamp() + return t2 - t1 > 1000 + res = self.interp_operations(f, []) + assert res class TestOOtype(BasicTests, OOJitMixin): diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/debug_print.c @@ -0,0 +1,150 @@ + +#include +#include +#include + +#include +#include +#include "src/profiling.h" +#include "src/debug_print.h" + +long pypy_have_debug_prints = -1; +FILE *pypy_debug_file = NULL; +static unsigned char debug_ready = 0; +static unsigned char debug_profile = 0; +static char *debug_start_colors_1 = ""; +static char *debug_start_colors_2 = ""; +static char *debug_stop_colors = ""; +static char *debug_prefix = NULL; + +static void pypy_debug_open(void) +{ + char *filename = getenv("PYPYLOG"); + if (filename) +#ifndef MS_WINDOWS + 
unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ +#else + putenv("PYPYLOG="); /* don't pass it to subprocesses */ +#endif + if (filename && filename[0]) + { + char *colon = strchr(filename, ':'); + if (!colon) + { + /* PYPYLOG=filename --- profiling version */ + debug_profile = 1; + pypy_setup_profiling(); + } + else + { + /* PYPYLOG=prefix:filename --- conditional logging */ + int n = colon - filename; + debug_prefix = malloc(n + 1); + memcpy(debug_prefix, filename, n); + debug_prefix[n] = '\0'; + filename = colon + 1; + } + if (strcmp(filename, "-") != 0) + pypy_debug_file = fopen(filename, "w"); + } + if (!pypy_debug_file) + { + pypy_debug_file = stderr; + if (isatty(2)) + { + debug_start_colors_1 = "\033[1m\033[31m"; + debug_start_colors_2 = "\033[31m"; + debug_stop_colors = "\033[0m"; + } + } + debug_ready = 1; +} + +void pypy_debug_ensure_opened(void) +{ + if (!debug_ready) + pypy_debug_open(); +} + + +#ifndef _WIN32 + + static long long pypy_read_timestamp(void) + { +# ifdef CLOCK_THREAD_CPUTIME_ID + struct timespec tspec; + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec); + return ((long long)tspec.tv_sec) * 1000000000LL + tspec.tv_nsec; +# else + /* argh, we don't seem to have clock_gettime(). Bad OS. 
*/ + struct timeval tv; + gettimeofday(&tv, NULL); + return ((long long)tv.tv_sec) * 1000000LL + tv.tv_usec; +# endif + } +#endif + + +static unsigned char startswithoneof(const char *str, const char *substr) +{ + const char *p = str; + for (; *substr; substr++) + { + if (*substr != ',') + { + if (p && *p++ != *substr) + p = NULL; /* mismatch */ + } + else if (p != NULL) + return 1; /* match */ + else + p = str; /* mismatched, retry with the next */ + } + return p != NULL; +} + +#if defined(_MSC_VER) || defined(__MINGW32__) +#define PYPY_LONG_LONG_PRINTF_FORMAT "I64" +#else +#define PYPY_LONG_LONG_PRINTF_FORMAT "ll" +#endif + +static void display_startstop(const char *prefix, const char *postfix, + const char *category, const char *colors) +{ + long long timestamp; + READ_TIMESTAMP(timestamp); + fprintf(pypy_debug_file, "%s[%"PYPY_LONG_LONG_PRINTF_FORMAT"x] %s%s%s\n%s", + colors, + timestamp, prefix, category, postfix, + debug_stop_colors); +} + +void pypy_debug_start(const char *category) +{ + pypy_debug_ensure_opened(); + /* Enter a nesting level. Nested debug_prints are disabled by default + because the following left shift introduces a 0 in the last bit. + Note that this logic assumes that we are never going to nest + debug_starts more than 31 levels (63 on 64-bits). 
*/ + pypy_have_debug_prints <<= 1; + if (!debug_profile) + { + /* non-profiling version */ + if (!debug_prefix || !startswithoneof(category, debug_prefix)) + { + /* wrong section name, or no PYPYLOG at all, skip it */ + return; + } + /* else make this subsection active */ + pypy_have_debug_prints |= 1; + } + display_startstop("{", "", category, debug_start_colors_1); +} + +void pypy_debug_stop(const char *category) +{ + if (debug_profile | (pypy_have_debug_prints & 1)) + display_startstop("", "}", category, debug_start_colors_2); + pypy_have_debug_prints >>= 1; +} diff --git a/pypy/jit/codewriter/longlong.py b/pypy/jit/codewriter/longlong.py --- a/pypy/jit/codewriter/longlong.py +++ b/pypy/jit/codewriter/longlong.py @@ -16,6 +16,7 @@ from pypy.rlib.objectmodel import compute_hash + is_64_bit = True supports_longlong = False r_float_storage = float FLOATSTORAGE = lltype.Float @@ -32,6 +33,7 @@ from pypy.rlib import rarithmetic, longlong2float + is_64_bit = False supports_longlong = True r_float_storage = rarithmetic.r_longlong FLOATSTORAGE = lltype.SignedLongLong diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -20,33 +20,33 @@ 'fastlocals_w[*]', 'last_exception', 'lastblock', + 'is_being_profiled', ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] -def get_printable_location(next_instr, bytecode): +def get_printable_location(next_instr, is_being_profiled, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names name = opcode_method_names[ord(bytecode.co_code[next_instr])] return '%s #%d %s' % (bytecode.get_repr(), next_instr, name) -def get_jitcell_at(next_instr, bytecode): - return bytecode.jit_cells.get(next_instr, None) +def get_jitcell_at(next_instr, is_being_profiled, bytecode): + return bytecode.jit_cells.get((next_instr, is_being_profiled), None) -def set_jitcell_at(newcell, next_instr, bytecode): - bytecode.jit_cells[next_instr] = newcell 
+def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): + bytecode.jit_cells[next_instr, is_being_profiled] = newcell -def confirm_enter_jit(next_instr, bytecode, frame, ec): +def confirm_enter_jit(next_instr, is_being_profiled, bytecode, frame, ec): return (frame.w_f_trace is None and - ec.profilefunc is None and ec.w_tracefunc is None) -def can_never_inline(next_instr, bytecode): +def can_never_inline(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 class PyPyJitDriver(JitDriver): reds = ['frame', 'ec'] - greens = ['next_instr', 'pycode'] + greens = ['next_instr', 'is_being_profiled', 'pycode'] virtualizables = ['frame'] ## def compute_invariants(self, reds, next_instr, pycode): @@ -68,13 +68,16 @@ def dispatch(self, pycode, next_instr, ec): self = hint(self, access_directly=True) next_instr = r_uint(next_instr) + is_being_profiled = self.is_being_profiled try: while True: pypyjitdriver.jit_merge_point(ec=ec, - frame=self, next_instr=next_instr, pycode=pycode) + frame=self, next_instr=next_instr, pycode=pycode, + is_being_profiled=is_being_profiled) co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) + is_being_profiled = self.is_being_profiled except ExitFrame: return self.popvalue() @@ -97,7 +100,8 @@ jumpto = r_uint(self.last_instr) # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, - pycode=self.getcode()) + pycode=self.getcode(), + is_being_profiled=self.is_being_profiled) return jumpto diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -422,12 +422,12 @@ # Method names take the form of -# +# # _ # # For example, the method name for "mov reg, immed" is MOV_ri. Operand order # is Intel-style, with the destination first. 
-# +# # The operand type codes are: # r - register # b - ebp/rbp offset @@ -565,6 +565,9 @@ # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # ------------------------------ Random mess ----------------------- + RDTSC = insn('\x0F\x31') + # reserved as an illegal instruction UD2 = insn('\x0F\x0B') diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,17 +1,19 @@ try: - def main(n): - def g(n): - return range(n) - s = 0 - for i in range(n): # ID: for - tmp = g(n) - s += tmp[i] # ID: getitem - a = 0 - return s - main(10) - + def g(x): + return x - 1 + def f(x): + while x: + x = g(x) + import cProfile + import time + t1 = time.time() + cProfile.run("f(10000000)") + t2 = time.time() + f(10000000) + t3 = time.time() + print t2 - t1, t3 - t2, (t3 - t2) / (t2 - t1) except Exception, e: print "Exception: ", type(e) print e - + diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -915,6 +915,14 @@ from pypy.rlib.rarithmetic import LONG_BIT defines['PYPY_LONG_BIT'] = LONG_BIT +def add_extra_files(eci): + srcdir = py.path.local(autopath.pypydir).join('translator', 'c', 'src') + files = [ + srcdir / 'profiling.c', + srcdir / 'debug_print.c', + ] + return eci.merge(ExternalCompilationInfo(separate_module_files=files)) + def gen_source_standalone(database, modulename, targetdir, eci, entrypointname, defines={}): assert database.standalone @@ -964,6 +972,7 @@ print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n fi.close() + eci = add_extra_files(eci) eci = eci.convert_sources_to_files(being_main=True) files, eci = eci.get_module_files() return eci, filename, sg.getextrafiles() + list(files) @@ -1010,6 +1019,7 @@ gen_startupcode(f, database) f.close() + eci = add_extra_files(eci) eci = eci.convert_sources_to_files(being_main=True) files, eci = eci.get_module_files() return eci, filename, 
sg.getextrafiles() + list(files) diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py --- a/pypy/rlib/debug.py +++ b/pypy/rlib/debug.py @@ -175,6 +175,7 @@ c_pythonfunction = hop.inputconst(lltype.Void, pythonfunction) args_v = [hop.inputarg(hop.args_r[i], arg=i) for i in range(2, hop.nb_args)] + hop.exception_is_here() return hop.genop('debug_llinterpcall', [c_pythonfunction] + args_v, resulttype=RESTYPE) diff --git a/pypy/rpython/test/test_llinterp.py b/pypy/rpython/test/test_llinterp.py --- a/pypy/rpython/test/test_llinterp.py +++ b/pypy/rpython/test/test_llinterp.py @@ -658,3 +658,25 @@ assert x == -42 res = interpret(f, []) + +def test_raising_llimpl(): + from pypy.rpython.extfunc import register_external + + def external(): + pass + + def raising(): + raise OSError(15, "abcd") + + ext = register_external(external, [], llimpl=raising, llfakeimpl=raising) + + def f(): + # this is a useful llfakeimpl that raises an exception + try: + external() + return True + except OSError: + return False + + res = interpret(f, []) + assert not res diff --git a/pypy/rlib/test/test_rtimer.py b/pypy/rlib/test/test_rtimer.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/test_rtimer.py @@ -0,0 +1,28 @@ +import time + +from pypy.rlib.rtimer import read_timestamp +from pypy.rpython.test.test_llinterp import interpret +from pypy.translator.c.test.test_genc import compile + +def timer(): + t1 = read_timestamp() + start = time.time() + while time.time() - start < 0.1: + # busy wait + pass + t2 = read_timestamp() + return t2 - t1 + +def test_timer(): + diff = timer() + # We're counting ticks, verify they look correct + assert diff > 1000 + +def test_annotation(): + diff = interpret(timer, []) + assert diff > 1000 + +def test_compile_c(): + function = compile(timer, []) + diff = function() + assert diff > 1000 \ No newline at end of file diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ 
b/pypy/jit/backend/x86/regalloc.py @@ -367,7 +367,7 @@ self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, result_loc, current_depths) - self.possibly_free_vars(guard_op.getfailargs()) + self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): if not we_are_translated(): @@ -443,7 +443,7 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + longevity = {} for arg in produced: if arg in last_used: @@ -837,7 +837,7 @@ self._call(op, [imm(size), vable] + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_op) - + def consider_cond_call_gc_wb(self, op): assert op.result is None args = op.getarglist() @@ -1217,6 +1217,29 @@ else: raise AssertionError("bad unicode item size") + def consider_read_timestamp(self, op): + tmpbox_high = TempBox() + self.rm.force_allocate_reg(tmpbox_high, selected_reg=eax) + if longlong.is_64_bit: + # on 64-bit, use rax as temporary register and returns the + # result in rdx + result_loc = self.rm.force_allocate_reg(op.result, + selected_reg=edx) + self.Perform(op, [], result_loc) + else: + # on 32-bit, use both eax and edx as temporary registers, + # use a temporary xmm register, and returns the result in + # another xmm register. 
+ tmpbox_low = TempBox() + self.rm.force_allocate_reg(tmpbox_low, selected_reg=edx) + xmmtmpbox = TempBox() + xmmtmploc = self.xrm.force_allocate_reg(xmmtmpbox) + result_loc = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [xmmtmploc], result_loc) + self.xrm.possibly_free_var(xmmtmpbox) + self.rm.possibly_free_var(tmpbox_low) + self.rm.possibly_free_var(tmpbox_high) + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -221,12 +221,14 @@ pdb_plus_show = PdbPlusShow(t) # need a translator to support extended commands - def debug(got_error): + def finish_profiling(): if prof: prof.disable() statfilename = 'prof.dump' log.info('Dumping profiler stats to: %s' % statfilename) - prof.dump_stats(statfilename) + prof.dump_stats(statfilename) + + def debug(got_error): tb = None if got_error: import traceback @@ -302,9 +304,11 @@ except SystemExit: raise except: + finish_profiling() debug(True) raise SystemExit(1) else: + finish_profiling() if translateconfig.pdb: debug(False) diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -5,7 +5,8 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.lloperation import llop -from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask, r_longlong +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, check_descr from pypy.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr @@ -227,6 +228,15 @@ length = lengthbox.getint() rstr.copy_unicode_contents(src, 
dst, srcstart, dststart, length) +def do_read_timestamp(cpu, _): + x = read_timestamp() + if longlong.is_64_bit: + assert isinstance(x, int) # 64-bit + return BoxInt(x) + else: + assert isinstance(x, r_longlong) # 32-bit + return BoxFloat(x) + # ____________________________________________________________ ##def do_force_token(cpu): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -25,6 +25,7 @@ from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint +from pypy.rlib.rtimer import read_timestamp import py from pypy.tool.ansi_print import ansi_log @@ -506,7 +507,7 @@ ', '.join(map(str, args)),)) self.fail_args = args return op.fail_index - + else: assert 0, "unknown final operation %d" % (op.opnum,) @@ -856,6 +857,9 @@ opaque_frame = _to_opaque(self) return llmemory.cast_ptr_to_adr(opaque_frame) + def op_read_timestamp(self, descr): + return read_timestamp() + def op_call_may_force(self, calldescr, func, *args): assert not self._forced self._may_force = self.opindex @@ -937,7 +941,7 @@ class OOFrame(Frame): OPHANDLERS = [None] * (rop._LAST+1) - + def op_new_with_vtable(self, descr, vtable): assert descr is None typedescr = get_class_size(self.memocast, vtable) @@ -958,7 +962,7 @@ return res op_getfield_gc_pure = op_getfield_gc - + def op_setfield_gc(self, fielddescr, obj, newvalue): TYPE = fielddescr.TYPE fieldname = fielddescr.fieldname diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -1,12 +1,39 @@ +import py from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError +from pypy.interpreter.function import Method, Function +from pypy.interpreter.gateway 
import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty) -from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped -from pypy.interpreter.function import Method, Function -from pypy.interpreter.error import OperationError +from pypy.rlib import jit +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.rtimer import read_timestamp, _is_64_bit +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.tool.autopath import pypydir +from pypy.rlib.rarithmetic import r_longlong + import time, sys +# cpu affinity settings + +srcdir = py.path.local(pypydir).join('translator', 'c', 'src') +eci = ExternalCompilationInfo(separate_module_files= + [srcdir.join('profiling.c')]) + +c_setup_profiling = rffi.llexternal('pypy_setup_profiling', + [], lltype.Void, + compilation_info = eci) +c_teardown_profiling = rffi.llexternal('pypy_teardown_profiling', + [], lltype.Void, + compilation_info = eci) + +if _is_64_bit: + timer_size_int = int +else: + timer_size_int = r_longlong + class W_StatsEntry(Wrappable): def __init__(self, space, frame, callcount, reccallcount, tt, it, w_sublist): @@ -74,20 +101,43 @@ l_w = [] for v in values: if v.callcount != 0: - l_w.append(v.stats(space, factor)) + l_w.append(v.stats(space, None, factor)) return space.newlist(l_w) -class ProfilerEntry(object): +class ProfilerSubEntry(object): def __init__(self, frame): self.frame = frame - self.tt = 0 - self.it = 0 + self.ll_tt = r_longlong(0) + self.ll_it = r_longlong(0) self.callcount = 0 self.recursivecallcount = 0 self.recursionLevel = 0 + + def stats(self, space, parent, factor): + w_sse = W_StatsSubEntry(space, self.frame, + self.callcount, self.recursivecallcount, + factor * float(self.ll_tt), + factor * float(self.ll_it)) + return space.wrap(w_sse) + + def _stop(self, tt, it): + if not we_are_translated(): + assert type(tt) is 
timer_size_int + assert type(it) is timer_size_int + self.recursionLevel -= 1 + if self.recursionLevel == 0: + self.ll_tt += tt + else: + self.recursivecallcount += 1 + self.ll_it += it + self.callcount += 1 + +class ProfilerEntry(ProfilerSubEntry): + def __init__(self, frame): + ProfilerSubEntry.__init__(self, frame) self.calls = {} - def stats(self, space, factor): + def stats(self, space, dummy, factor): if self.calls: w_sublist = space.newlist([sub_entry.stats(space, self, factor) for sub_entry in self.calls.values()]) @@ -95,67 +145,44 @@ w_sublist = space.w_None w_se = W_StatsEntry(space, self.frame, self.callcount, self.recursivecallcount, - factor * self.tt, factor * self.it, w_sublist) + factor * float(self.ll_tt), + factor * float(self.ll_it), w_sublist) return space.wrap(w_se) -class ProfilerSubEntry(object): - def __init__(self, frame): - self.frame = frame - self.tt = 0 - self.it = 0 - self.callcount = 0 - self.recursivecallcount = 0 - self.recursionLevel = 0 - - def stats(self, space, parent, factor): - w_sse = W_StatsSubEntry(space, self.frame, - self.callcount, self.recursivecallcount, - factor * self.tt, factor * self.it) - return space.wrap(w_sse) + @jit.purefunction + def _get_or_make_subentry(self, entry, make=True): + try: + return self.calls[entry] + except KeyError: + if make: + subentry = ProfilerSubEntry(entry.frame) + self.calls[entry] = subentry + return subentry + return None class ProfilerContext(object): def __init__(self, profobj, entry): self.entry = entry - self.subt = 0 + self.ll_subt = timer_size_int(0) self.previous = profobj.current_context entry.recursionLevel += 1 if profobj.subcalls and self.previous: - caller = self.previous.entry - try: - subentry = caller.calls[entry] - except KeyError: - subentry = ProfilerSubEntry(entry.frame) - caller.calls[entry] = subentry + caller = jit.hint(self.previous.entry, promote=True) + subentry = caller._get_or_make_subentry(entry) subentry.recursionLevel += 1 - self.t0 = profobj.timer() + 
self.ll_t0 = profobj.ll_timer() def _stop(self, profobj, entry): - # XXX factor out two pieces of the same code - tt = profobj.timer() - self.t0 - it = tt - self.subt + tt = profobj.ll_timer() - self.ll_t0 + it = tt - self.ll_subt if self.previous: - self.previous.subt += tt - entry.recursionLevel -= 1 - if entry.recursionLevel == 0: - entry.tt += tt - else: - entry.recursivecallcount += 1 - entry.it += it - entry.callcount += 1 + self.previous.ll_subt += tt + entry._stop(tt, it) if profobj.subcalls and self.previous: - caller = self.previous.entry - try: - subentry = caller.calls[entry] - except KeyError: - pass - else: - subentry.recursionLevel -= 1 - if subentry.recursionLevel == 0: - subentry.tt += tt - else: - subentry.recursivecallcount += 1 - subentry.it += it - subentry.callcount += 1 + caller = jit.hint(self.previous.entry, promote=True) + subentry = caller._get_or_make_subentry(entry, False) + if subentry is not None: + subentry._stop(tt, it) def create_spec(space, w_arg): if isinstance(w_arg, Method): @@ -187,7 +214,7 @@ else: class_name = space.type(w_arg).getname(space, '?') return "{'%s' object}" % (class_name,) - + def lsprof_call(space, w_self, frame, event, w_arg): assert isinstance(w_self, W_Profiler) if event == 'call': @@ -209,6 +236,7 @@ pass class W_Profiler(Wrappable): + def __init__(self, space, w_callable, time_unit, subcalls, builtins): self.subcalls = subcalls self.builtins = builtins @@ -218,65 +246,94 @@ self.data = {} self.builtin_data = {} self.space = space + self.is_enabled = False + self.total_timestamp = r_longlong(0) + self.total_real_time = 0.0 - def timer(self): + def ll_timer(self): if self.w_callable: space = self.space try: - return space.float_w(space.call_function(self.w_callable)) + if _is_64_bit: + return space.int_w(space.call_function(self.w_callable)) + else: + return space.r_longlong_w(space.call_function(self.w_callable)) except OperationError, e: e.write_unraisable(space, "timer function ", self.w_callable) - 
return 0.0 - return time.time() + return timer_size_int(0) + return read_timestamp() def enable(self, space, w_subcalls=NoneNotWrapped, w_builtins=NoneNotWrapped): + if self.is_enabled: + return # ignored if w_subcalls is not None: self.subcalls = space.bool_w(w_subcalls) if w_builtins is not None: self.builtins = space.bool_w(w_builtins) + # We want total_real_time and total_timestamp to end up containing + # (endtime - starttime). Now we are at the start, so we first + # have to subtract the current time. + self.is_enabled = True + self.total_real_time -= time.time() + self.total_timestamp -= read_timestamp() # set profiler hook + c_setup_profiling() space.getexecutioncontext().setllprofile(lsprof_call, space.wrap(self)) + @jit.purefunction + def _get_or_make_entry(self, f_code, make=True): + try: + return self.data[f_code] + except KeyError: + if make: + entry = ProfilerEntry(f_code) + self.data[f_code] = entry + return entry + return None + + @jit.purefunction + def _get_or_make_builtin_entry(self, key, make=True): + try: + return self.builtin_data[key] + except KeyError: + if make: + entry = ProfilerEntry(self.space.wrap(key)) + self.builtin_data[key] = entry + return entry + return None + def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) - try: - entry = self.data[f_code] - except KeyError: - entry = ProfilerEntry(f_code) - self.data[f_code] = entry + self = jit.hint(self, promote=True) + entry = self._get_or_make_entry(f_code) self.current_context = ProfilerContext(self, entry) def _enter_return(self, f_code): context = self.current_context if context is None: return - try: - entry = self.data[f_code] + self = jit.hint(self, promote=True) + entry = self._get_or_make_entry(f_code, False) + if entry is not None: context._stop(self, entry) - except KeyError: - pass self.current_context = context.previous def _enter_builtin_call(self, key): - try: - entry = self.builtin_data[key] - except KeyError: - entry = 
ProfilerEntry(self.space.wrap(key)) - self.builtin_data[key] = entry - self.current_context = ProfilerContext(self, entry) + self = jit.hint(self, promote=True) + entry = self._get_or_make_builtin_entry(key) + self.current_context = ProfilerContext(self, entry) def _enter_builtin_return(self, key): context = self.current_context if context is None: return - try: - entry = self.builtin_data[key] + self = jit.hint(self, promote=True) + entry = self._get_or_make_builtin_entry(key, False) + if entry is not None: context._stop(self, entry) - except KeyError: - pass - self.current_context = context.previous + self.current_context = context.previous def _flush_unmatched(self): context = self.current_context @@ -288,13 +345,29 @@ self.current_context = None def disable(self, space): + if not self.is_enabled: + return # ignored + # We want total_real_time and total_timestamp to end up containing + # (endtime - starttime), or the sum of such intervals if + # enable() and disable() are called several times. + self.is_enabled = False + self.total_timestamp += read_timestamp() + self.total_real_time += time.time() # unset profiler hook space.getexecutioncontext().setllprofile(None, None) + c_teardown_profiling() self._flush_unmatched() def getstats(self, space): if self.w_callable is None: - factor = 1. 
# we measure time.time in floats + if self.is_enabled: + raise OperationError(space.w_RuntimeError, + space.wrap("Profiler instance must be disabled " + "before getting the stats")) + if self.total_timestamp: + factor = self.total_real_time / float(self.total_timestamp) + else: + factor = 1.0 # probably not used elif self.time_unit > 0.0: factor = self.time_unit else: diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -39,11 +39,13 @@ #include "src/instrument.h" #include "src/asm.h" +#include "src/profiling.h" + +#include "src/debug_print.h" /*** modules ***/ #ifdef HAVE_RTYPER /* only if we have an RTyper */ # include "src/rtyper.h" -# include "src/debug_print.h" # include "src/debug_traceback.h" # include "src/debug_alloc.h" #ifndef AVR diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre']: + '_socket', '_sre', '_lsprof']: return True return False diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1023,6 +1023,10 @@ metainterp.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, lastbox], None) + @arguments() + def opimpl_ll_read_timestamp(self): + return self.metainterp.execute_and_record(rop.READ_TIMESTAMP, None) + # ------------------------------ def setup_call(self, argboxes): diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -380,7 +380,7 @@ return ord(b) def op_cast_int_to_unichar(b): - assert type(b) is int + assert type(b) is int 
return unichr(b) def op_cast_int_to_uint(b): @@ -578,6 +578,10 @@ def op_shrink_array(array, smallersize): return False +def op_ll_read_timestamp(): + from pypy.rlib.rtimer import read_timestamp + return read_timestamp() + # ____________________________________________________________ def get_op_impl(opname): diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1356,6 +1356,19 @@ self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') + def test_read_timestamp(self): + if longlong.is_64_bit: + got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + res1 = got1.getint() + res2 = got2.getint() + else: + got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + res1 = got1.getlonglong() + res2 = got2.getlonglong() + assert res1 < res2 < res1 + 2**32 + class LLtypeBackendTest(BaseBackendTest): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -32,7 +32,7 @@ assert isinstance(canraise, tuple) assert not canraise or not canfold - + # The operation manipulates PyObjects self.pyobj = pyobj @@ -440,6 +440,7 @@ 'get_write_barrier_failing_case': LLOp(sideeffects=False), 'get_write_barrier_from_array_failing_case': LLOp(sideeffects=False), 'gc_get_type_info_group': LLOp(sideeffects=False), + 'll_read_timestamp': LLOp(canrun=True), # __________ GC operations __________ @@ -482,7 +483,7 @@ 'gc_typeids_z' : LLOp(), # ------- JIT & GC interaction, only for some GCs ---------- - + 'gc_adr_of_nursery_free' : LLOp(), # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), @@ -554,7 +555,8 @@ 
'debug_pdb': LLOp(), 'debug_assert': LLOp(tryfold=True), 'debug_fatalerror': LLOp(), - 'debug_llinterpcall': LLOp(), # Python func call 'res=arg[0](*arg[1:])' + 'debug_llinterpcall': LLOp(canraise=(Exception,)), + # Python func call 'res=arg[0](*arg[1:])' # in backends, abort() or whatever is fine 'debug_start_traceback': LLOp(), 'debug_record_traceback': LLOp(), diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -298,8 +298,11 @@ # Profile cases if self.profilefunc is not None: - if event not in ['leaveframe', 'call', 'c_call', - 'c_return', 'c_exception']: + if not (event == 'leaveframe' or + event == 'call' or + event == 'c_call' or + event == 'c_return' or + event == 'c_exception'): return False last_exception = frame.last_exception diff --git a/pypy/rlib/rtimer.py b/pypy/rlib/rtimer.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/rtimer.py @@ -0,0 +1,37 @@ +import time + +from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint +from pypy.rlib.rarithmetic import intmask, longlongmask +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype, rffi + +_is_64_bit = r_uint.BITS > 32 + + +def read_timestamp(): + # Returns a longlong on 32-bit, and a regular int on 64-bit. + # When running on top of python, build the result a bit arbitrarily. 
+ x = long(time.time() * 500000000) + if _is_64_bit: + return intmask(x) + else: + return longlongmask(x) + + +class ReadTimestampEntry(ExtRegistryEntry): + _about_ = read_timestamp + + def compute_result_annotation(self): + from pypy.annotation.model import SomeInteger + if _is_64_bit: + return SomeInteger() + else: + return SomeInteger(knowntype=r_longlong) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + if _is_64_bit: + resulttype = lltype.Signed + else: + resulttype = rffi.LONGLONG + return hop.genop("ll_read_timestamp", [], resulttype=resulttype) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,4 +1,5 @@ from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import debug_start, debug_stop @@ -1205,6 +1206,10 @@ def bhimpl_unicodesetitem(cpu, unicode, index, newchr): cpu.bh_unicodesetitem(unicode, index, newchr) + @arguments(returns=(longlong.is_64_bit and "i" or "f")) + def bhimpl_ll_read_timestamp(): + return read_timestamp() + # ---------- # helpers to resume running in blackhole mode when a guard failed @@ -1416,7 +1421,7 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - + try: _run_forever(blackholeinterp, current_exc) finally: diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -23,7 +23,7 @@ # methods implemented by each concrete class # ------------------------------------------ - + def getopnum(self): raise NotImplementedError @@ -234,7 +234,7 @@ def getarg(self, i): raise IndexError - + def setarg(self, i, box): raise IndexError @@ -258,7 +258,7 @@ return 
self._arg0 else: raise IndexError - + def setarg(self, i, box): if i == 0: self._arg0 = box @@ -288,7 +288,7 @@ return self._arg1 else: raise IndexError - + def setarg(self, i, box): if i == 0: self._arg0 = box @@ -326,7 +326,7 @@ return self._arg2 else: raise IndexError - + def setarg(self, i, box): if i == 0: self._arg0 = box @@ -352,7 +352,7 @@ def getarg(self, i): return self._args[i] - + def setarg(self, i, box): self._args[i] = box @@ -460,6 +460,7 @@ '_MALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend + 'READ_TIMESTAMP/0', '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', @@ -468,7 +469,7 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - #'RUNTIMENEW/1', # ootype operation + #'RUNTIMENEW/1', # ootype operation 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/2', # debugging only 'JIT_DEBUG/*', # debugging only @@ -554,7 +555,7 @@ 2: BinaryOp, 3: TernaryOp } - + is_guard = name.startswith('GUARD') if is_guard: assert withdescr diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py --- a/pypy/rpython/lltypesystem/rdict.py +++ b/pypy/rpython/lltypesystem/rdict.py @@ -521,6 +521,7 @@ ll_dict_insertclean(d, entry.key, entry.value, hash) i += 1 old_entries.delete() +ll_dict_resize.oopspec = 'dict.resize(d)' # ------- a port of CPython's dictobject.c's lookdict implementation ------- PERTURB_SHIFT = 5 diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -91,6 +91,30 @@ assert spam2bar.inlinetime == 1.0 assert spam2bar.totaltime == 1.0 + def test_scale_of_result(self): + import _lsprof, time + prof = _lsprof.Profiler() + def foo(n): + t = time.time() + while abs(t - time.time()) < 1.0: + pass # busy-wait for 1 second + def bar(n): + foo(n) + prof.enable() + bar(0) + 
prof.disable() + stats = prof.getstats() + entries = {} + for entry in stats: + entries[entry.code] = entry + efoo = entries[foo.func_code] + ebar = entries[bar.func_code] + assert 0.9 < efoo.totaltime < 2.9 + assert 0.9 < efoo.inlinetime < 2.9 + for subentry in ebar.calls: + assert 0.9 < subentry.totaltime < 2.9 + assert 0.9 < subentry.inlinetime < 2.9 + def test_cprofile(self): import sys, os # XXX this is evil trickery to walk around the fact that we don't diff --git a/pypy/translator/c/src/profiling.h b/pypy/translator/c/src/profiling.h new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/profiling.h @@ -0,0 +1,8 @@ + +#ifndef PROFILING_H +#define PROFILING_H + +void pypy_setup_profiling(); +void pypy_teardown_profiling(); + +#endif diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -39,6 +39,7 @@ config.objspace.usemodules.array = True config.objspace.usemodules._weakref = True config.objspace.usemodules._sre = False +config.objspace.usemodules._lsprof = True # config.objspace.usemodules._ffi = True # @@ -99,7 +100,7 @@ from pypy.translator.goal.ann_override import PyPyAnnotatorPolicy from pypy.rpython.test.test_llinterp import get_interpreter - # first annotate, rtype, and backendoptimize PyPy + # first annotate and rtype try: interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -330,7 +330,7 @@ if log: self._register_counter() operations = self._inject_debugging_code(looptoken, operations) - + regalloc = RegAlloc(self, self.cpu.translate_support_code) arglocs = regalloc.prepare_loop(inputargs, operations, looptoken) looptoken._x86_arglocs = arglocs @@ -339,7 +339,7 @@ stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) self.looppos = self.mc.get_relative_pos() 
looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + looptoken._x86_param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) looptoken._x86_frame_depth = frame_depth looptoken._x86_param_depth = param_depth @@ -538,7 +538,7 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc - regalloc.walk_operations(operations) + regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging frame_depth = regalloc.fm.frame_depth @@ -1015,7 +1015,7 @@ dst_locs.append(unused_gpr.pop()) else: pass_on_stack.append(loc) - + # Emit instructions to pass the stack arguments # XXX: Would be nice to let remap_frame_layout take care of this, but # we'd need to create something like StackLoc, but relative to esp, @@ -1441,6 +1441,17 @@ else: assert 0, itemsize + def genop_read_timestamp(self, op, arglocs, resloc): + self.mc.RDTSC() + if longlong.is_64_bit: + self.mc.SHL_ri(edx.value, 32) + self.mc.OR_rr(edx.value, eax.value) + else: + loc1, = arglocs + self.mc.MOVD_xr(loc1.value, edx.value) + self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.PUNPCKLDQ_xx(resloc.value, loc1.value) + def genop_guard_guard_true(self, ign_1, guard_op, guard_token, locs, ign_2): loc = locs[0] self.mc.TEST(loc, loc) @@ -2131,7 +2142,7 @@ assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) - + genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST genop_list = [Assembler386.not_implemented_op] * rop._LAST genop_llong_list = {} @@ -2142,7 +2153,7 @@ opname = name[len('genop_discard_'):] num = getattr(rop, opname.upper()) genop_discard_list[num] = value - elif name.startswith('genop_guard_') and name != 'genop_guard_exception': + elif name.startswith('genop_guard_') and name != 'genop_guard_exception': opname = name[len('genop_guard_'):] num = 
getattr(rop, opname.upper()) genop_guard_list[num] = value diff --git a/pypy/translator/c/src/align.h b/pypy/translator/c/src/align.h --- a/pypy/translator/c/src/align.h +++ b/pypy/translator/c/src/align.h @@ -1,3 +1,6 @@ + +#ifndef _PYPY_ALIGN_H +#define _PYPY_ALIGN_H /* alignment for arena-based garbage collectors: the following line enforces an alignment that should be enough for any structure @@ -14,3 +17,5 @@ #define ROUND_UP_FOR_ALLOCATION(x, minsize) \ ((((x)>=(minsize)?(x):(minsize)) \ + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1)) + +#endif //_PYPY_ALIGN_H diff --git a/pypy/translator/c/src/profiling.c b/pypy/translator/c/src/profiling.c new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/profiling.c @@ -0,0 +1,35 @@ + +#include +#if defined(__GNUC__) && defined(__linux__) + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#include +#endif + +cpu_set_t base_cpu_set; +int profiling_setup = 0; + +void pypy_setup_profiling() +{ + if (!profiling_setup) { + cpu_set_t set; + sched_getaffinity(0, sizeof(cpu_set_t), &base_cpu_set); + CPU_ZERO(&set); + CPU_SET(0, &set); /* restrict to a single cpu */ + sched_setaffinity(0, sizeof(cpu_set_t), &set); + profiling_setup = 1; + } +} + +void pypy_teardown_profiling() +{ + if (profiling_setup) { + sched_setaffinity(0, sizeof(cpu_set_t), &base_cpu_set); + profiling_setup = 0; + } +} +#else +void pypy_setup_profiling() { } +void pypy_teardown_profiling() { } +#endif From commits-noreply at bitbucket.org Mon Apr 25 13:56:40 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 13:56:40 +0200 (CEST) Subject: [pypy-svn] pypy default: merge Message-ID: <20110425115640.43E2D282B90@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43574:ccffe37b6971 Date: 2011-04-25 13:56 +0200 http://bitbucket.org/pypy/pypy/changeset/ccffe37b6971/ Log: merge diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ 
b/pypy/module/cpyext/dictobject.py @@ -41,9 +41,7 @@ def PyDict_SetItemString(space, w_dict, key_ptr, w_obj): if PyDict_Check(space, w_dict): key = rffi.charp2str(key_ptr) - # our dicts dont have a standardized interface, so we need - # to go through the space - space.setitem(w_dict, space.wrap(key), w_obj) + space.setitem_str(w_dict, key, w_obj) return 0 else: PyErr_BadInternalCall(space) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1990,9 +1990,9 @@ n = sa = 0 while n < 10: myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if 0 < a < 10: pass - if 0 < b < 10: pass - sa += (a << b) >> b + if 0 < a <= 5: pass + if 0 < b <= 5: pass + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2002,7 +2002,7 @@ myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) if 0 < a < hint(sys.maxint/2, promote=True): pass if 0 < b < 100: pass - sa += (a << b) >> b + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2010,24 +2010,24 @@ self.check_loops(int_rshift=0, everywhere=True) for f in (f1, f2): - assert self.meta_interp(f, [5, 10]) == 50 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [5, 6]) == 50 + self.check_loops(int_rshift=3, everywhere=True) assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [10, 10]) == 100 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [10, 6]) == 100 + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [5, 31]) == 0 + self.check_loops(int_rshift=3, everywhere=True) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - 
self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) def test_overflowing_shift_neg(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2035,9 +2035,9 @@ n = sa = 0 while n < 10: myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if -10 < a < 0: pass - if 0 < b < 10: pass - sa += (a << b) >> b + if -5 <= a < 0: pass + if 0 < b <= 5: pass + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2047,7 +2047,7 @@ myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) if -hint(sys.maxint/2, promote=True) < a < 0: pass if 0 < b < 100: pass - sa += (a << b) >> b + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2055,24 +2055,24 @@ self.check_loops(int_rshift=0, everywhere=True) for f in (f1, f2): - assert self.meta_interp(f, [-5, 10]) == -50 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-5, 6]) == -50 + self.check_loops(int_rshift=3, everywhere=True) assert self.meta_interp(f, [-10, 5]) == -100 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [-10, 10]) == -100 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-10, 6]) == -100 + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [-5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-5, 31]) == 0 + self.check_loops(int_rshift=3, everywhere=True) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 - assert self.meta_interp(f, [-bigval, 5]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [bigval, 5]) == 0 + self.check_loops(int_rshift=3, everywhere=True) def notest_overflowing_shift2(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2100,7 +2100,7 @@ assert self.meta_interp(f, [5, 100]) == 0 self.check_loops(int_rshift=1, 
everywhere=True) - + def test_read_timestamp(self): import time from pypy.rlib.rtimer import read_timestamp @@ -2118,7 +2118,6 @@ res = self.interp_operations(f, []) assert res - class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -21,10 +21,12 @@ assert_encodes_as(cb32, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xB9\x39\x30') # 64-bit - assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x89\xD9') + assert_encodes_as(cb64, "MOV16", (r8, ebx), '\x66\x41\x89\xD8') # 11 011 000 + assert_encodes_as(cb64, "MOV16", (ebx, r8), '\x66\x44\x89\xC3') # 11 000 011 + assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x40\x89\xD9') # XXX: What we are testing for here is actually not the most compact # encoding. - assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xC7\xC1\x39\x30') + assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\x40\xC7\xC1\x39\x30') assert_encodes_as(cb64, "MOV16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\xC7\x45\x00\x39\x30') def test_cmp_16(): @@ -33,8 +35,10 @@ assert_encodes_as(cb32, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') # 64-bit - assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x39\xD9') - assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') + assert_encodes_as(cb64, "CMP16", (r8, ebx), '\x66\x41\x39\xD8') # 11 011 000 + assert_encodes_as(cb64, "CMP16", (ebx, r8), '\x66\x44\x39\xC3') # 11 000 011 + assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x40\x39\xD9') + assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x40\x81\xF9\x39\x30') assert_encodes_as(cb64, "CMP16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\x81\x7D\x00\x39\x30') def test_relocation(): diff --git a/pypy/jit/metainterp/blackhole.py 
b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -239,6 +239,10 @@ interp.cleanup_registers() self.blackholeinterps.append(interp) +def check_shift_count(b): + if not we_are_translated(): + if b < 0 or b >= LONG_BIT: + raise ValueError("Shift count, %d, not in valid range, 0 .. %d." % (b, LONG_BIT-1)) class BlackholeInterpreter(object): @@ -421,14 +425,17 @@ @arguments("i", "i", returns="i") def bhimpl_int_rshift(a, b): + check_shift_count(b) return a >> b @arguments("i", "i", returns="i") def bhimpl_int_lshift(a, b): + check_shift_count(b) return intmask(a << b) @arguments("i", "i", returns="i") def bhimpl_uint_rshift(a, b): + check_shift_count(b) c = r_uint(a) >> r_uint(b) return intmask(c) diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -217,3 +217,16 @@ for x in range(1, 8)]) builder = pyjitpl._warmrunnerdesc.metainterp_sd.blackholeinterpbuilder assert builder.num_interpreters == 2 + +def test_bad_shift(): + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, -1) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, -1) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, -1) + + assert BlackholeInterpreter.bhimpl_int_lshift.im_func(100, 3) == 100<<3 + assert BlackholeInterpreter.bhimpl_int_rshift.im_func(100, 3) == 100>>3 + assert BlackholeInterpreter.bhimpl_uint_rshift.im_func(100, 3) == 100>>3 + From commits-noreply at bitbucket.org Mon Apr 25 14:00:13 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: 
Mon, 25 Apr 2011 14:00:13 +0200 (CEST) Subject: [pypy-svn] pypy jit-lsprofile: close merged branch Message-ID: <20110425120013.A987B282B90@codespeak.net> Author: Carl Friedrich Bolz Branch: jit-lsprofile Changeset: r43575:b52d892cc8d9 Date: 2011-04-25 13:58 +0200 http://bitbucket.org/pypy/pypy/changeset/b52d892cc8d9/ Log: close merged branch From commits-noreply at bitbucket.org Mon Apr 25 14:00:15 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 14:00:15 +0200 (CEST) Subject: [pypy-svn] pypy default: forgot to close before the merge Message-ID: <20110425120015.176FD282BEA@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43576:b28f53c01c9c Date: 2011-04-25 13:59 +0200 http://bitbucket.org/pypy/pypy/changeset/b28f53c01c9c/ Log: forgot to close before the merge From commits-noreply at bitbucket.org Mon Apr 25 14:08:59 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 14:08:59 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) get rid of externaltools.rst which is obsolete Message-ID: <20110425120859.0B8DE282B90@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43577:6cdbea84f217 Date: 2011-04-25 14:08 +0200 http://bitbucket.org/pypy/pypy/changeset/6cdbea84f217/ Log: (lac, cfbolz) get rid of externaltools.rst which is obsolete diff --git a/pypy/doc/externaltools.rst b/pypy/doc/externaltools.rst deleted file mode 100644 --- a/pypy/doc/externaltools.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. 
include:: throwaway.rst - -External tools&programs needed by PyPy -====================================== - -Tools needed for testing ------------------------- - -These tools are used in various ways by PyPy tests; if they are not found, -some tests might be skipped, so they need to be installed on every buildbot -slave to be sure we actually run all tests: - - - Mono (versions 1.2.1.1 and 1.9.1 known to work) - - - Java/JVM (preferably sun-jdk; version 1.6.0 known to work) - - - Jasmin >= 2.2 (copy it from wyvern, /usr/local/bin/jasmin and /usr/local/share/jasmin.jar) - - - gcc - - - make - - - Some libraries (these are Debian package names, adapt as needed): - - * ``python-dev`` - * ``python-ctypes`` - * ``libffi-dev`` - * ``libz-dev`` (for the optional ``zlib`` module) - * ``libbz2-dev`` (for the optional ``bz2`` module) - * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) - * ``libgc-dev`` (only when translating with `--opt=0, 1` or `size`) diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -12,8 +12,6 @@ distribution.rst - externaltools.rst - geninterp.rst objspace-proxies.rst From commits-noreply at bitbucket.org Mon Apr 25 14:37:21 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 14:37:21 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: hg merge default Message-ID: <20110425123721.6D98336C20B@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43578:a7f1410361ce Date: 2011-04-25 12:52 +0200 http://bitbucket.org/pypy/pypy/changeset/a7f1410361ce/ Log: hg merge default diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -41,9 +41,7 @@ def PyDict_SetItemString(space, w_dict, key_ptr, w_obj): if PyDict_Check(space, w_dict): key = rffi.charp2str(key_ptr) - # our dicts dont have a standardized interface, so we need - 
# to go through the space - space.setitem(w_dict, space.wrap(key), w_obj) + space.setitem_str(w_dict, key, w_obj) return 0 else: PyErr_BadInternalCall(space) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2127,9 +2127,9 @@ n = sa = 0 while n < 10: myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if 0 < a < 10: pass - if 0 < b < 10: pass - sa += (a << b) >> b + if 0 < a <= 5: pass + if 0 < b <= 5: pass + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2139,7 +2139,7 @@ myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) if 0 < a < hint(sys.maxint/2, promote=True): pass if 0 < b < 100: pass - sa += (a << b) >> b + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2147,24 +2147,24 @@ self.check_loops(int_rshift=0, everywhere=True) for f in (f1, f2): - assert self.meta_interp(f, [5, 10]) == 50 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [5, 6]) == 50 + self.check_loops(int_rshift=3, everywhere=True) assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [10, 10]) == 100 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [10, 6]) == 100 + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [5, 31]) == 0 + self.check_loops(int_rshift=3, everywhere=True) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 assert self.meta_interp(f, [bigval, 5]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) def test_overflowing_shift_neg(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) @@ -2172,9 +2172,9 @@ n = sa = 0 
while n < 10: myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if -10 < a < 0: pass - if 0 < b < 10: pass - sa += (a << b) >> b + if -5 <= a < 0: pass + if 0 < b <= 5: pass + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2184,7 +2184,7 @@ myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) if -hint(sys.maxint/2, promote=True) < a < 0: pass if 0 < b < 100: pass - sa += (a << b) >> b + sa += (((((a << b) << b) << b) >> b) >> b) >> b n += 1 return sa @@ -2192,51 +2192,25 @@ self.check_loops(int_rshift=0, everywhere=True) for f in (f1, f2): - assert self.meta_interp(f, [-5, 10]) == -50 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-5, 6]) == -50 + self.check_loops(int_rshift=3, everywhere=True) assert self.meta_interp(f, [-10, 5]) == -100 - self.check_loops(int_rshift=1, everywhere=True) + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [-10, 10]) == -100 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-10, 6]) == -100 + self.check_loops(int_rshift=3, everywhere=True) - assert self.meta_interp(f, [-5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [-5, 31]) == 0 + self.check_loops(int_rshift=3, everywhere=True) bigval = 1 while (bigval << 3).__class__ is int: bigval = bigval << 1 - assert self.meta_interp(f, [-bigval, 5]) == 0 - self.check_loops(int_rshift=1, everywhere=True) + assert self.meta_interp(f, [bigval, 5]) == 0 + self.check_loops(int_rshift=3, everywhere=True) - def notest_overflowing_shift2(self): - myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) - def f(a, b): - n = sa = 0 - while n < 10: - myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) - if 0 < a < hint(sys.maxint/2, promote=True): pass - if 0 < b < 100: pass - sa += (a << b) >> b - n += 1 - return sa - - assert self.meta_interp(f, [5, 5]) == 50 - self.check_loops(int_rshift=0, everywhere=True) - - assert self.meta_interp(f, 
[5, 10]) == 50 - self.check_loops(int_rshift=1, everywhere=True) - - assert self.meta_interp(f, [10, 5]) == 100 - self.check_loops(int_rshift=1, everywhere=True) - - assert self.meta_interp(f, [10, 10]) == 100 - self.check_loops(int_rshift=1, everywhere=True) - - assert self.meta_interp(f, [5, 100]) == 0 - self.check_loops(int_rshift=1, everywhere=True) def test_pure_op_not_to_be_propagated(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa']) @@ -2249,8 +2223,6 @@ return sa assert self.meta_interp(f, [10]) == f(10) - - class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -21,10 +21,12 @@ assert_encodes_as(cb32, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xB9\x39\x30') # 64-bit - assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x89\xD9') + assert_encodes_as(cb64, "MOV16", (r8, ebx), '\x66\x41\x89\xD8') # 11 011 000 + assert_encodes_as(cb64, "MOV16", (ebx, r8), '\x66\x44\x89\xC3') # 11 000 011 + assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x40\x89\xD9') # XXX: What we are testing for here is actually not the most compact # encoding. 
- assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xC7\xC1\x39\x30') + assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\x40\xC7\xC1\x39\x30') assert_encodes_as(cb64, "MOV16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\xC7\x45\x00\x39\x30') def test_cmp_16(): @@ -33,8 +35,10 @@ assert_encodes_as(cb32, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') # 64-bit - assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x39\xD9') - assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') + assert_encodes_as(cb64, "CMP16", (r8, ebx), '\x66\x41\x39\xD8') # 11 011 000 + assert_encodes_as(cb64, "CMP16", (ebx, r8), '\x66\x44\x39\xC3') # 11 000 011 + assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x40\x39\xD9') + assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x40\x81\xF9\x39\x30') assert_encodes_as(cb64, "CMP16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\x81\x7D\x00\x39\x30') def test_relocation(): diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -238,6 +238,10 @@ interp.cleanup_registers() self.blackholeinterps.append(interp) +def check_shift_count(b): + if not we_are_translated(): + if b < 0 or b >= LONG_BIT: + raise ValueError("Shift count, %d, not in valid range, 0 .. %d." 
% (b, LONG_BIT-1)) class BlackholeInterpreter(object): @@ -420,14 +424,17 @@ @arguments("i", "i", returns="i") def bhimpl_int_rshift(a, b): + check_shift_count(b) return a >> b @arguments("i", "i", returns="i") def bhimpl_int_lshift(a, b): + check_shift_count(b) return intmask(a << b) @arguments("i", "i", returns="i") def bhimpl_uint_rshift(a, b): + check_shift_count(b) c = r_uint(a) >> r_uint(b) return intmask(c) diff --git a/pypy/jit/metainterp/test/test_blackhole.py b/pypy/jit/metainterp/test/test_blackhole.py --- a/pypy/jit/metainterp/test/test_blackhole.py +++ b/pypy/jit/metainterp/test/test_blackhole.py @@ -217,3 +217,16 @@ for x in range(1, 8)]) builder = pyjitpl._warmrunnerdesc.metainterp_sd.blackholeinterpbuilder assert builder.num_interpreters == 2 + +def test_bad_shift(): + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, 100) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_lshift.im_func, 7, -1) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_int_rshift.im_func, 7, -1) + py.test.raises(ValueError, BlackholeInterpreter.bhimpl_uint_rshift.im_func, 7, -1) + + assert BlackholeInterpreter.bhimpl_int_lshift.im_func(100, 3) == 100<<3 + assert BlackholeInterpreter.bhimpl_int_rshift.im_func(100, 3) == 100>>3 + assert BlackholeInterpreter.bhimpl_uint_rshift.im_func(100, 3) == 100>>3 + From commits-noreply at bitbucket.org Mon Apr 25 14:37:23 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 14:37:23 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: fixed tests Message-ID: <20110425123723.93134282C1A@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43579:536fbb140987 Date: 2011-04-25 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/536fbb140987/ Log: fixed tests diff --git 
a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1319,10 +1319,10 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=2, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, - guard_nonnull_class=4, guard_isnull=1, + self.check_loops(guard_class=0, guard_nonnull=2, + guard_nonnull_class=2, guard_isnull=1) + self.check_loops(guard_class=0, guard_nonnull=4, + guard_nonnull_class=4, guard_isnull=2, everywhere=True) def test_merge_guardnonnull_guardvalue(self): @@ -1350,9 +1350,9 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=1, + self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, guard_nonnull_class=0, guard_isnull=1) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=3, + self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, guard_nonnull_class=0, guard_isnull=2, everywhere=True) @@ -1381,10 +1381,10 @@ return x res = self.meta_interp(f, [299], listops=True) assert res == f(299) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=4, - guard_nonnull_class=0, guard_isnull=1, + self.check_loops(guard_class=0, guard_nonnull=2, guard_value=2, + guard_nonnull_class=0, guard_isnull=1) + self.check_loops(guard_class=0, guard_nonnull=4, guard_value=4, + guard_nonnull_class=0, guard_isnull=2, everywhere=True) def test_merge_guardnonnull_guardclass_guardvalue(self): @@ -1415,10 +1415,10 @@ return x res = self.meta_interp(f, [399], listops=True) assert res == f(399) - self.check_loops(guard_class=0, guard_nonnull=0, guard_value=2, - guard_nonnull_class=0, guard_isnull=0) - 
self.check_loops(guard_class=0, guard_nonnull=0, guard_value=5, - guard_nonnull_class=0, guard_isnull=1, + self.check_loops(guard_class=0, guard_nonnull=3, guard_value=3, + guard_nonnull_class=0, guard_isnull=1) + self.check_loops(guard_class=0, guard_nonnull=6, guard_value=6, + guard_nonnull_class=0, guard_isnull=2, everywhere=True) def test_residual_call_doesnt_lose_info(self): From commits-noreply at bitbucket.org Mon Apr 25 14:42:49 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 14:42:49 +0200 (CEST) Subject: [pypy-svn] pypy default: (rguillebert, arigo) Message-ID: <20110425124249.E097036C20B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43580:f330c318944f Date: 2011-04-25 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/f330c318944f/ Log: (rguillebert, arigo) Re-insert the checkin that was backed out by 195459aa1891, and try to make it more jit-friendly. diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -61,3 +61,27 @@ assert not imp.lock_held() self.waitfor(lambda: done) assert done + +class TestImportLock: + def test_lock(self, space, monkeypatch): + from pypy.module.imp.importing import getimportlock, importhook + + # Monkeypatch the import lock and add a counter + importlock = getimportlock(space) + original_acquire = importlock.acquire_lock + def acquire_lock(): + importlock.count += 1 + original_acquire() + importlock.count = 0 + monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) + + # An already imported module + importhook(space, 'sys') + assert importlock.count == 0 + # A new module + importhook(space, 're') + assert importlock.count == 7 + # Import it again + previous_count = importlock.count + importhook(space, 're') + assert importlock.count == previous_count diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py 
--- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -438,6 +438,38 @@ res = __import__('', mydict, None, ['bar'], 2) assert res is pkg + def test__package__(self): + # Regression test for http://bugs.python.org/issue3221. + def check_absolute(): + exec "from os import path" in ns + def check_relative(): + exec "from . import a" in ns + + # Check both OK with __package__ and __name__ correct + ns = dict(__package__='pkg', __name__='pkg.notarealmodule') + check_absolute() + check_relative() + + # Check both OK with only __name__ wrong + ns = dict(__package__='pkg', __name__='notarealpkg.notarealmodule') + check_absolute() + check_relative() + + # Check relative fails with only __package__ wrong + ns = dict(__package__='foo', __name__='pkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check relative fails with __package__ and __name__ wrong + ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') + check_absolute() # XXX check warnings + raises(SystemError, check_relative) + + # Check both fail with package set to a non-string + ns = dict(__package__=object()) + raises(ValueError, check_absolute) + raises(ValueError, check_relative) + def test_universal_newlines(self): import pkg_univnewlines assert pkg_univnewlines.a == 5 diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -120,6 +120,114 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) + at jit.purefunction +def _get_dot_position(ctxt_package, level): + result = len(ctxt_package) + while level > 1 and result >= 0: + level -= 1 + result = ctxt_package.rfind('.', 0, result) + return result + +def _get_relative_name(space, modulename, level, w_globals): + w = space.wrap + ctxt_w_package = space.finditem_str(w_globals, '__package__') + ctxt_w_package = 
jit.hint(ctxt_w_package, promote=True) + level = jit.hint(level, promote=True) + + ctxt_package = None + if ctxt_w_package is not None and ctxt_w_package is not space.w_None: + try: + ctxt_package = space.str_w(ctxt_w_package) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_ValueError, space.wrap( + "__package__ set to non-string")) + + if ctxt_package is not None: + # __package__ is set, so use it + if ctxt_package == '' and level < 0: + return None, 0 + + dot_position = _get_dot_position(ctxt_package, level) + if dot_position < 0: + if len(ctxt_package) == 0: + msg = "Attempted relative import in non-package" + else: + msg = "Attempted relative import beyond toplevel package" + raise OperationError(space.w_ValueError, w(msg)) + + # Try to import parent package + try: + w_parent = absolute_import(space, ctxt_package, 0, + None, tentative=False) + except OperationError, e: + if not e.match(space, space.w_ImportError): + raise + if level > 0: + raise OperationError(space.w_SystemError, space.wrap( + "Parent module '%s' not loaded, " + "cannot perform relative import" % ctxt_package)) + else: + space.warn("Parent module '%s' not found " + "while handling absolute import" % ctxt_package, + space.w_RuntimeWarning) + + rel_modulename = ctxt_package[:dot_position] + rel_level = rel_modulename.count('.') + 1 + if modulename: + rel_modulename += '.' 
+ modulename + else: + # __package__ not set, so figure it out and set it + ctxt_w_name = space.finditem_str(w_globals, '__name__') + ctxt_w_path = space.finditem_str(w_globals, '__path__') + + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.str_w(ctxt_w_name) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + + if not ctxt_name: + return None, 0 + + ctxt_name_prefix_parts = ctxt_name.split('.') + if level > 0: + n = len(ctxt_name_prefix_parts)-level+1 + assert n>=0 + ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] + if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module + ctxt_name_prefix_parts.pop() + + if level > 0 and not ctxt_name_prefix_parts: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + + rel_modulename = '.'.join(ctxt_name_prefix_parts) + + if ctxt_w_path is not None: + # __path__ is set, so __name__ is already the package name + space.setitem(w_globals, w("__package__"), ctxt_w_name) + else: + # Normal module, so work out the package name if any + if '.' not in ctxt_name: + space.setitem(w_globals, w("__package__"), space.w_None) + elif rel_modulename: + space.setitem(w_globals, w("__package__"), w(rel_modulename)) + + if modulename: + if rel_modulename: + rel_modulename += '.' 
+ modulename + else: + rel_modulename = modulename + + rel_level = len(ctxt_name_prefix_parts) + + return rel_modulename, rel_level + + @unwrap_spec(name=str, level=int) def importhook(space, name, w_globals=None, w_locals=None, w_fromlist=None, level=-1): @@ -141,68 +249,40 @@ w_globals is not None and space.isinstance_w(w_globals, space.w_dict)): - ctxt_w_name = space.finditem(w_globals, w('__name__')) - ctxt_w_path = space.finditem(w_globals, w('__path__')) + rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) - ctxt_name = None - if ctxt_w_name is not None: - try: - ctxt_name = space.str_w(ctxt_w_name) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise + if rel_modulename: + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + tentative = True + else: + tentative = False - if ctxt_name is not None: - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - if ctxt_name_prefix_parts: - rel_modulename = '.'.join(ctxt_name_prefix_parts) - if modulename: - rel_modulename += '.' + modulename - baselevel = len(ctxt_name_prefix_parts) - if rel_modulename is not None: - # XXX What is this check about? There is no test for it - w_mod = check_sys_modules(space, w(rel_modulename)) + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=tentative) + if w_mod is not None: + space.timer.stop_name("importhook", modulename) + return w_mod - if (w_mod is None or - not space.is_w(w_mod, space.w_None) or - level > 0): - - # if no level was set, ignore import errors, and - # fall back to absolute import at the end of the - # function. 
- if level == -1: - tentative = True - else: - tentative = False - - w_mod = absolute_import(space, rel_modulename, - baselevel, fromlist_w, - tentative=tentative) - if w_mod is not None: - space.timer.stop_name("importhook", modulename) - return w_mod - else: - rel_modulename = None - - if level > 0: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - w_mod = absolute_import_try(space, modulename, 0, fromlist_w) - if w_mod is None or space.is_w(w_mod, space.w_None): - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) space.timer.stop_name("importhook", modulename) return w_mod +def absolute_import(space, modulename, baselevel, fromlist_w, tentative): + # Short path: check in sys.modules + w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) + if w_mod is not None and not space.is_w(w_mod, space.w_None): + return w_mod + return absolute_import_with_lock(space, modulename, baselevel, + fromlist_w, tentative) + @jit.dont_look_inside -def absolute_import(space, modulename, baselevel, fromlist_w, tentative): +def absolute_import_with_lock(space, modulename, baselevel, + fromlist_w, tentative): lock = getimportlock(space) lock.acquire_lock() try: From commits-noreply at bitbucket.org Mon Apr 25 14:43:11 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 14:43:11 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110425124311.AF9DB36C20B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43581:3d96d6bdc2be Date: 2011-04-25 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/3d96d6bdc2be/ Log: merge heads diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py --- a/pypy/rpython/llinterp.py +++ b/pypy/rpython/llinterp.py @@ -532,7 +532,10 @@ raise 
LLFatalError(msg, LLException(ll_exc_type, ll_exc)) def op_debug_llinterpcall(self, pythonfunction, *args_ll): - return pythonfunction(*args_ll) + try: + return pythonfunction(*args_ll) + except: + self.make_llexception() def op_debug_start_traceback(self, *args): pass # xxx write debugging code here? diff --git a/pypy/translator/c/src/debug_print.h b/pypy/translator/c/src/debug_print.h --- a/pypy/translator/c/src/debug_print.h +++ b/pypy/translator/c/src/debug_print.h @@ -20,7 +20,6 @@ Note that 'fname' can be '-' to send the logging data to stderr. */ - /* macros used by the generated code */ #define PYPY_HAVE_DEBUG_PRINTS (pypy_have_debug_prints & 1 ? \ (pypy_debug_ensure_opened(), 1) : 0) @@ -40,174 +39,24 @@ extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; +#define OP_LL_READ_TIMESTAMP(val) READ_TIMESTAMP(val) -/* implementations */ +#include "src/asm.h" -#ifndef PYPY_NOT_MAIN_FILE -#include - -#if defined(__GNUC__) && defined(__linux__) -# include - static void pypy_setup_profiling() - { - cpu_set_t set; - CPU_ZERO(&set); - CPU_SET(0, &set); /* restrict to a single cpu */ - sched_setaffinity(0, sizeof(cpu_set_t), &set); - } -#else -static void pypy_setup_profiling() { } -#endif - -long pypy_have_debug_prints = -1; -FILE *pypy_debug_file = NULL; -static bool_t debug_ready = 0; -static bool_t debug_profile = 0; -static char *debug_start_colors_1 = ""; -static char *debug_start_colors_2 = ""; -static char *debug_stop_colors = ""; -static char *debug_prefix = NULL; - -static void pypy_debug_open(void) -{ - char *filename = getenv("PYPYLOG"); - if (filename) -#ifndef MS_WINDOWS - unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ -#else - putenv("PYPYLOG="); /* don't pass it to subprocesses */ -#endif - if (filename && filename[0]) - { - char *colon = strchr(filename, ':'); - if (!colon) - { - /* PYPYLOG=filename --- profiling version */ - debug_profile = 1; - pypy_setup_profiling(); - } - else - { - /* PYPYLOG=prefix:filename --- 
conditional logging */ - int n = colon - filename; - debug_prefix = malloc(n + 1); - memcpy(debug_prefix, filename, n); - debug_prefix[n] = '\0'; - filename = colon + 1; - } - if (strcmp(filename, "-") != 0) - pypy_debug_file = fopen(filename, "w"); - } - if (!pypy_debug_file) - { - pypy_debug_file = stderr; - if (isatty(2)) - { - debug_start_colors_1 = "\033[1m\033[31m"; - debug_start_colors_2 = "\033[31m"; - debug_stop_colors = "\033[0m"; - } - } - debug_ready = 1; -} - -void pypy_debug_ensure_opened(void) -{ - if (!debug_ready) - pypy_debug_open(); -} - - -#ifndef READ_TIMESTAMP /* asm_xxx.h may contain a specific implementation of READ_TIMESTAMP. * This is the default generic timestamp implementation. */ +#ifndef READ_TIMESTAMP + # ifdef _WIN32 # define READ_TIMESTAMP(val) QueryPerformanceCounter((LARGE_INTEGER*)&(val)) # else # include # include + +long long pypy_read_timestamp(); + # define READ_TIMESTAMP(val) (val) = pypy_read_timestamp() - static long long pypy_read_timestamp(void) - { -# ifdef CLOCK_THREAD_CPUTIME_ID - struct timespec tspec; - clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec); - return ((long long)tspec.tv_sec) * 1000000000LL + tspec.tv_nsec; -# else - /* argh, we don't seem to have clock_gettime(). Bad OS. 
*/ - struct timeval tv; - gettimeofday(&tv, NULL); - return ((long long)tv.tv_sec) * 1000000LL + tv.tv_usec; -# endif - } # endif #endif - - -static bool_t startswithoneof(const char *str, const char *substr) -{ - const char *p = str; - for (; *substr; substr++) - { - if (*substr != ',') - { - if (p && *p++ != *substr) - p = NULL; /* mismatch */ - } - else if (p != NULL) - return 1; /* match */ - else - p = str; /* mismatched, retry with the next */ - } - return p != NULL; -} - -#if defined(_MSC_VER) || defined(__MINGW32__) -#define PYPY_LONG_LONG_PRINTF_FORMAT "I64" -#else -#define PYPY_LONG_LONG_PRINTF_FORMAT "ll" -#endif - -static void display_startstop(const char *prefix, const char *postfix, - const char *category, const char *colors) -{ - long long timestamp; - READ_TIMESTAMP(timestamp); - fprintf(pypy_debug_file, "%s[%"PYPY_LONG_LONG_PRINTF_FORMAT"x] %s%s%s\n%s", - colors, - timestamp, prefix, category, postfix, - debug_stop_colors); -} - -void pypy_debug_start(const char *category) -{ - pypy_debug_ensure_opened(); - /* Enter a nesting level. Nested debug_prints are disabled by default - because the following left shift introduces a 0 in the last bit. - Note that this logic assumes that we are never going to nest - debug_starts more than 31 levels (63 on 64-bits). 
*/ - pypy_have_debug_prints <<= 1; - if (!debug_profile) - { - /* non-profiling version */ - if (!debug_prefix || !startswithoneof(category, debug_prefix)) - { - /* wrong section name, or no PYPYLOG at all, skip it */ - return; - } - /* else make this subsection active */ - pypy_have_debug_prints |= 1; - } - display_startstop("{", "", category, debug_start_colors_1); -} - -void pypy_debug_stop(const char *category) -{ - if (debug_profile | (pypy_have_debug_prints & 1)) - display_startstop("", "}", category, debug_start_colors_2); - pypy_have_debug_prints >>= 1; -} - -#endif /* PYPY_NOT_MAIN_FILE */ diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2074,6 +2074,50 @@ assert self.meta_interp(f, [bigval, 5]) == 0 self.check_loops(int_rshift=3, everywhere=True) + def notest_overflowing_shift2(self): + myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa']) + def f(a, b): + n = sa = 0 + while n < 10: + myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa) + if 0 < a < hint(sys.maxint/2, promote=True): pass + if 0 < b < 100: pass + sa += (a << b) >> b + n += 1 + return sa + + assert self.meta_interp(f, [5, 5]) == 50 + self.check_loops(int_rshift=0, everywhere=True) + + assert self.meta_interp(f, [5, 10]) == 50 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [10, 5]) == 100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [10, 10]) == 100 + self.check_loops(int_rshift=1, everywhere=True) + + assert self.meta_interp(f, [5, 100]) == 0 + self.check_loops(int_rshift=1, everywhere=True) + + def test_read_timestamp(self): + import time + from pypy.rlib.rtimer import read_timestamp + def busy_loop(): + start = time.time() + while time.time() - start < 0.1: + # busy wait + pass + + def f(): + t1 = read_timestamp() + busy_loop() + t2 = read_timestamp() + return t2 - t1 > 1000 + 
res = self.interp_operations(f, []) + assert res + class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/debug_print.c @@ -0,0 +1,150 @@ + +#include +#include +#include + +#include +#include +#include "src/profiling.h" +#include "src/debug_print.h" + +long pypy_have_debug_prints = -1; +FILE *pypy_debug_file = NULL; +static unsigned char debug_ready = 0; +static unsigned char debug_profile = 0; +static char *debug_start_colors_1 = ""; +static char *debug_start_colors_2 = ""; +static char *debug_stop_colors = ""; +static char *debug_prefix = NULL; + +static void pypy_debug_open(void) +{ + char *filename = getenv("PYPYLOG"); + if (filename) +#ifndef MS_WINDOWS + unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ +#else + putenv("PYPYLOG="); /* don't pass it to subprocesses */ +#endif + if (filename && filename[0]) + { + char *colon = strchr(filename, ':'); + if (!colon) + { + /* PYPYLOG=filename --- profiling version */ + debug_profile = 1; + pypy_setup_profiling(); + } + else + { + /* PYPYLOG=prefix:filename --- conditional logging */ + int n = colon - filename; + debug_prefix = malloc(n + 1); + memcpy(debug_prefix, filename, n); + debug_prefix[n] = '\0'; + filename = colon + 1; + } + if (strcmp(filename, "-") != 0) + pypy_debug_file = fopen(filename, "w"); + } + if (!pypy_debug_file) + { + pypy_debug_file = stderr; + if (isatty(2)) + { + debug_start_colors_1 = "\033[1m\033[31m"; + debug_start_colors_2 = "\033[31m"; + debug_stop_colors = "\033[0m"; + } + } + debug_ready = 1; +} + +void pypy_debug_ensure_opened(void) +{ + if (!debug_ready) + pypy_debug_open(); +} + + +#ifndef _WIN32 + + static long long pypy_read_timestamp(void) + { +# ifdef CLOCK_THREAD_CPUTIME_ID + struct timespec tspec; + clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec); + return ((long long)tspec.tv_sec) * 1000000000LL + tspec.tv_nsec; 
+# else + /* argh, we don't seem to have clock_gettime(). Bad OS. */ + struct timeval tv; + gettimeofday(&tv, NULL); + return ((long long)tv.tv_sec) * 1000000LL + tv.tv_usec; +# endif + } +#endif + + +static unsigned char startswithoneof(const char *str, const char *substr) +{ + const char *p = str; + for (; *substr; substr++) + { + if (*substr != ',') + { + if (p && *p++ != *substr) + p = NULL; /* mismatch */ + } + else if (p != NULL) + return 1; /* match */ + else + p = str; /* mismatched, retry with the next */ + } + return p != NULL; +} + +#if defined(_MSC_VER) || defined(__MINGW32__) +#define PYPY_LONG_LONG_PRINTF_FORMAT "I64" +#else +#define PYPY_LONG_LONG_PRINTF_FORMAT "ll" +#endif + +static void display_startstop(const char *prefix, const char *postfix, + const char *category, const char *colors) +{ + long long timestamp; + READ_TIMESTAMP(timestamp); + fprintf(pypy_debug_file, "%s[%"PYPY_LONG_LONG_PRINTF_FORMAT"x] %s%s%s\n%s", + colors, + timestamp, prefix, category, postfix, + debug_stop_colors); +} + +void pypy_debug_start(const char *category) +{ + pypy_debug_ensure_opened(); + /* Enter a nesting level. Nested debug_prints are disabled by default + because the following left shift introduces a 0 in the last bit. + Note that this logic assumes that we are never going to nest + debug_starts more than 31 levels (63 on 64-bits). 
*/ + pypy_have_debug_prints <<= 1; + if (!debug_profile) + { + /* non-profiling version */ + if (!debug_prefix || !startswithoneof(category, debug_prefix)) + { + /* wrong section name, or no PYPYLOG at all, skip it */ + return; + } + /* else make this subsection active */ + pypy_have_debug_prints |= 1; + } + display_startstop("{", "", category, debug_start_colors_1); +} + +void pypy_debug_stop(const char *category) +{ + if (debug_profile | (pypy_have_debug_prints & 1)) + display_startstop("", "}", category, debug_start_colors_2); + pypy_have_debug_prints >>= 1; +} diff --git a/pypy/jit/codewriter/longlong.py b/pypy/jit/codewriter/longlong.py --- a/pypy/jit/codewriter/longlong.py +++ b/pypy/jit/codewriter/longlong.py @@ -16,6 +16,7 @@ from pypy.rlib.objectmodel import compute_hash + is_64_bit = True supports_longlong = False r_float_storage = float FLOATSTORAGE = lltype.Float @@ -32,6 +33,7 @@ from pypy.rlib import rarithmetic, longlong2float + is_64_bit = False supports_longlong = True r_float_storage = rarithmetic.r_longlong FLOATSTORAGE = lltype.SignedLongLong diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -20,33 +20,33 @@ 'fastlocals_w[*]', 'last_exception', 'lastblock', + 'is_being_profiled', ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] -def get_printable_location(next_instr, bytecode): +def get_printable_location(next_instr, is_being_profiled, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names name = opcode_method_names[ord(bytecode.co_code[next_instr])] return '%s #%d %s' % (bytecode.get_repr(), next_instr, name) -def get_jitcell_at(next_instr, bytecode): - return bytecode.jit_cells.get(next_instr, None) +def get_jitcell_at(next_instr, is_being_profiled, bytecode): + return bytecode.jit_cells.get((next_instr, is_being_profiled), None) -def set_jitcell_at(newcell, next_instr, bytecode): - bytecode.jit_cells[next_instr] = newcell 
+def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): + bytecode.jit_cells[next_instr, is_being_profiled] = newcell -def confirm_enter_jit(next_instr, bytecode, frame, ec): +def confirm_enter_jit(next_instr, is_being_profiled, bytecode, frame, ec): return (frame.w_f_trace is None and - ec.profilefunc is None and ec.w_tracefunc is None) -def can_never_inline(next_instr, bytecode): +def can_never_inline(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 class PyPyJitDriver(JitDriver): reds = ['frame', 'ec'] - greens = ['next_instr', 'pycode'] + greens = ['next_instr', 'is_being_profiled', 'pycode'] virtualizables = ['frame'] ## def compute_invariants(self, reds, next_instr, pycode): @@ -68,13 +68,16 @@ def dispatch(self, pycode, next_instr, ec): self = hint(self, access_directly=True) next_instr = r_uint(next_instr) + is_being_profiled = self.is_being_profiled try: while True: pypyjitdriver.jit_merge_point(ec=ec, - frame=self, next_instr=next_instr, pycode=pycode) + frame=self, next_instr=next_instr, pycode=pycode, + is_being_profiled=is_being_profiled) co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) + is_being_profiled = self.is_being_profiled except ExitFrame: return self.popvalue() @@ -97,7 +100,8 @@ jumpto = r_uint(self.last_instr) # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, - pycode=self.getcode()) + pycode=self.getcode(), + is_being_profiled=self.is_being_profiled) return jumpto diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py --- a/pypy/jit/backend/x86/rx86.py +++ b/pypy/jit/backend/x86/rx86.py @@ -422,12 +422,12 @@ # Method names take the form of -# +# # _ # # For example, the method name for "mov reg, immed" is MOV_ri. Operand order # is Intel-style, with the destination first. 
-# +# # The operand type codes are: # r - register # b - ebp/rbp offset @@ -565,6 +565,9 @@ # x87 instructions FSTP_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) + # ------------------------------ Random mess ----------------------- + RDTSC = insn('\x0F\x31') + # reserved as an illegal instruction UD2 = insn('\x0F\x0B') diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ b/pypy/jit/tl/pypyjit_demo.py @@ -1,17 +1,19 @@ try: - def main(n): - def g(n): - return range(n) - s = 0 - for i in range(n): # ID: for - tmp = g(n) - s += tmp[i] # ID: getitem - a = 0 - return s - main(10) - + def g(x): + return x - 1 + def f(x): + while x: + x = g(x) + import cProfile + import time + t1 = time.time() + cProfile.run("f(10000000)") + t2 = time.time() + f(10000000) + t3 = time.time() + print t2 - t1, t3 - t2, (t3 - t2) / (t2 - t1) except Exception, e: print "Exception: ", type(e) print e - + diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py --- a/pypy/translator/c/genc.py +++ b/pypy/translator/c/genc.py @@ -915,6 +915,14 @@ from pypy.rlib.rarithmetic import LONG_BIT defines['PYPY_LONG_BIT'] = LONG_BIT +def add_extra_files(eci): + srcdir = py.path.local(autopath.pypydir).join('translator', 'c', 'src') + files = [ + srcdir / 'profiling.c', + srcdir / 'debug_print.c', + ] + return eci.merge(ExternalCompilationInfo(separate_module_files=files)) + def gen_source_standalone(database, modulename, targetdir, eci, entrypointname, defines={}): assert database.standalone @@ -964,6 +972,7 @@ print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n fi.close() + eci = add_extra_files(eci) eci = eci.convert_sources_to_files(being_main=True) files, eci = eci.get_module_files() return eci, filename, sg.getextrafiles() + list(files) @@ -1010,6 +1019,7 @@ gen_startupcode(f, database) f.close() + eci = add_extra_files(eci) eci = eci.convert_sources_to_files(being_main=True) files, eci = eci.get_module_files() return eci, filename, 
sg.getextrafiles() + list(files) diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py --- a/pypy/rlib/debug.py +++ b/pypy/rlib/debug.py @@ -175,6 +175,7 @@ c_pythonfunction = hop.inputconst(lltype.Void, pythonfunction) args_v = [hop.inputarg(hop.args_r[i], arg=i) for i in range(2, hop.nb_args)] + hop.exception_is_here() return hop.genop('debug_llinterpcall', [c_pythonfunction] + args_v, resulttype=RESTYPE) diff --git a/pypy/rpython/test/test_llinterp.py b/pypy/rpython/test/test_llinterp.py --- a/pypy/rpython/test/test_llinterp.py +++ b/pypy/rpython/test/test_llinterp.py @@ -658,3 +658,25 @@ assert x == -42 res = interpret(f, []) + +def test_raising_llimpl(): + from pypy.rpython.extfunc import register_external + + def external(): + pass + + def raising(): + raise OSError(15, "abcd") + + ext = register_external(external, [], llimpl=raising, llfakeimpl=raising) + + def f(): + # this is a useful llfakeimpl that raises an exception + try: + external() + return True + except OSError: + return False + + res = interpret(f, []) + assert not res diff --git a/pypy/rlib/test/test_rtimer.py b/pypy/rlib/test/test_rtimer.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/test/test_rtimer.py @@ -0,0 +1,28 @@ +import time + +from pypy.rlib.rtimer import read_timestamp +from pypy.rpython.test.test_llinterp import interpret +from pypy.translator.c.test.test_genc import compile + +def timer(): + t1 = read_timestamp() + start = time.time() + while time.time() - start < 0.1: + # busy wait + pass + t2 = read_timestamp() + return t2 - t1 + +def test_timer(): + diff = timer() + # We're counting ticks, verify they look correct + assert diff > 1000 + +def test_annotation(): + diff = interpret(timer, []) + assert diff > 1000 + +def test_compile_c(): + function = compile(timer, []) + diff = function() + assert diff > 1000 \ No newline at end of file diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py --- a/pypy/jit/backend/x86/regalloc.py +++ 
b/pypy/jit/backend/x86/regalloc.py @@ -367,7 +367,7 @@ self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, result_loc, current_depths) - self.possibly_free_vars(guard_op.getfailargs()) + self.possibly_free_vars(guard_op.getfailargs()) def PerformDiscard(self, op, arglocs): if not we_are_translated(): @@ -443,7 +443,7 @@ assert isinstance(arg, Box) if arg not in last_used: last_used[arg] = i - + longevity = {} for arg in produced: if arg in last_used: @@ -837,7 +837,7 @@ self._call(op, [imm(size), vable] + [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_op) - + def consider_cond_call_gc_wb(self, op): assert op.result is None args = op.getarglist() @@ -1217,6 +1217,29 @@ else: raise AssertionError("bad unicode item size") + def consider_read_timestamp(self, op): + tmpbox_high = TempBox() + self.rm.force_allocate_reg(tmpbox_high, selected_reg=eax) + if longlong.is_64_bit: + # on 64-bit, use rax as temporary register and returns the + # result in rdx + result_loc = self.rm.force_allocate_reg(op.result, + selected_reg=edx) + self.Perform(op, [], result_loc) + else: + # on 32-bit, use both eax and edx as temporary registers, + # use a temporary xmm register, and returns the result in + # another xmm register. 
+ tmpbox_low = TempBox() + self.rm.force_allocate_reg(tmpbox_low, selected_reg=edx) + xmmtmpbox = TempBox() + xmmtmploc = self.xrm.force_allocate_reg(xmmtmpbox) + result_loc = self.xrm.force_allocate_reg(op.result) + self.Perform(op, [xmmtmploc], result_loc) + self.xrm.possibly_free_var(xmmtmpbox) + self.rm.possibly_free_var(tmpbox_low) + self.rm.possibly_free_var(tmpbox_high) + def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -221,12 +221,14 @@ pdb_plus_show = PdbPlusShow(t) # need a translator to support extended commands - def debug(got_error): + def finish_profiling(): if prof: prof.disable() statfilename = 'prof.dump' log.info('Dumping profiler stats to: %s' % statfilename) - prof.dump_stats(statfilename) + prof.dump_stats(statfilename) + + def debug(got_error): tb = None if got_error: import traceback @@ -302,9 +304,11 @@ except SystemExit: raise except: + finish_profiling() debug(True) raise SystemExit(1) else: + finish_profiling() if translateconfig.pdb: debug(False) diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py --- a/pypy/jit/metainterp/executor.py +++ b/pypy/jit/metainterp/executor.py @@ -5,7 +5,8 @@ from pypy.rpython.lltypesystem import lltype, llmemory, rstr from pypy.rpython.ootypesystem import ootype from pypy.rpython.lltypesystem.lloperation import llop -from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask +from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask, r_longlong +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.unroll import unrolling_iterable from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, check_descr from pypy.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr @@ -227,6 +228,15 @@ length = lengthbox.getint() rstr.copy_unicode_contents(src, 
dst, srcstart, dststart, length) +def do_read_timestamp(cpu, _): + x = read_timestamp() + if longlong.is_64_bit: + assert isinstance(x, int) # 64-bit + return BoxInt(x) + else: + assert isinstance(x, r_longlong) # 32-bit + return BoxFloat(x) + # ____________________________________________________________ ##def do_force_token(cpu): diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -25,6 +25,7 @@ from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated from pypy.rlib.rarithmetic import ovfcheck from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint +from pypy.rlib.rtimer import read_timestamp import py from pypy.tool.ansi_print import ansi_log @@ -506,7 +507,7 @@ ', '.join(map(str, args)),)) self.fail_args = args return op.fail_index - + else: assert 0, "unknown final operation %d" % (op.opnum,) @@ -856,6 +857,9 @@ opaque_frame = _to_opaque(self) return llmemory.cast_ptr_to_adr(opaque_frame) + def op_read_timestamp(self, descr): + return read_timestamp() + def op_call_may_force(self, calldescr, func, *args): assert not self._forced self._may_force = self.opindex @@ -937,7 +941,7 @@ class OOFrame(Frame): OPHANDLERS = [None] * (rop._LAST+1) - + def op_new_with_vtable(self, descr, vtable): assert descr is None typedescr = get_class_size(self.memocast, vtable) @@ -958,7 +962,7 @@ return res op_getfield_gc_pure = op_getfield_gc - + def op_setfield_gc(self, fielddescr, obj, newvalue): TYPE = fielddescr.TYPE fieldname = fielddescr.fieldname diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -1,12 +1,39 @@ +import py from pypy.interpreter.baseobjspace import Wrappable +from pypy.interpreter.error import OperationError +from pypy.interpreter.function import Method, Function +from pypy.interpreter.gateway 
import interp2app, unwrap_spec, NoneNotWrapped from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty) -from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped -from pypy.interpreter.function import Method, Function -from pypy.interpreter.error import OperationError +from pypy.rlib import jit +from pypy.rlib.objectmodel import we_are_translated +from pypy.rlib.rtimer import read_timestamp, _is_64_bit +from pypy.rpython.lltypesystem import rffi, lltype +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.tool.autopath import pypydir +from pypy.rlib.rarithmetic import r_longlong + import time, sys +# cpu affinity settings + +srcdir = py.path.local(pypydir).join('translator', 'c', 'src') +eci = ExternalCompilationInfo(separate_module_files= + [srcdir.join('profiling.c')]) + +c_setup_profiling = rffi.llexternal('pypy_setup_profiling', + [], lltype.Void, + compilation_info = eci) +c_teardown_profiling = rffi.llexternal('pypy_teardown_profiling', + [], lltype.Void, + compilation_info = eci) + +if _is_64_bit: + timer_size_int = int +else: + timer_size_int = r_longlong + class W_StatsEntry(Wrappable): def __init__(self, space, frame, callcount, reccallcount, tt, it, w_sublist): @@ -74,20 +101,43 @@ l_w = [] for v in values: if v.callcount != 0: - l_w.append(v.stats(space, factor)) + l_w.append(v.stats(space, None, factor)) return space.newlist(l_w) -class ProfilerEntry(object): +class ProfilerSubEntry(object): def __init__(self, frame): self.frame = frame - self.tt = 0 - self.it = 0 + self.ll_tt = r_longlong(0) + self.ll_it = r_longlong(0) self.callcount = 0 self.recursivecallcount = 0 self.recursionLevel = 0 + + def stats(self, space, parent, factor): + w_sse = W_StatsSubEntry(space, self.frame, + self.callcount, self.recursivecallcount, + factor * float(self.ll_tt), + factor * float(self.ll_it)) + return space.wrap(w_sse) + + def _stop(self, tt, it): + if not we_are_translated(): + assert type(tt) is 
timer_size_int + assert type(it) is timer_size_int + self.recursionLevel -= 1 + if self.recursionLevel == 0: + self.ll_tt += tt + else: + self.recursivecallcount += 1 + self.ll_it += it + self.callcount += 1 + +class ProfilerEntry(ProfilerSubEntry): + def __init__(self, frame): + ProfilerSubEntry.__init__(self, frame) self.calls = {} - def stats(self, space, factor): + def stats(self, space, dummy, factor): if self.calls: w_sublist = space.newlist([sub_entry.stats(space, self, factor) for sub_entry in self.calls.values()]) @@ -95,67 +145,44 @@ w_sublist = space.w_None w_se = W_StatsEntry(space, self.frame, self.callcount, self.recursivecallcount, - factor * self.tt, factor * self.it, w_sublist) + factor * float(self.ll_tt), + factor * float(self.ll_it), w_sublist) return space.wrap(w_se) -class ProfilerSubEntry(object): - def __init__(self, frame): - self.frame = frame - self.tt = 0 - self.it = 0 - self.callcount = 0 - self.recursivecallcount = 0 - self.recursionLevel = 0 - - def stats(self, space, parent, factor): - w_sse = W_StatsSubEntry(space, self.frame, - self.callcount, self.recursivecallcount, - factor * self.tt, factor * self.it) - return space.wrap(w_sse) + @jit.purefunction + def _get_or_make_subentry(self, entry, make=True): + try: + return self.calls[entry] + except KeyError: + if make: + subentry = ProfilerSubEntry(entry.frame) + self.calls[entry] = subentry + return subentry + return None class ProfilerContext(object): def __init__(self, profobj, entry): self.entry = entry - self.subt = 0 + self.ll_subt = timer_size_int(0) self.previous = profobj.current_context entry.recursionLevel += 1 if profobj.subcalls and self.previous: - caller = self.previous.entry - try: - subentry = caller.calls[entry] - except KeyError: - subentry = ProfilerSubEntry(entry.frame) - caller.calls[entry] = subentry + caller = jit.hint(self.previous.entry, promote=True) + subentry = caller._get_or_make_subentry(entry) subentry.recursionLevel += 1 - self.t0 = profobj.timer() + 
self.ll_t0 = profobj.ll_timer() def _stop(self, profobj, entry): - # XXX factor out two pieces of the same code - tt = profobj.timer() - self.t0 - it = tt - self.subt + tt = profobj.ll_timer() - self.ll_t0 + it = tt - self.ll_subt if self.previous: - self.previous.subt += tt - entry.recursionLevel -= 1 - if entry.recursionLevel == 0: - entry.tt += tt - else: - entry.recursivecallcount += 1 - entry.it += it - entry.callcount += 1 + self.previous.ll_subt += tt + entry._stop(tt, it) if profobj.subcalls and self.previous: - caller = self.previous.entry - try: - subentry = caller.calls[entry] - except KeyError: - pass - else: - subentry.recursionLevel -= 1 - if subentry.recursionLevel == 0: - subentry.tt += tt - else: - subentry.recursivecallcount += 1 - subentry.it += it - subentry.callcount += 1 + caller = jit.hint(self.previous.entry, promote=True) + subentry = caller._get_or_make_subentry(entry, False) + if subentry is not None: + subentry._stop(tt, it) def create_spec(space, w_arg): if isinstance(w_arg, Method): @@ -187,7 +214,7 @@ else: class_name = space.type(w_arg).getname(space, '?') return "{'%s' object}" % (class_name,) - + def lsprof_call(space, w_self, frame, event, w_arg): assert isinstance(w_self, W_Profiler) if event == 'call': @@ -209,6 +236,7 @@ pass class W_Profiler(Wrappable): + def __init__(self, space, w_callable, time_unit, subcalls, builtins): self.subcalls = subcalls self.builtins = builtins @@ -218,65 +246,94 @@ self.data = {} self.builtin_data = {} self.space = space + self.is_enabled = False + self.total_timestamp = r_longlong(0) + self.total_real_time = 0.0 - def timer(self): + def ll_timer(self): if self.w_callable: space = self.space try: - return space.float_w(space.call_function(self.w_callable)) + if _is_64_bit: + return space.int_w(space.call_function(self.w_callable)) + else: + return space.r_longlong_w(space.call_function(self.w_callable)) except OperationError, e: e.write_unraisable(space, "timer function ", self.w_callable) - 
return 0.0 - return time.time() + return timer_size_int(0) + return read_timestamp() def enable(self, space, w_subcalls=NoneNotWrapped, w_builtins=NoneNotWrapped): + if self.is_enabled: + return # ignored if w_subcalls is not None: self.subcalls = space.bool_w(w_subcalls) if w_builtins is not None: self.builtins = space.bool_w(w_builtins) + # We want total_real_time and total_timestamp to end up containing + # (endtime - starttime). Now we are at the start, so we first + # have to subtract the current time. + self.is_enabled = True + self.total_real_time -= time.time() + self.total_timestamp -= read_timestamp() # set profiler hook + c_setup_profiling() space.getexecutioncontext().setllprofile(lsprof_call, space.wrap(self)) + @jit.purefunction + def _get_or_make_entry(self, f_code, make=True): + try: + return self.data[f_code] + except KeyError: + if make: + entry = ProfilerEntry(f_code) + self.data[f_code] = entry + return entry + return None + + @jit.purefunction + def _get_or_make_builtin_entry(self, key, make=True): + try: + return self.builtin_data[key] + except KeyError: + if make: + entry = ProfilerEntry(self.space.wrap(key)) + self.builtin_data[key] = entry + return entry + return None + def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) - try: - entry = self.data[f_code] - except KeyError: - entry = ProfilerEntry(f_code) - self.data[f_code] = entry + self = jit.hint(self, promote=True) + entry = self._get_or_make_entry(f_code) self.current_context = ProfilerContext(self, entry) def _enter_return(self, f_code): context = self.current_context if context is None: return - try: - entry = self.data[f_code] + self = jit.hint(self, promote=True) + entry = self._get_or_make_entry(f_code, False) + if entry is not None: context._stop(self, entry) - except KeyError: - pass self.current_context = context.previous def _enter_builtin_call(self, key): - try: - entry = self.builtin_data[key] - except KeyError: - entry = 
ProfilerEntry(self.space.wrap(key)) - self.builtin_data[key] = entry - self.current_context = ProfilerContext(self, entry) + self = jit.hint(self, promote=True) + entry = self._get_or_make_builtin_entry(key) + self.current_context = ProfilerContext(self, entry) def _enter_builtin_return(self, key): context = self.current_context if context is None: return - try: - entry = self.builtin_data[key] + self = jit.hint(self, promote=True) + entry = self._get_or_make_builtin_entry(key, False) + if entry is not None: context._stop(self, entry) - except KeyError: - pass - self.current_context = context.previous + self.current_context = context.previous def _flush_unmatched(self): context = self.current_context @@ -288,13 +345,29 @@ self.current_context = None def disable(self, space): + if not self.is_enabled: + return # ignored + # We want total_real_time and total_timestamp to end up containing + # (endtime - starttime), or the sum of such intervals if + # enable() and disable() are called several times. + self.is_enabled = False + self.total_timestamp += read_timestamp() + self.total_real_time += time.time() # unset profiler hook space.getexecutioncontext().setllprofile(None, None) + c_teardown_profiling() self._flush_unmatched() def getstats(self, space): if self.w_callable is None: - factor = 1. 
# we measure time.time in floats + if self.is_enabled: + raise OperationError(space.w_RuntimeError, + space.wrap("Profiler instance must be disabled " + "before getting the stats")) + if self.total_timestamp: + factor = self.total_real_time / float(self.total_timestamp) + else: + factor = 1.0 # probably not used elif self.time_unit > 0.0: factor = self.time_unit else: diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h --- a/pypy/translator/c/src/g_include.h +++ b/pypy/translator/c/src/g_include.h @@ -39,11 +39,13 @@ #include "src/instrument.h" #include "src/asm.h" +#include "src/profiling.h" + +#include "src/debug_print.h" /*** modules ***/ #ifdef HAVE_RTYPER /* only if we have an RTyper */ # include "src/rtyper.h" -# include "src/debug_print.h" # include "src/debug_traceback.h" # include "src/debug_alloc.h" #ifndef AVR diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -14,7 +14,7 @@ modname, _ = modname.split('.', 1) if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', - '_socket', '_sre']: + '_socket', '_sre', '_lsprof']: return True return False diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1023,6 +1023,10 @@ metainterp.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, lastbox], None) + @arguments() + def opimpl_ll_read_timestamp(self): + return self.metainterp.execute_and_record(rop.READ_TIMESTAMP, None) + # ------------------------------ def setup_call(self, argboxes): diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py --- a/pypy/rpython/lltypesystem/opimpl.py +++ b/pypy/rpython/lltypesystem/opimpl.py @@ -380,7 +380,7 @@ return ord(b) def op_cast_int_to_unichar(b): - assert type(b) is int + assert type(b) is int 
return unichr(b) def op_cast_int_to_uint(b): @@ -578,6 +578,10 @@ def op_shrink_array(array, smallersize): return False +def op_ll_read_timestamp(): + from pypy.rlib.rtimer import read_timestamp + return read_timestamp() + # ____________________________________________________________ def get_op_impl(opname): diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py --- a/pypy/jit/backend/test/runner_test.py +++ b/pypy/jit/backend/test/runner_test.py @@ -1356,6 +1356,19 @@ self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest, c_nest, c_nest], 'void') + def test_read_timestamp(self): + if longlong.is_64_bit: + got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + res1 = got1.getint() + res2 = got2.getint() + else: + got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + res1 = got1.getlonglong() + res2 = got2.getlonglong() + assert res1 < res2 < res1 + 2**32 + class LLtypeBackendTest(BaseBackendTest): diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py --- a/pypy/rpython/lltypesystem/lloperation.py +++ b/pypy/rpython/lltypesystem/lloperation.py @@ -32,7 +32,7 @@ assert isinstance(canraise, tuple) assert not canraise or not canfold - + # The operation manipulates PyObjects self.pyobj = pyobj @@ -440,6 +440,7 @@ 'get_write_barrier_failing_case': LLOp(sideeffects=False), 'get_write_barrier_from_array_failing_case': LLOp(sideeffects=False), 'gc_get_type_info_group': LLOp(sideeffects=False), + 'll_read_timestamp': LLOp(canrun=True), # __________ GC operations __________ @@ -482,7 +483,7 @@ 'gc_typeids_z' : LLOp(), # ------- JIT & GC interaction, only for some GCs ---------- - + 'gc_adr_of_nursery_free' : LLOp(), # ^^^ returns an address of nursery free pointer, for later modifications 'gc_adr_of_nursery_top' : LLOp(), @@ -554,7 +555,8 @@ 
'debug_pdb': LLOp(), 'debug_assert': LLOp(tryfold=True), 'debug_fatalerror': LLOp(), - 'debug_llinterpcall': LLOp(), # Python func call 'res=arg[0](*arg[1:])' + 'debug_llinterpcall': LLOp(canraise=(Exception,)), + # Python func call 'res=arg[0](*arg[1:])' # in backends, abort() or whatever is fine 'debug_start_traceback': LLOp(), 'debug_record_traceback': LLOp(), diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py --- a/pypy/jit/backend/x86/test/test_regloc.py +++ b/pypy/jit/backend/x86/test/test_regloc.py @@ -21,10 +21,12 @@ assert_encodes_as(cb32, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xB9\x39\x30') # 64-bit - assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x89\xD9') + assert_encodes_as(cb64, "MOV16", (r8, ebx), '\x66\x41\x89\xD8') # 11 011 000 + assert_encodes_as(cb64, "MOV16", (ebx, r8), '\x66\x44\x89\xC3') # 11 000 011 + assert_encodes_as(cb64, "MOV16", (ecx, ebx), '\x66\x40\x89\xD9') # XXX: What we are testing for here is actually not the most compact # encoding. 
- assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\xC7\xC1\x39\x30') + assert_encodes_as(cb64, "MOV16", (ecx, ImmedLoc(12345)), '\x66\x40\xC7\xC1\x39\x30') assert_encodes_as(cb64, "MOV16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\xC7\x45\x00\x39\x30') def test_cmp_16(): @@ -33,8 +35,10 @@ assert_encodes_as(cb32, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') # 64-bit - assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x39\xD9') - assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x81\xF9\x39\x30') + assert_encodes_as(cb64, "CMP16", (r8, ebx), '\x66\x41\x39\xD8') # 11 011 000 + assert_encodes_as(cb64, "CMP16", (ebx, r8), '\x66\x44\x39\xC3') # 11 000 011 + assert_encodes_as(cb64, "CMP16", (ecx, ebx), '\x66\x40\x39\xD9') + assert_encodes_as(cb64, "CMP16", (ecx, ImmedLoc(12345)), '\x66\x40\x81\xF9\x39\x30') assert_encodes_as(cb64, "CMP16", (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), '\x66\x41\x81\x7D\x00\x39\x30') def test_relocation(): diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -298,8 +298,11 @@ # Profile cases if self.profilefunc is not None: - if event not in ['leaveframe', 'call', 'c_call', - 'c_return', 'c_exception']: + if not (event == 'leaveframe' or + event == 'call' or + event == 'c_call' or + event == 'c_return' or + event == 'c_exception'): return False last_exception = frame.last_exception diff --git a/pypy/rlib/rtimer.py b/pypy/rlib/rtimer.py new file mode 100644 --- /dev/null +++ b/pypy/rlib/rtimer.py @@ -0,0 +1,37 @@ +import time + +from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint +from pypy.rlib.rarithmetic import intmask, longlongmask +from pypy.rpython.extregistry import ExtRegistryEntry +from pypy.rpython.lltypesystem import lltype, rffi + +_is_64_bit = r_uint.BITS > 32 + + +def read_timestamp(): + # Returns a longlong on 32-bit, 
and a regular int on 64-bit. + # When running on top of python, build the result a bit arbitrarily. + x = long(time.time() * 500000000) + if _is_64_bit: + return intmask(x) + else: + return longlongmask(x) + + +class ReadTimestampEntry(ExtRegistryEntry): + _about_ = read_timestamp + + def compute_result_annotation(self): + from pypy.annotation.model import SomeInteger + if _is_64_bit: + return SomeInteger() + else: + return SomeInteger(knowntype=r_longlong) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + if _is_64_bit: + resulttype = lltype.Signed + else: + resulttype = rffi.LONGLONG + return hop.genop("ll_read_timestamp", [], resulttype=resulttype) diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py --- a/pypy/jit/metainterp/blackhole.py +++ b/pypy/jit/metainterp/blackhole.py @@ -1,4 +1,5 @@ from pypy.rlib.unroll import unrolling_iterable +from pypy.rlib.rtimer import read_timestamp from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.debug import debug_start, debug_stop @@ -1212,6 +1213,10 @@ def bhimpl_unicodesetitem(cpu, unicode, index, newchr): cpu.bh_unicodesetitem(unicode, index, newchr) + @arguments(returns=(longlong.is_64_bit and "i" or "f")) + def bhimpl_ll_read_timestamp(): + return read_timestamp() + # ---------- # helpers to resume running in blackhole mode when a guard failed @@ -1423,7 +1428,7 @@ current_exc = blackholeinterp._prepare_resume_from_failure( resumedescr.guard_opnum, dont_change_position) - + try: _run_forever(blackholeinterp, current_exc) finally: diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py --- a/pypy/jit/metainterp/resoperation.py +++ b/pypy/jit/metainterp/resoperation.py @@ -23,7 +23,7 @@ # methods implemented by each concrete class # ------------------------------------------ - + def getopnum(self): raise NotImplementedError @@ -234,7 +234,7 @@ def 
getarg(self, i): raise IndexError - + def setarg(self, i, box): raise IndexError @@ -258,7 +258,7 @@ return self._arg0 else: raise IndexError - + def setarg(self, i, box): if i == 0: self._arg0 = box @@ -288,7 +288,7 @@ return self._arg1 else: raise IndexError - + def setarg(self, i, box): if i == 0: self._arg0 = box @@ -326,7 +326,7 @@ return self._arg2 else: raise IndexError - + def setarg(self, i, box): if i == 0: self._arg0 = box @@ -352,7 +352,7 @@ def getarg(self, i): return self._args[i] - + def setarg(self, i, box): self._args[i] = box @@ -460,6 +460,7 @@ '_MALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend + 'READ_TIMESTAMP/0', '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', @@ -468,7 +469,7 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - #'RUNTIMENEW/1', # ootype operation + #'RUNTIMENEW/1', # ootype operation 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'DEBUG_MERGE_POINT/2', # debugging only 'JIT_DEBUG/*', # debugging only @@ -554,7 +555,7 @@ 2: BinaryOp, 3: TernaryOp } - + is_guard = name.startswith('GUARD') if is_guard: assert withdescr diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py --- a/pypy/rpython/lltypesystem/rdict.py +++ b/pypy/rpython/lltypesystem/rdict.py @@ -521,6 +521,7 @@ ll_dict_insertclean(d, entry.key, entry.value, hash) i += 1 old_entries.delete() +ll_dict_resize.oopspec = 'dict.resize(d)' # ------- a port of CPython's dictobject.c's lookdict implementation ------- PERTURB_SHIFT = 5 diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -91,6 +91,30 @@ assert spam2bar.inlinetime == 1.0 assert spam2bar.totaltime == 1.0 + def test_scale_of_result(self): + import _lsprof, time + prof = _lsprof.Profiler() + def foo(n): + t = time.time() + while 
abs(t - time.time()) < 1.0: + pass # busy-wait for 1 second + def bar(n): + foo(n) + prof.enable() + bar(0) + prof.disable() + stats = prof.getstats() + entries = {} + for entry in stats: + entries[entry.code] = entry + efoo = entries[foo.func_code] + ebar = entries[bar.func_code] + assert 0.9 < efoo.totaltime < 2.9 + assert 0.9 < efoo.inlinetime < 2.9 + for subentry in ebar.calls: + assert 0.9 < subentry.totaltime < 2.9 + assert 0.9 < subentry.inlinetime < 2.9 + def test_cprofile(self): import sys, os # XXX this is evil trickery to walk around the fact that we don't diff --git a/pypy/translator/c/src/profiling.h b/pypy/translator/c/src/profiling.h new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/profiling.h @@ -0,0 +1,8 @@ + +#ifndef PROFILING_H +#define PROFILING_H + +void pypy_setup_profiling(); +void pypy_teardown_profiling(); + +#endif diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py --- a/pypy/jit/tl/pypyjit.py +++ b/pypy/jit/tl/pypyjit.py @@ -39,6 +39,7 @@ config.objspace.usemodules.array = True config.objspace.usemodules._weakref = True config.objspace.usemodules._sre = False +config.objspace.usemodules._lsprof = True # config.objspace.usemodules._ffi = True # @@ -99,7 +100,7 @@ from pypy.translator.goal.ann_override import PyPyAnnotatorPolicy from pypy.rpython.test.test_llinterp import get_interpreter - # first annotate, rtype, and backendoptimize PyPy + # first annotate and rtype try: interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py --- a/pypy/jit/backend/x86/assembler.py +++ b/pypy/jit/backend/x86/assembler.py @@ -330,7 +330,7 @@ if log: self._register_counter() operations = self._inject_debugging_code(looptoken, operations) - + regalloc = RegAlloc(self, self.cpu.translate_support_code) arglocs = regalloc.prepare_loop(inputargs, operations, looptoken) looptoken._x86_arglocs = arglocs @@ -339,7 +339,7 @@ 
stackadjustpos = self._assemble_bootstrap_code(inputargs, arglocs) self.looppos = self.mc.get_relative_pos() looptoken._x86_frame_depth = -1 # temporarily - looptoken._x86_param_depth = -1 # temporarily + looptoken._x86_param_depth = -1 # temporarily frame_depth, param_depth = self._assemble(regalloc, operations) looptoken._x86_frame_depth = frame_depth looptoken._x86_param_depth = param_depth @@ -538,7 +538,7 @@ def _assemble(self, regalloc, operations): self._regalloc = regalloc - regalloc.walk_operations(operations) + regalloc.walk_operations(operations) if we_are_translated() or self.cpu.dont_keepalive_stuff: self._regalloc = None # else keep it around for debugging frame_depth = regalloc.fm.frame_depth @@ -1015,7 +1015,7 @@ dst_locs.append(unused_gpr.pop()) else: pass_on_stack.append(loc) - + # Emit instructions to pass the stack arguments # XXX: Would be nice to let remap_frame_layout take care of this, but # we'd need to create something like StackLoc, but relative to esp, @@ -1441,6 +1441,17 @@ else: assert 0, itemsize + def genop_read_timestamp(self, op, arglocs, resloc): + self.mc.RDTSC() + if longlong.is_64_bit: + self.mc.SHL_ri(edx.value, 32) + self.mc.OR_rr(edx.value, eax.value) + else: + loc1, = arglocs + self.mc.MOVD_xr(loc1.value, edx.value) + self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.PUNPCKLDQ_xx(resloc.value, loc1.value) + def genop_guard_guard_true(self, ign_1, guard_op, guard_token, locs, ign_2): loc = locs[0] self.mc.TEST(loc, loc) @@ -2131,7 +2142,7 @@ assert rx86.fits_in_32bits(tid) self.mc.MOV_mi((eax.value, 0), tid) self.mc.MOV(heap(nursery_free_adr), edx) - + genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST genop_list = [Assembler386.not_implemented_op] * rop._LAST genop_llong_list = {} @@ -2142,7 +2153,7 @@ opname = name[len('genop_discard_'):] num = getattr(rop, opname.upper()) genop_discard_list[num] = value - elif name.startswith('genop_guard_') and name != 'genop_guard_exception': + elif 
name.startswith('genop_guard_') and name != 'genop_guard_exception': opname = name[len('genop_guard_'):] num = getattr(rop, opname.upper()) genop_guard_list[num] = value diff --git a/pypy/translator/c/src/align.h b/pypy/translator/c/src/align.h --- a/pypy/translator/c/src/align.h +++ b/pypy/translator/c/src/align.h @@ -1,3 +1,6 @@ + +#ifndef _PYPY_ALIGN_H +#define _PYPY_ALIGN_H /* alignment for arena-based garbage collectors: the following line enforces an alignment that should be enough for any structure @@ -14,3 +17,5 @@ #define ROUND_UP_FOR_ALLOCATION(x, minsize) \ ((((x)>=(minsize)?(x):(minsize)) \ + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1)) + +#endif //_PYPY_ALIGN_H diff --git a/pypy/translator/c/src/profiling.c b/pypy/translator/c/src/profiling.c new file mode 100644 --- /dev/null +++ b/pypy/translator/c/src/profiling.c @@ -0,0 +1,35 @@ + +#include +#if defined(__GNUC__) && defined(__linux__) + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#include +#endif + +cpu_set_t base_cpu_set; +int profiling_setup = 0; + +void pypy_setup_profiling() +{ + if (!profiling_setup) { + cpu_set_t set; + sched_getaffinity(0, sizeof(cpu_set_t), &base_cpu_set); + CPU_ZERO(&set); + CPU_SET(0, &set); /* restrict to a single cpu */ + sched_setaffinity(0, sizeof(cpu_set_t), &set); + profiling_setup = 1; + } +} + +void pypy_teardown_profiling() +{ + if (profiling_setup) { + sched_setaffinity(0, sizeof(cpu_set_t), &base_cpu_set); + profiling_setup = 0; + } +} +#else +void pypy_setup_profiling() { } +void pypy_teardown_profiling() { } +#endif From commits-noreply at bitbucket.org Mon Apr 25 15:01:44 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 15:01:44 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: remove low-level-encapsulation.rst and translationapsects.rst Message-ID: <20110425130144.8F919282B9D@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43582:b357fa64eee9 Date: 2011-04-25 14:39 +0200 
http://bitbucket.org/pypy/pypy/changeset/b357fa64eee9/ Log: remove low-level-encapsulation.rst and translationapsects.rst diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -190,7 +190,7 @@ .. _`transformation`: **transformation** - Code that modifies flowgraphs to weave in `translation-aspects`_ + Code that modifies flowgraphs to weave in translation aspects .. _`translation-time`: @@ -226,7 +226,6 @@ .. _`The RPython Typer`: translation.html#the-rpython-typer .. _`backends`: getting-started-dev.html#trying-out-the-translator .. _Tool: getting-started-dev.html#trying-out-the-translator -.. _`translation-aspects`: translation-aspects.html .. _`PyPy's garbage collectors`: garbage_collection.html .. _`Restricted Python`: coding-guide.html#restricted-python .. _PSF: http://www.python.org/psf/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -105,7 +105,6 @@ interpreter-optimizations.rst configuration.rst - low-level-encapsulation.rst parser.rst rlib.rst rtyper.rst diff --git a/pypy/doc/low-level-encapsulation.rst b/pypy/doc/low-level-encapsulation.rst deleted file mode 100644 --- a/pypy/doc/low-level-encapsulation.rst +++ /dev/null @@ -1,345 +0,0 @@ -.. include:: throwaway.rst - -============================================================ - Encapsulating low-level implementation aspects -============================================================ - -.. contents:: - - - -Abstract -======== - -It has always been a major goal of PyPy to not force implementation -decisions. This means that even after the implementation of the -standard interpreter [#]_ has been written we are still able to experiment -with different approaches to memory management or concurrency and to -target wildly different platforms such as the Java Virtual Machine or -a very memory-limited embedded environment. 
- -We do this by allowing the encapsulation of these low level aspects as -well defined parts of the translation process. - -In the following document, we give examples of aspects that have been -successfully encapsulated in more detail and contrast the potential of -our approach with CPython. - -.. [#] `standard interpreter`_ is our term for the code which - implements the Python language, i.e. the interpreter and the - standard object space. - - -Background -========== - -One of the better known significant modifications to CPython are -Christian Tismer's "stackless" patches [STK]_, which allow for far more -flexible control flow than the typical function call/return supported by -CPython. Originally implemented as a series of invasive patches, -Christian found that maintaining these patches as CPython itself was -further developed was time consuming to the point of no longer being -able to work on the new functionality that was the point of the -exercise. - -One solution would have been for the patches to become part of core -CPython but this was not done partly because the code that fully -enabled stackless required widespread modifications that made the code -harder to understand (as the "stackless" model contains control flow -that is not easily expressable in C, the implementation became much -less "natural" in some sense). - -With PyPy, however, it is possible to obtain this flexible control -flow whilst retaining transparent implementation code as the necessary -modifications can be implemented as a localized translation aspect, -and indeed this was done at the Paris sprint in a couple of days (as -compared to around six months for the original stackless patches). - -Of course, this is not the only aspect that can be so decided a -posteriori, during translation. - - -Translation aspects -=================== - -Our standard interpreter is implemented at a very high level of -abstraction. 
This has a number of happy consequences, among which is -enabling the encapsulation of language aspects as described in this -document. For example, the implementation code simply makes no -reference to memory management, which clearly gives the translator -complete freedom to decide about this aspect. This contrasts with -CPython where the decision to use reference counting is reflected tens -or even hundreds of times in each C source file in the codebase. - -As described in [ARCH]_, producing a Python implementation from the -source of our standard interpreter involves various stages: the -initialization code is run, the resulting code is annotated, typed and -finally translated. By the nature of the task, the encapsulation of -*low-level aspects* mainly affects the typer and the translation -process. At the coarsest level, the selection of target platform -involves writing a new backend -- still a significant task, but much -much easier than writing a complete implementation of Python! - -Other aspects affect different levels, as their needs require. The -remainder of this section describes a few aspects that we have -successfully encapsulated. - -An advantage of our approach is that any combination of aspects can be -freely selected, avoiding the problem of combinatorial explosion of -variants seen in manually written interpreters. - - -Stacklessness -------------- - -The stackless modifications are mostly implemented in the C backend, -with a single extra flow graph operation to influence some details of -the generated C code. The total changes only required about 300 lines -of source, vindicating our abstract approach. - -In stackless mode, the C backend generates functions that are -systematically extended with a small amount of bookkeeping code. This -allows the C code to save its own stack to the heap on demand, where it -can then be inspected, manipulated and eventually resumed. This is -described in more detail in [TA]_. 
In this way, unlimited (or more -precisely heap-limited) recursion is possible, even on operating systems -that limit the size of the C stack. Alternatively, a different saved -stack can be resumed, thus implementing soft context switches - -coroutines, or green threads with an appropriate scheduler. We reobtain -in this way all the major benefits of the original "stackless" patches. - -This effect requires a number of changes in each and every C function -that would be extremely tedious to write by hand: checking for the -signal triggering the saving of the stack, actually saving precisely the -currently active local variables, and when re-entering the function -check which variables are being restored and which call site is resumed. -In addition, a couple of global tables must be maintained to drive the -process. The key point is that we can fine-tune all these interactions -freely, without having to rewrite the whole code all the time but only -modifying the C backend (in addition, of course, to being able to change -at any time the high-level code that is the input of the translation -process). So far, this allowed us to find a style that does not hinder -the optimizations performed by the C compiler and so has only a minor -impact on performance in the normal case. - -Also note that the fact that the C stack can be fully saved into the -heap can tremendously simplify the portable implementation of garbage -collection: after the stack has been completely transferred to the heap, -there are no roots left on the stack. - - -Multiple Interpreters ---------------------- - -Another implementation detail that causes tension between functionality -and both code clarity and memory consumption in CPython is the issue of -multiple independent interpreters in the same process. 
In CPython there -is a partial implementation of this idea in the "interpreter state" API, -but the interpreters produced by this are not truly independent -- for -instance the dictionary that contains interned strings is implemented as -a file-level static object, and is thus shared between the interpreters. -A full implementation of this idea would entirely eschew the use of file -level statics and place all interpreter-global data in some large -structure, which would hamper readability and maintainability. In -addition, in many situations it is necessary to determine which -interpreter a given object is "from" -- and this is not possible in -CPython largely because of the memory overhead that adding an 'interp' -pointer to all Python objects would create. - -In PyPy, all of our implementation code manipulates an explicit object -space instance, as described in [CODG]_. The situation of multiple -interpreters is thus handled automatically: if there is only one space -instance, it is regarded as a pre-constructed constant and the space -object pointer (though not its non-constant contents) disappears from -the produced source, i.e. from function arguments, local variables and -instance fields. If there are two or more such instances, a 'space' -attribute will be automatically added to all application objects (or -more precisely, it will not be removed by the translation process), the -best of both worlds. - - -Memory Management ------------------ - -As mentioned above, CPython's decision to use a garbage collector based -on reference counting is reflected throughout its source. In the -implementation code of PyPy, it is not, and in fact the standard -interpreter can currently be compiled to use a reference counted scheme -or the Boehm GC [BOEHM]_. Again, more details are in [TA]_. We also -have an experimental framework for developing custom exact GCs [GC]_, -but it is not yet integrated with the low-level translation back-ends. 
- -Another advantage of the aspect oriented approach shows itself most -clearly with this memory management aspect: that of correctness. -Although reference counting is a fairly simple scheme, writing code for -CPython requires that the programmer make a large number of -not-quite-trivial decisions about the refcounting code. Experience -suggests that mistakes will always creep in, leading to crashes or -leaks. While tools exist to help find these mistakes, it is surely -better to not write the reference count manipulations at all and this is -what PyPy's approach allows. Writing the code that emits the correct -reference count manipulations is surely harder than writing any one -piece of explicit refcounting code, but once it is done and tested, it -just works without further effort. - - -Concurrency ------------ - -The aspect of CPython's implementation that has probably caused more -discussion than any other mentioned here is that of the threading -model. Python has supported threads since version 1.5 with what is -commonly referred to as the "Global Interpreter Lock" or GIL; the -execution of bytecodes is serialized such that only one thread can be -executing Python code at one time. This has the benefit of being -relatively unintrusive and not too complex, but has the disadvantage -that multi-threaded, computation-bound Python code does not gain -performance on multi-processor machines. - -PyPy will offer the opportunity to experiment with different models, -although currently we only offer a version with no thread support and -another with a GIL-like model [TA]_. (We also plan to support soon -"green" software-only threads in the Stackless model described above, -but obviously this would not solve the multi-processor scalability -issue.) - -The future work in this direction is to collect the numerous possible -approaches that have been thought out over the years and -e.g. presented on the CPython development mailing list. 
Most of them -have never been tried out in CPython, for lack of necessary resources. -A number of them are clearly easy to try out in PyPy, at least in an -experimental version that would allow its costs to be assessed -- for -example, various forms of object-level locking. - - -Evaluation Strategy -------------------- - -Possibly the most radical aspect to tinker with is the evaluation -strategy. The thunk object space [OBJS]_ wraps the standard object -space to allow the production of "lazily computed objects", i.e. objects -whose values are only calculated when needed. It also allows global and -total replacement of one object with another. - -The thunk object space is mostly meant as an example of what our -approach can achieve -- the combination of side-effects and lazy -evaluation is not easy to understand. This demonstration is important -because this level of flexibility will be required to implement future -features along the lines of Prolog-style logic variables, transparent -persistency, object distribution across several machines, or -object-level security. - - -Experimental results -==================== - -All the aspects described in the previous chapter have been successfully -implemented and are available since the release 0.7 or 0.8 of PyPy. - -We have conducted preliminary experimental measures of the performance -impact of enabling each of these features in the compiled PyPy -interpreter. We present below the current results as of October 2005. -Most figures appear to vary from machine to machine. Given that the -generated code is large (it produces a binary of 5.6MB on a Linux -Pentium), there might be locality and code ordering issues that cause -important cache effects. - -We have not particularly optimized any of these aspects yet. Our goal -is primarily to prove that the whole approach is worthwhile, and rely on -future work and push for external contributions to implement -state-of-the-art techniques in each of these domains. 
- -Stacklessness - - Producing Stackless-style C code means that all the functions of the - PyPy interpreter that can be involved in recursions contain stack - bookkeeping code (leaf functions, functions calling only leaves, - etc. do not need to use this style). The current performance impact - is to make PyPy slower by about 8%. A couple of minor pending - optimizations could reduce this figure a bit. We expect the rest of - the performance impact to be mainly caused by the increase of size - of the generated executable (+28%). - -Multiple Interpreters - - A binary that allowed selection between two copies of the standard - object space with a command line switch was about 10% slower and - about 40% larger than the default. Most of the extra size is - likely accounted for by the duplication of the large amount of - prebuilt data involved in an instance of the standard object - space. - -Memory Management - - The [BOEHM]_ GC is well-optimized and produces excellent results. By - comparison, using reference counting instead makes the interpreter - twice as slow. This is almost certainly due to the naive approach - to reference counting used so far, which updates the counter far - more often than strictly necessary; we also still have a lot of - objects that would theoretically not need a reference counter, - either because they are short-lived or because we can prove that - they are "owned" by another object and can share its lifetime. In - the long run, it will be interesting to see how far this figure can - be reduced, given past experiences with CPython which seem to show - that reference counting is a viable idea for Python interpreters. - -Concurrency - - No experimental data available so far. Just enabling threads - currently creates an overhead that hides the real costs of locking. - -Evaluation Strategy - - When translated to C code, the Thunk object space has a global - performance impact of 6%. 
The executable is 13% bigger (probably - due to the arguably excessive inlining we perform). - -We have described five aspects in this document, each currently with -two implementation choices, leading to 32 possible translations. We -have not measured the performance of each variant, but the few we have -tried suggests that the performance impacts are what one would expect, -e.g. a translated stackless binary using the thunk object space would -be expected to be about 1.06 x 1.08 ~= 1.14 times slower than the -default and was found to be 1.15 times slower. - - -Conclusion -========== - -Although still a work in progress, we believe that the successes we -have had in encapsulating implementation aspects justifies the -approach we have taken. In particular, the relative ease of -implementing the translation aspects described in this paper -- as -mentioned above, the stackless modifications took only a few days -- -means we are confident that it will be easily possible to encapsulate -implementation aspects we have not yet considered. - - -References -========== - -.. [ARCH] `Architecture Overview`_, PyPy documentation, 2003-2005 - -.. [BOEHM] `Boehm-Demers-Weiser garbage collector`_, a garbage collector - for C and C++, Hans Boehm, 1988-2004 - -.. [CODG] `Coding Guide`_, PyPy documentation, 2003-2005 - -.. [GC] `Garbage Collection`_, PyPy documentation, 2005 - -.. [OBJS] `Object Spaces`_, PyPy documentation, 2003-2005 - -.. [STK] `Stackless Python`_, a Python implementation that does not use - the C stack, Christian Tismer, 1999-2004 - -.. [TA] `Memory management and threading models as translation aspects`_, - PyPy documentation (and EU Deliverable D05.3), 2005 - -.. _`standard interpreter`: architecture.html#standard-interpreter -.. _`Architecture Overview`: architecture.html -.. _`Coding Guide`: coding-guide.html -.. _`Garbage Collection`: garbage_collection.html -.. _`Object Spaces`: objspace.html -.. _`Stackless Python`: http://www.stackless.com -.. 
_`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _`Memory management and threading models as translation aspects`: translation-aspects.html diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -122,10 +122,6 @@ away a lot of low level details. This document is also part of the `EU reports`_. -`translation aspects`_ describes how we weave different -properties into our interpreter during the translation -process. This document is also part of the `EU reports`_. - `garbage collector`_ strategies that can be used by the virtual machines produced by the translation process. @@ -159,8 +155,6 @@ .. _`interpreter optimizations`: interpreter-optimizations.html .. _`translation`: translation.html .. _`dynamic-language translation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf -.. _`low-level encapsulation`: low-level-encapsulation.html -.. _`translation aspects`: translation-aspects.html .. _`configuration documentation`: config/ .. _`coding guide`: coding-guide.html .. _`architecture`: architecture.html diff --git a/pypy/doc/translation-aspects.rst b/pypy/doc/translation-aspects.rst deleted file mode 100644 --- a/pypy/doc/translation-aspects.rst +++ /dev/null @@ -1,481 +0,0 @@ -.. include:: throwaway.rst - -========================================================================================== -Memory management and threading models as translation aspects -- solutions and challenges -========================================================================================== - -.. contents:: - - -Introduction -============= - -One of the goals of the PyPy project is to have the memory and concurrency -models flexible and changeable without having to reimplement the -interpreter manually. 
In fact, PyPy, by the time of the 0.8 release contains code for memory -management and concurrency models which allows experimentation without -requiring early design decisions. This document describes many of the more -technical details of the current state of the implementation of the memory -object model, automatic memory management and concurrency models and describes -possible future developments. - - -The low level object model -=========================== - -One important part of the translation process is *rtyping* [DLT]_, [TR]_. -Before that step all objects in our flow graphs are annotated with types at the -level of the RPython type system which is still quite high-level and -target-independent. During rtyping they are transformed into objects that -match the model of the specific target platform. For C or C-like targets this -model consists of a set of C-like types like structures, arrays and functions -in addition to primitive types (integers, characters, floating point numbers). -This multi-stage approach gives a lot of flexibility in how a given object is -represented at the target's level. The RPython process can decide what -representation to use based on the type annotation and on the way the object is -used. - -In the following the structures used to represent RPython classes are described. -There is one "vtable" per RPython class, with the following structure: The root -class "object" has a vtable of the following type (expressed in a C-like -syntax):: - - struct object_vtable { - struct object_vtable* parenttypeptr; - RuntimeTypeInfo * rtti; - Signed subclassrange_min; - Signed subclassrange_max; - array { char } * name; - struct object * instantiate(); - } - -The structure members ``subclassrange_min`` and ``subclassrange_max`` are used -for subclass checking (see below). Every other class X, with parent Y, has the -structure:: - - struct vtable_X { - struct vtable_Y super; // inlined - ... 
// extra class attributes - } - -The extra class attributes usually contain function pointers to the methods -of that class, although the data class attributes (which are supported by the -RPython object model) are stored there. - -The type of the instances is:: - - struct object { // for instances of the root class - struct object_vtable* typeptr; - } - - struct X { // for instances of every other class - struct Y super; // inlined - ... // extra instance attributes - } - -The extra instance attributes are all the attributes of an instance. - -These structure layouts are quite similar to how classes are usually -implemented in C++. - -Subclass checking ------------------ - -The way we do subclass checking is a good example of the flexibility provided -by our approach: in the beginning we were using a naive linear lookup -algorithm. Since subclass checking is quite a common operation (it is also used -to check whether an object is an instance of a certain class), we wanted to -replace it with the more efficient relative numbering algorithm (see [PVE]_ for -an overview of techniques). This was a matter of changing just the appropriate -code of the rtyping process to calculate the class-ids during rtyping and -insert the necessary fields into the class structure. It would be similarly -easy to switch to another implementation. - -Identity hashes ---------------- - -In the RPython type system, class instances can be used as dictionary keys using -a default hash implementation based on identity, which in practice is -implemented using the memory address. This is similar to CPython's behavior -when no user-defined hash function is present. The annotator keeps track of the -classes for which this hashing is ever used. - -One of the peculiarities of PyPy's approach is that live objects are analyzed -by our translation toolchain. This leads to the presence of instances of RPython -classes that were built before the translation started. 
These are called -"pre-built constants" (PBCs for short). During rtyping, these instances must be -converted to the low level model. One of the problems with doing this is that -the standard hash implementation of Python is to take the id of an object, which - -is just the memory address. If the RPython program explicitly captures the -hash of a PBC by storing it (for example in the implementation of a data -structure) then the stored hash value will not match the value of the object's -address after translation. - -To prevent this the following strategy is used: for every class whose instances -are hashed somewhere in the program (either when storing them in a -dictionary or by calling the hash function) an extra field is introduced in the -structure used for the instances of that class. For PBCs of such a class this -field is used to store the memory address of the original object and new objects -have this field initialized to zero. The hash function for instances of such a -class stores the object's memory address in this field if it is zero. The -return value of the hash function is the content of the field. This means that -instances of such a class that are converted PBCs retain the hash values they -had before the conversion whereas new objects of the class have their memory -address as hash values. A strategy along these lines would in any case have been -required if we ever switch to using a copying garbage collector. - -Cached functions with PBC arguments ------------------------------------- - -As explained in [DLT]_ the annotated code can contain -functions from a finite set of PBCs to something else. The set itself has to be -finite but its content does not need to be provided explicitly but is discovered -as the annotation of the input argument by the annotator itself. 
This kind of -function is translated by recording the input-result relationship by calling -the function concretely at annotation time, and adding a field to the PBCs in -the set and emitting code reading that field instead of the function call. - -Changing the representation of an object ----------------------------------------- - -One example of the flexibility the RTyper provides is how we deal with lists. -Based on information gathered by the annotator the RTyper chooses between two -different list implementations. If a list never changes its size after creation, -a low-level array is used directly. For lists which might be resized, a -representation consisting of a structure with a pointer to an array is used, -together with over-allocation. - -We plan to use similar techniques to use tagged pointers instead of using boxing -to represent builtin types of the PyPy interpreter such as integers. This would -require attaching explicit hints to the involved classes. Field access would -then be translated to the corresponding masking operations. - - -Automatic Memory Management Implementations -============================================ - -The whole implementation of the PyPy interpreter assumes automatic memory -management, e.g. automatic reclamation of memory that is no longer used. The -whole analysis toolchain also assumes that memory management is being taken -care of -- only the backends have to concern themselves with that issue. For -backends that target environments that have their own garbage collector, like -.NET or Java, this is not an issue. For other targets like C -the backend has to produce code that uses some sort of garbage collection. - -This approach has several advantages. It makes it possible to target different -platforms, with and without integrated garbage collection. Furthermore, the -interpreter implementation is not complicated by the need to do explicit memory -management everywhere. 
Even more important the backend can optimize the memory -handling to fit a certain situation (like a machine with very restricted -memory) or completely replace the memory management technique or memory model -with a different one without the need to change source code. Additionally, -the backend can use information that was inferred by the rest of the toolchain -to improve the quality of memory management. - -Using the Boehm garbage collector ------------------------------------ - -Currently there are two different garbage collectors implemented in the C -backend (which is the most complete backend right now). One of them uses the -existing Boehm-Demers-Weiser garbage collector [BOEHM]_. For every memory -allocating operation in a low level flow graph the C backend introduces a call -to a function of the boehm collector which returns a suitable amount of memory. -Since the C backend has a lot of information available about the data structure -being allocated it can choose the memory allocation function out of the Boehm -API that fits best. For example, for objects that do not contain references to -other objects (e.g. strings) there is a special allocation function which -signals to the collector that it does not need to consider this memory when -tracing pointers. - -Using the Boehm collector has disadvantages as well. The problems stem from the -fact that the Boehm collector is conservative which means that it has to -consider every word in memory as a potential pointer. Since PyPy's toolchain -has complete knowledge of the placement of data in memory we can generate an -exact garbage collector that considers only genuine pointers. - -Using a simple reference counting garbage collector ------------------------------------------------------ - -The other implemented garbage collector is a simple reference counting scheme. 
-The C backend inserts a reference count field into every structure that has to be -handled by the garbage collector and puts increment and decrement operations -for this reference count into suitable places in the resulting C code. After -every reference decrement operations a check is performed whether the reference -count has dropped to zero. If this is the case the memory of the object will be -reclaimed after the references counts of the objects the original object -refers to are decremented as well. - -The current placement of reference counter updates is far from optimal: The -reference counts are updated much more often than theoretically necessary (e.g. -sometimes a counter is increased and then immediately decreased again). -Objects passed into a function as arguments can almost always use a "trusted reference", -because the call-site is responsible to create a valid reference. -Furthermore some more analysis could show that some objects don't need a -reference counter at all because they either have a very short, foreseeable -life-time or because they live exactly as long as another object. - -Another drawback of the current reference counting implementation is that it -cannot deal with circular references, which is a fundamental flaw of reference -counting memory management schemes in general. CPython solves this problem by -having special code that handles circular garbage which PyPy lacks at the -moment. This problem has to be addressed in the future to make the reference -counting scheme a viable garbage collector. Since reference counting is quite -successfully used by CPython it will be interesting to see how far it can be -optimized for PyPy. - -Simple escape analysis to remove memory allocation ---------------------------------------------------- - -We also implemented a technique to reduce the amount of memory allocation. 
-Sometimes it is possible to deduce from the flow graphs that an object lives -exactly as long as the stack frame of the function it is allocated in. -This happens if no pointer to the object is stored into another object and if -no pointer to the object is returned from the function. If this is the case and -if the size of the object is known in advance the object can be allocated on -the stack. To achieve this, the object is "exploded", that means that for every -element of the structure a new variable is generated that is handed around in -the graph. Reads from elements of the structure are removed and just replaced -by one of the variables, writes by assignments to same. - -Since quite a lot of objects are allocated in small helper functions, this -simple approach which does not track objects across function boundaries only -works well in the presence of function inlining. - -A general garbage collection framework --------------------------------------- - -In addition to the garbage collectors implemented in the C backend we have also -started writing a more general toolkit for implementing exact garbage -collectors in Python. The general idea is to express the garbage collection -algorithms in Python as well and translate them as part of the translation -process to C code (or whatever the intended platform is). - -To be able to access memory in a low level manner there are special ``Address`` -objects that behave like pointers to memory and can be manipulated accordingly: -it is possible to read/write to the location they point to a variety of data -types and to do pointer arithmetic. These objects are translated to real -pointers and the appropriate operations. When run on top of CPython there is a -*memory simulator* that makes the address objects behave like they were -accessing real memory. In addition the memory simulator contains a number of -consistency checks that expose common memory handling errors like dangling -pointers, uninitialized memory, etc. 
- -At the moment we have three simple garbage collectors implemented for this -framework: a simple copying collector, a mark-and-sweep collector and a -deferred reference counting collector. These garbage collectors are work when run on -top of the memory simulator, but at the moment it is not yet possible to translate -PyPy to C with them. This is because it is not easy to -find the root pointers that reside on the C stack -- both because the C stack layout is -heavily platform dependent, and also due to the possibility of roots that are not -only on the stack but also hiding in registers (which would give a problem for *moving -garbage collectors*). - -There are several possible solutions for this problem: One -of them is to not use C compilers to generate machine code, so that the stack -frame layout gets into our control. This is one of the tasks that need to be -tackled in phase 2, as directly generating assembly is needed anyway for a -just-in-time compiler. The other possibility (which would be much easier to -implement) is to move all the data away from the stack to the heap -before collecting garbage, as described in section "Stackless C code" below. - -Concurrency Model Implementations -============================================ - -At the moment we have implemented two different concurrency models, and the -option to not support concurrency at all -(another proof of the modularity of our approach): -threading with a global interpreter lock and a "stackless" model. - -No threading -------------- - -By default, multi-threading is not supported at all, which gives some small -benefits for single-threaded applications since even in the single-threaded -case there is some overhead if threading capabilities are built into -the interpreter. - -Threading with a Global Interpreter Lock ------------------------------------------- - -Right now, there is one non-trivial threading model implemented. 
It follows -the threading implementation of CPython and thus uses a global interpreter -lock. This lock prevents any two threads from interpreting python code at -the same time. The global interpreter lock is released around calls to blocking I/O -functions. This approach has a number of advantages: it gives very little -runtime penalty for single-threaded applications, makes many of the common uses -for threading possible, and it is relatively easy to implement and maintain. It has -the disadvantage that multiple threads cannot be distributed across multiple -processors. - -To make this threading-model usable for I/O-bound applications, the global -interpreter lock should be released around blocking external function calls -(which is also what CPython does). This has been partially implemented. - - -Stackless C code ------------------ - -"Stackless" C code is C code that only uses a bounded amount of -space in the C stack, and that can generally obtain explicit -control of its own stack. This is commonly known as "continuations", -or "continuation-passing style" code, although in our case we will limit -ourselves to single-shot continuations, i.e. continuations that are -captured and subsequently will be resumed exactly once. - -The technique we have implemented is based on the recurring idea -of emulating this style via exceptions: a specific program point can -generate a pseudo-exception whose purpose is to unwind the whole C stack -in a restartable way. More precisely, the "unwind" exception causes -the C stack to be saved into the heap in a compact and explicit -format, as described below. It is then possible to resume only the -innermost (most recent) frame of the saved stack -- allowing unlimited -recursion on OSes that limit the size of the C stack -- or to resume a -different previously-saved C stack altogether, thus implementing -coroutines or light-weight threads. 
- -In our case, exception handling is always explicit in the generated code: -the C backend puts a cheap check -after each call site to detect if the callee exited -normally or generated an exception. So when compiling functions in -stackless mode, the generated exception handling code special-cases the -new "unwind" exception. This exception causes the current function to -respond by saving its local variables to a heap structure (a linked list -of records, one per stack frame) and then propagating the exception -outwards. Eventually, at the end of the frame chain, the outermost -function is a manually-written dispatcher that catches the "unwind" -exception. - -At this point, the whole C stack is stored away in the heap. This is a -very interesting state in itself, because precisely there is no C stack -below the dispatcher -left. It is this which will allow us to write all the algorithms -in a portable way, that -normally require machine-specific code to inspect the stack, -in particular garbage collectors. - -To continue execution, the dispatcher can resume either the freshly saved or a -completely different stack. Moreover, it can resume directly the innermost -(most recent) saved frame in the heap chain, without having to resume all -intermediate frames first. This not only makes stack switches fast, but it -also allows the frame to continue to run on top of a clean C stack. When that -frame eventually exits normally, it returns to the dispatcher, which then -invokes the previous (parent) saved frame, and so on. We insert stack checks -before calls that can lead to recursion by detecting cycles in the call graph. -These stack checks copy the stack to the heap (by raising the special -exception) if it is about to grow deeper than a certain level. -As a different point of view, the C stack can also be considered as a cache -for the heap-based saved frames in this model. When we run out -of C stack space, we flush the cache. 
When the cache is empty, we fill it with -the next item from the heap. - -To give the translated program some amount of control over the -heap-based stack structures and over the top-level dispatcher that jumps -between them, there are a few "external" functions directly implemented -in C. These functions provide an elementary interface, on top of which -useful abstractions can be implemented, like: - -* coroutines: explicitly switching code, similar to Greenlets [GREENLET]_. - -* "tasklets": cooperatively-scheduled microthreads, as introduced in - Stackless Python [STK]_. - -* implicitly-scheduled (preemptive) microthreads, also known as green threads. - -An important property of the changes in all the generated C functions is -that they are written in a way that does only minimally degrade their performance in -the non-exceptional case. Most optimizations performed by C compilers, -like register allocation, continue to work... - -The following picture shows a graph function together with the modifications -necessary for the stackless style: the check whether the stack is too big and -should be unwound, the check whether we are in the process of currently storing -away the stack and the check whether the call to the function is not a regular -call but a reentry call. - -.. graphviz:: image/stackless_informal.dot - :scale: 70 - - -Future work -================ - -open challenges for phase 2: - -Garbage collection ------------------- - -One of the biggest missing features of our current garbage collectors is -finalization. At present finalizers are simply not invoked if an object is -freed by the garbage collector. Along the same lines weak references are not -supported yet. It should be possible to implement these with a reasonable -amount of effort for reference counting as well as the Boehm collector (which -provides the necessary hooks). 
- -Integrating the now simulated-only GC framework into the rtyping process and -the code generation will require considerable effort. It requires being able to -keep track of the GC roots which is hard to do with portable C code. One -solution would be to use the "stackless" code since it can move the stack -completely to the heap. We expect that we can implement GC read and write -barriers as function calls and rely on inlining to make them more efficient. - -We may also spend some time on improving the existing reference counting -implementation by removing unnecessary incref-decref pairs and identifying -trustworthy references. A bigger task would -be to add support for detecting circular references. - - -Threading model ---------------- - -One of the interesting possibilities that stackless offers is to implement *green -threading*. This would involve writing a scheduler and some preemption logic. - -We should also investigate other threading models based on operating system -threads with various granularities of locking for access of shared objects. - -Object model ------------- - -We also might want to experiment with more sophisticated structure inlining. -Sometimes it is possible to find out that one structure object -allocated on the heap lives exactly as long as another structure object on the -heap pointing to it. If this is the case it is possible to inline the first -object into the second. This saves the space of one pointer and avoids -pointer-chasing. - - -Conclusion -=========== - -As concretely shown with various detailed examples, our approach gives us -flexibility and lets us choose various aspects at translation time instead -of encoding them into the implementation itself. - -References -=========== - -.. [BOEHM] `Boehm-Demers-Weiser garbage collector`_, a garbage collector - for C and C++, Hans Boehm, 1988-2004 -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ - -.. 
[GREENLET] `Lightweight concurrent programming`_, py-lib Documentation 2003-2005 -.. _`Lightweight concurrent programming`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt - -.. [STK] `Stackless Python`_, a Python implementation that does not use - the C stack, Christian Tismer, 1999-2004 -.. _`Stackless Python`: http://www.stackless.com - -.. [TR] `Translation`_, PyPy documentation, 2003-2005 -.. _`Translation`: translation.html - -.. [LE] `Encapsulating low-level implementation aspects`_, - PyPy documentation (and EU deliverable D05.4), 2005 -.. _`Encapsulating low-level implementation aspects`: low-level-encapsulation.html - -.. [DLT] `Compiling dynamic language implementations`_, - PyPy documentation (and EU deliverable D05.1), 2005 -.. _`Compiling dynamic language implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf - -.. [PVE] `Simple and Efficient Subclass Tests`_, Jonathan Bachrach, Draft submission to ECOOP-02, 2001 -.. _`Simple and Efficient Subclass Tests`: http://people.csail.mit.edu/jrb/pve/pve.pdf diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -26,8 +26,6 @@ statistic/index.rst - translation-aspects.rst - docindex.rst svn-help.rst From commits-noreply at bitbucket.org Mon Apr 25 15:01:48 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 15:01:48 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac) Kill geninterp documentation! Die Die Die! Message-ID: <20110425130148.33E2E282B9D@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43583:20960932aed3 Date: 2011-04-25 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/20960932aed3/ Log: (cfbolz, lac) Kill geninterp documentation! Die Die Die! 
diff --git a/pypy/doc/geninterp.rst b/pypy/doc/geninterp.rst deleted file mode 100644 --- a/pypy/doc/geninterp.rst +++ /dev/null @@ -1,190 +0,0 @@ -.. include:: throwaway.rst - -The Interpreter-Level backend ------------------------------ - -http://codespeak.net/pypy/trunk/pypy/translator/geninterplevel.py - -Motivation -++++++++++ - -PyPy often makes use of `application-level`_ helper methods. -The idea of the 'geninterplevel' backend is to automatically transform -such application level implementations to their equivalent representation -at interpreter level. Then, the RPython to C translation hopefully can -produce more efficient code than always re-interpreting these methods. - -One property of translation from application level Python to -Python is, that the produced code does the same thing as the -corresponding interpreted code, but no interpreter is needed -any longer to execute this code. - -.. _`application-level`: coding-guide.html#app-preferable - -Bootstrap issue -+++++++++++++++ - -One issue we had so far was of bootstrapping: some pieces of the -interpreter (e.g. exceptions) were written in geninterped code. -It is unclear how much of it is left, thought. - -That bootstrap issue is (was?) solved by invoking a new bytecode interpreter -which runs on FlowObjspace. FlowObjspace is complete without -complicated initialization. It is able to do abstract interpretation -of any Rpythonic code, without actually implementing anything. It just -records all the operations the bytecode interpreter would have done by -building flowgraphs for all the code. What the Python backend does is -just to produce correct Python code from these flowgraphs and return -it as source code. In the produced code Python operations recorded in -the original flowgraphs are replaced by calls to the corresponding -methods in the `object space`_ interface. - -.. _`object space`: objspace.html - -Example -+++++++ - -.. 
_implementation: ../../../../pypy/translator/geninterplevel.py - -Let's try a little example. You might want to look at the flowgraph that it -produces. Here, we directly run the Python translation and look at the -generated source. See also the header section of the implementation_ for the -interface:: - - >>> from pypy.translator.geninterplevel import translate_as_module - >>> entrypoint, source = translate_as_module(""" - ... - ... def g(n): - ... i = 0 - ... while n: - ... i = i + n - ... n = n - 1 - ... return i - ... - ... """) - -This call has invoked a PyPy bytecode interpreter running on FlowObjspace, -recorded every possible codepath into a flowgraph, and then rendered the -following source code:: - - #!/bin/env python - # -*- coding: LATIN-1 -*- - - def initapp2interpexec(space): - """NOT_RPYTHON""" - - def g(space, w_n_1): - goto = 3 # startblock - while True: - - if goto == 1: - v0 = space.is_true(w_n) - if v0 == True: - goto = 2 - else: - goto = 4 - - if goto == 2: - w_1 = space.add(w_0, w_n) - w_2 = space.sub(w_n, gi_1) - w_n, w_0 = w_2, w_1 - goto = 1 - continue - - if goto == 3: - w_n, w_0 = w_n_1, gi_0 - goto = 1 - continue - - if goto == 4: - return w_0 - - fastf_g = g - - g3dict = space.newdict() - gs___name__ = space.new_interned_str('__name__') - gs_app2interpexec = space.new_interned_str('app2interpexec') - space.setitem(g3dict, gs___name__, gs_app2interpexec) - gs_g = space.new_interned_str('g') - from pypy.interpreter import gateway - gfunc_g = space.wrap(gateway.interp2app(fastf_g, unwrap_spec=[gateway.ObjSpace, gateway.W_Root])) - space.setitem(g3dict, gs_g, gfunc_g) - gi_1 = space.wrap(1) - gi_0 = space.wrap(0) - return g3dict - -You see that actually a single function is produced: -``initapp2interpexec``. This is the function that you will call with a -space as argument. 
It defines a few functions and then does a number -of initialization steps, builds the global objects the function need, -and produces the PyPy function object ``gfunc_g``. - -The return value is ``g3dict``, which contains a module name and the -function we asked for. - -Let's have a look at the body of this code: The definition of ``g`` is -used as ``fast_g`` in the ``gateway.interp2app`` which constructs a -PyPy function object which takes care of argument unboxing (based on -the ``unwrap_spec``), and of invoking the original ``g``. - -We look at the definition of ``g`` itself which does the actual -computation. Comparing to the flowgraph, you see a code block for -every block in the graph. Since Python has no goto statement, the -jumps between the blocks are implemented by a loop that switches over -a ``goto`` variable. - -:: - - . if goto == 1: - v0 = space.is_true(w_n) - if v0 == True: - goto = 2 - else: - goto = 4 - -This is the implementation of the "``while n:``". There is no implicit state, -everything is passed over to the next block by initializing its -input variables. This directly resembles the nature of flowgraphs. -They are completely stateless. - - -:: - - . if goto == 2: - w_1 = space.add(w_0, w_n) - w_2 = space.sub(w_n, gi_1) - w_n, w_0 = w_2, w_1 - goto = 1 - continue - -The "``i = i + n``" and "``n = n - 1``" instructions. -You see how every instruction produces a new variable. -The state is again shuffled around by assigning to the -input variables ``w_n`` and ``w_0`` of the next target, block 1. - -Note that it is possible to rewrite this by re-using variables, -trying to produce nested blocks instead of the goto construction -and much more. The source would look much more like what we -used to write by hand. For the C backend, this doesn't make much -sense since the compiler optimizes it for us. For the Python interpreter it could -give a bit more speed. 
But this is a temporary format and will -get optimized anyway when we produce the executable. - -Interplevel Snippets in the Sources -+++++++++++++++++++++++++++++++++++ - -Code written in application space can consist of complete files -to be translated, or they -can be tiny snippets scattered all over a source file, similar -to our example from above. - -Translation of these snippets is done automatically and cached -in pypy/_cache with the modulename and the md5 checksum appended -to it as file name. If you have run your copy of pypy already, -this folder should exist and have some generated files in it. -These files consist of the generated code plus a little code -that auto-destructs the cached file (plus .pyc/.pyo versions) -if it is executed as __main__. On windows this means you can wipe -a cached code snippet clear by double-clicking it. Note also that -the auto-generated __init__.py file wipes the whole directory -when executed. diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -191,7 +191,6 @@ **transformation** Code that modifies flowgraphs to weave in translation aspects - .. _`translation-time`: **translation-time** diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -29,11 +29,8 @@ As of the 1.2 release, RPython_ programs can be translated into the following languages/platforms: C/POSIX, CLI/.NET -and Java/JVM (in addition, there's `a backend`_ that translates -`application-level`_ into `interpreter-level`_ code, but this is a special -case in several ways). +and Java/JVM. -.. _`a backend`: geninterp.html .. _`application-level`: coding-guide.html#application-level .. _`interpreter-level`: coding-guide.html#interpreter-level @@ -632,10 +629,6 @@ http://codespeak.net/pypy/trunk/pypy/translator/c/ -GenC is not really documented at the moment. 
The basic principle of creating -code from flow graphs is similar to the `Python back-end`_. See also -"Generating C code" in our `EU report about translation`_. - GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are implemented first. @@ -710,21 +703,6 @@ GenJVM is almost entirely the work of Niko Matsakis, who worked on it also as part of the Summer of PyPy program. -.. _`Python again`: -.. _`Python back-end`: - -The Interpreter-Level backend ------------------------------ - -http://codespeak.net/pypy/trunk/pypy/translator/geninterplevel.py - -Above, this backend was described as a "special case in several ways". One of -these ways is that the job it does is specific to PyPy's standard interpreter, -and the other is that it does not even use the annotator -- it works directly -the graphs produced by the Flow Object Space. - -See `geninterp's documentation `__. - .. _extfunccalls: External Function Calls diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -258,12 +258,10 @@ ``*/test/`` many directories have a test subdirectory containing test modules (see `Testing in PyPy`_) -``_cache/`` holds cache files from internally `translating application - level to interpreterlevel`_ code. +``_cache/`` holds internal cache files ============================ =========================================== .. _`bytecode interpreter`: interpreter.html -.. _`translating application level to interpreterlevel`: geninterp.html .. _`Testing in PyPy`: coding-guide.html#testing-in-pypy .. _`mixed modules`: coding-guide.html#mixed-modules .. 
_`modules`: coding-guide.html#modules diff --git a/pypy/doc/cleanup-todo.rst b/pypy/doc/cleanup-todo.rst --- a/pypy/doc/cleanup-todo.rst +++ b/pypy/doc/cleanup-todo.rst @@ -10,7 +10,6 @@ - low level backends should share more code - all backends should have more consistent interfaces - - geninterp is a hack - delegate finding type stuff like vtables etc to GC, cleaner interface for rtti, simplify translator/c/gc.py - clean up the tangle of including headers in the C backend diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -12,8 +12,6 @@ distribution.rst - geninterp.rst - objspace-proxies.rst old_news.rst From commits-noreply at bitbucket.org Mon Apr 25 15:14:35 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 15:14:35 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (cfbolz, lac) new file, so people have a place other than jit.txt to put things. Message-ID: <20110425131435.A5FCF282B9D@codespeak.net> Author: Laura Creighton Branch: extradoc Changeset: r3534:51652316fcbd Date: 2011-04-25 15:13 +0200 http://bitbucket.org/pypy/extradoc/changeset/51652316fcbd/ Log: (cfbolz,lac) new file, so people have a place other than jit.txt to put things. the last bits of cleanup-todo from doc is here now. diff --git a/planning/todo.txt b/planning/todo.txt new file mode 100644 --- /dev/null +++ b/planning/todo.txt @@ -0,0 +1,13 @@ +PyPy todo areas +================== + +This is a todo list that lists various areas of PyPy that should be cleaned up +(for whatever reason: less mess, less code duplication, etc). 
+ +translation toolchain +--------------------- + + - clean up the tangle of including headers in the C backend + - make approach for loading modules more sane, mixedmodule capture + too many platform dependencies especially for pypy-cli + From commits-noreply at bitbucket.org Mon Apr 25 15:17:07 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 15:17:07 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) move the only unfinished bits of cleanup-todo to extradoc. Message-ID: <20110425131707.9B660282B9D@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43584:959111565f93 Date: 2011-04-25 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/959111565f93/ Log: (lac, cfbolz) move the only unfinished bits of cleanup-todo to extradoc. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -89,7 +89,6 @@ architecture.rst coding-guide.rst cpython_differences.rst - cleanup-todo.rst garbage_collection.rst interpreter.rst objspace.rst diff --git a/pypy/doc/cleanup-todo.rst b/pypy/doc/cleanup-todo.rst deleted file mode 100644 --- a/pypy/doc/cleanup-todo.rst +++ /dev/null @@ -1,29 +0,0 @@ - -PyPy cleanup areas -================== - -This is a todo list that lists various areas of PyPy that should be cleaned up -(for whatever reason: less mess, less code duplication, etc). 
- -translation toolchain ---------------------- - - - low level backends should share more code - - all backends should have more consistent interfaces - - delegate finding type stuff like vtables etc to GC, cleaner interface for rtti, - simplify translator/c/gc.py - - clean up the tangle of including headers in the C backend - - make approach for loading modules more sane, mixedmodule capture - too many platform dependencies especially for pypy-cli - - review pdbplus, especially the graph commands, also in the light of - https://codespeak.net/issue/pypy-dev/issue303 and the fact that - we can have more than one translator/annotator around (with the - timeshifter) - -interpreter ------------ - - - review the things implemented at applevel whether they are performance- - critical - - - review CPython regression test suite, enable running tests, fix bugs From commits-noreply at bitbucket.org Mon Apr 25 15:21:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 15:21:28 +0200 (CEST) Subject: [pypy-svn] pypy str-cmp-opt: Kill branch (alex: "it has one commit that isn't very important") Message-ID: <20110425132128.475E5282B9D@codespeak.net> Author: Armin Rigo Branch: str-cmp-opt Changeset: r43585:714629f3112d Date: 2011-04-25 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/714629f3112d/ Log: Kill branch (alex: "it has one commit that isn't very important") From commits-noreply at bitbucket.org Mon Apr 25 15:29:38 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 15:29:38 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer: Close old branch (exarkun: "is garbage") Message-ID: <20110425132938.90EC5282B9D@codespeak.net> Author: Armin Rigo Branch: pyarg-parsebuffer Changeset: r43586:6c6ea5e8186e Date: 2011-04-25 15:29 +0200 http://bitbucket.org/pypy/pypy/changeset/6c6ea5e8186e/ Log: Close old branch (exarkun: "is garbage") From commits-noreply at bitbucket.org Mon Apr 25 15:38:15 2011 From: commits-noreply at 
bitbucket.org (arigo) Date: Mon, 25 Apr 2011 15:38:15 +0200 (CEST) Subject: [pypy-svn] pypy default: hg merge pyarg-parsebuffer-new Message-ID: <20110425133815.3C9F2282B9D@codespeak.net> Author: Armin Rigo Branch: Changeset: r43587:c43ec4e37818 Date: 2011-04-25 15:37 +0200 http://bitbucket.org/pypy/pypy/changeset/c43ec4e37818/ Log: hg merge pyarg-parsebuffer-new diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -16,9 +16,6 @@ */ #define staticforward static -typedef void* Py_buffer; - - #define PyObject_HEAD \ long ob_refcnt; \ struct _typeobject *ob_type; @@ -130,6 +127,29 @@ typedef int (*visitproc)(PyObject *, void *); typedef int (*traverseproc)(PyObject *, visitproc, void *); +/* Py3k buffer interface */ +typedef struct bufferinfo { + void *buf; + PyObject *obj; /* owned reference */ + Py_ssize_t len; + + /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + /* Py_ssize_t itemsize; */ + /* int readonly; */ + /* int ndim; */ + /* char *format; */ + /* Py_ssize_t *shape; */ + /* Py_ssize_t *strides; */ + /* Py_ssize_t *suboffsets; */ + + /* static store for shape and strides of + mono-dimensional buffers. 
*/ + /* Py_ssize_t smalltable[2]; */ + /* void *internal; */ +} Py_buffer; + + typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -90,8 +90,8 @@ constant_names = """ -Py_TPFLAGS_READY Py_TPFLAGS_READYING -METH_COEXIST METH_STATIC METH_CLASS +Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER +METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE @@ -411,6 +411,23 @@ PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) +Py_buffer = cpython_struct( + "Py_buffer", ( + ('buf', rffi.VOIDP), + ('obj', PyObject), + ('len', Py_ssize_t), + # ('itemsize', Py_ssize_t), + + # ('readonly', lltype.Signed), + # ('ndim', lltype.Signed), + # ('format', rffi.CCHARP), + # ('shape', Py_ssize_tP), + # ('strides', Py_ssize_tP), + # ('suboffets', Py_ssize_tP), + # ('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), + # ('internal', rffi.VOIDP) + )) + @specialize.memo() def is_PyObject(TYPE): if not isinstance(TYPE, lltype.Ptr): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -10,6 +10,7 @@ cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers) from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, @@ -403,6 +404,7 @@ c_buf.c_bf_getcharbuffer = llhelper(str_getcharbuffer.api_func.functype, str_getcharbuffer.api_func.get_wrapper(space)) pto.c_tp_as_buffer 
= c_buf + pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER @cpython_api([PyObject], lltype.Void, external=False) def type_dealloc(space, obj): @@ -443,7 +445,7 @@ if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) - pto.c_tp_flags = Py_TPFLAGS_HEAPTYPE + pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, PyObject_Del.api_func.get_wrapper(space)) pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -1,7 +1,8 @@ from pypy.rpython.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, - PyVarObject, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, + PyVarObject, Py_buffer, + Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, @@ -428,3 +429,31 @@ rffi.free_nonmovingbuffer(data, buf) return 0 + + at cpython_api([lltype.Ptr(Py_buffer), PyObject, rffi.VOIDP, Py_ssize_t, + lltype.Signed, lltype.Signed], rffi.INT, error=CANNOT_FAIL) +def PyBuffer_FillInfo(space, view, obj, buf, length, readonly, flags): + """ + Fills in a buffer-info structure correctly for an exporter that can only + share a contiguous chunk of memory of "unsigned bytes" of the given + length. Returns 0 on success and -1 (with raising an error) on error. + + This is not a complete re-implementation of the CPython API; it only + provides a subset of CPython's behavior. + """ + view.c_buf = buf + view.c_len = length + view.c_obj = obj + Py_IncRef(space, obj) + return 0 + + + at cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) +def PyBuffer_Release(space, view): + """ + Releases a Py_buffer obtained from getbuffer ParseTuple's s*. 
+ + This is not a complete re-implementation of the CPython API; it only + provides a subset of CPython's behavior. + """ + Py_DecRef(space, view.c_obj) diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -135,13 +135,11 @@ PyMem_FREE(ptr); } -#if 0 static void cleanup_buffer(void *ptr) { PyBuffer_Release((Py_buffer *) ptr); } -#endif static int addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *)) @@ -776,15 +774,19 @@ } case 's': {/* string */ if (*format == '*') { - Py_FatalError("* format unsupported for strings in PyArg_*\n"); -#if 0 Py_buffer *p = (Py_buffer *)va_arg(*p_va, Py_buffer *); if (PyString_Check(arg)) { PyBuffer_FillInfo(p, arg, PyString_AS_STRING(arg), PyString_GET_SIZE(arg), 1, 0); - } + } else { + PyErr_SetString( + PyExc_NotImplementedError, + "s* not implemented for non-string values"); + return NULL; + } +#if 0 #ifdef Py_USING_UNICODE else if (PyUnicode_Check(arg)) { uarg = UNICODE_DEFAULT_ENCODING(arg); @@ -801,13 +803,13 @@ if (getbuffer(arg, p, &buf) < 0) return converterr(buf, arg, msgbuf, bufsize); } +#endif if (addcleanup(p, freelist, cleanup_buffer)) { return converterr( "(cleanup problem)", arg, msgbuf, bufsize); } format++; -#endif } else if (*format == '#') { void **p = (void **)va_arg(*p_va, char **); FETCH_SIZE; @@ -1266,24 +1268,27 @@ } case 't': { /* 8-bit character buffer, read-only access */ - Py_FatalError("'t' unsupported"); -#if 0 char **p = va_arg(*p_va, char **); PyBufferProcs *pb = arg->ob_type->tp_as_buffer; Py_ssize_t count; - + +#if 0 if (*format++ != '#') return converterr( "invalid use of 't' format character", arg, msgbuf, bufsize); - if (!PyType_HasFeature(arg->ob_type, - Py_TPFLAGS_HAVE_GETCHARBUFFER) || - pb == NULL || pb->bf_getcharbuffer == NULL || - pb->bf_getsegcount == NULL) +#endif + if (!PyType_HasFeature(arg->ob_type, + Py_TPFLAGS_HAVE_GETCHARBUFFER) +#if 0 + || pb == NULL || 
pb->bf_getcharbuffer == NULL || + pb->bf_getsegcount == NULL +#endif + ) return converterr( "string or read-only character buffer", arg, msgbuf, bufsize); - +#if 0 if (pb->bf_getsegcount(arg, NULL) != 1) return converterr( "string or single-segment read-only buffer", @@ -1293,16 +1298,18 @@ return converterr( "string or pinned buffer", arg, msgbuf, bufsize); - +#endif count = pb->bf_getcharbuffer(arg, 0, p); +#if 0 if (count < 0) return converterr("(unspecified)", arg, msgbuf, bufsize); +#endif { FETCH_SIZE; STORE_SIZE(count); + ++format; } break; -#endif } default: return converterr("impossible", arg, msgbuf, bufsize); @@ -1616,7 +1623,7 @@ int match = 0; char *ks; if (!PyString_Check(key)) { - PyErr_SetString(PyExc_TypeError, + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); return cleanreturn(0, freelist); } diff --git a/.hgsubstate b/.hgsubstate new file mode 100644 diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -3,66 +3,145 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class AppTestGetargs(AppTestCpythonExtensionBase): - def test_pyarg_parse(self): - mod = self.import_extension('foo', [ - ('oneargint', 'METH_VARARGS', - ''' - int l; - if (!PyArg_ParseTuple(args, "i", &l)) { - return NULL; - } - return PyInt_FromLong(l); - ''' - ), - ('oneargandform', 'METH_VARARGS', - ''' - int l; - if (!PyArg_ParseTuple(args, "i:oneargandstuff", &l)) { - return NULL; - } - return PyInt_FromLong(l); - '''), - ('oneargobject', 'METH_VARARGS', - ''' - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - Py_INCREF(obj); - return obj; - '''), - ('oneargobjectandlisttype', 'METH_VARARGS', - ''' - PyObject *obj; - if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &obj)) { - return NULL; - } - Py_INCREF(obj); - return obj; - '''), - ('twoopt', 'METH_VARARGS', - ''' - 
PyObject *a; - PyObject *b = NULL; - if (!PyArg_ParseTuple(args, "O|O", &a, &b)) { - return NULL; - } - if (b) - Py_INCREF(b); - else - b = PyInt_FromLong(42); - /* return an owned reference */ - return b; - ''')]) - assert mod.oneargint(1) == 1 - raises(TypeError, mod.oneargint, None) - raises(TypeError, mod.oneargint) - assert mod.oneargandform(1) == 1 + def setup_method(self, func): + super(AppTestGetargs, self).setup_method(func) + self.w_import_parser = self.space.wrap(self.import_parser) + + def import_parser(self, implementation, argstyle='METH_VARARGS'): + mod = self.import_extension( + 'modname', [('funcname', argstyle, implementation)]) + return self.space.getattr(mod, self.space.wrap("funcname")) + + + def test_pyarg_parse_int(self): + """ + The `i` format specifier can be used to parse an integer. + """ + oneargint = self.import_parser( + ''' + int l; + if (!PyArg_ParseTuple(args, "i", &l)) { + return NULL; + } + return PyInt_FromLong(l); + ''') + assert oneargint(1) == 1 + raises(TypeError, oneargint, None) + raises(TypeError, oneargint) + + + def test_pyarg_parse_fromname(self): + """ + The name of the function parsing the arguments can be given after a `:` + in the argument format string. + """ + oneargandform = self.import_parser( + ''' + int l; + if (!PyArg_ParseTuple(args, "i:oneargandstuff", &l)) { + return NULL; + } + return PyInt_FromLong(l); + ''') + assert oneargandform(1) == 1 + + + def test_pyarg_parse_object(self): + """ + The `O` format specifier can be used to parse an arbitrary object. 
+ """ + oneargobject = self.import_parser( + ''' + PyObject *obj; + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + Py_INCREF(obj); + return obj; + ''') sentinel = object() - res = mod.oneargobject(sentinel) - raises(TypeError, "mod.oneargobjectandlisttype(sentinel)") + res = oneargobject(sentinel) assert res is sentinel - assert mod.twoopt(1) == 42 - assert mod.twoopt(1, 2) == 2 - raises(TypeError, mod.twoopt, 1, 2, 3) + + def test_pyarg_parse_restricted_object_type(self): + """ + The `O!` format specifier can be used to parse an object of a particular + type. + """ + oneargobjectandlisttype = self.import_parser( + ''' + PyObject *obj; + if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &obj)) { + return NULL; + } + Py_INCREF(obj); + return obj; + ''') + sentinel = object() + raises(TypeError, "oneargobjectandlisttype(sentinel)") + sentinel = [] + res = oneargobjectandlisttype(sentinel) + assert res is sentinel + + + def test_pyarg_parse_one_optional(self): + """ + An object corresponding to a format specifier after a `|` in the + argument format string is optional and may be passed or not. + """ + twoopt = self.import_parser( + ''' + PyObject *a; + PyObject *b = NULL; + if (!PyArg_ParseTuple(args, "O|O", &a, &b)) { + return NULL; + } + if (b) + Py_INCREF(b); + else + b = PyInt_FromLong(42); + /* return an owned reference */ + return b; + ''') + assert twoopt(1) == 42 + assert twoopt(1, 2) == 2 + raises(TypeError, twoopt, 1, 2, 3) + + + def test_pyarg_parse_string_py_buffer(self): + """ + The `s*` format specifier can be used to parse a str into a Py_buffer + structure containing a pointer to the string data and the length of the + string data. 
+ """ + pybuffer = self.import_parser( + ''' + Py_buffer buf; + PyObject *result; + if (!PyArg_ParseTuple(args, "s*", &buf)) { + return NULL; + } + result = PyString_FromStringAndSize(buf.buf, buf.len); + PyBuffer_Release(&buf); + return result; + ''') + assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + + + def test_pyarg_parse_charbuf_and_length(self): + """ + The `t#` format specifier can be used to parse a read-only 8-bit + character buffer into a char* and int giving its length in bytes. + """ + charbuf = self.import_parser( + ''' + char *buf; + int len; + if (!PyArg_ParseTuple(args, "t#", &buf, &len)) { + return NULL; + } + return PyString_FromStringAndSize(buf, len); + ''') + raises(TypeError, "charbuf(10)") + assert 'foo\0bar\0baz' == charbuf('foo\0bar\0baz') diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -231,3 +231,135 @@ """)]) assert module.dump(self.tmpname, None) assert open(self.tmpname).read() == 'None' + + + +class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase): + """ + PyBuffer_FillInfo populates the fields of a Py_buffer from its arguments. + """ + def test_fillWithoutObject(self): + """ + PyBuffer_FillInfo populates the C{buf} and C{length}fields of the + Py_buffer passed to it. + """ + module = self.import_extension('foo', [ + ("fillinfo", "METH_VARARGS", + """ + Py_buffer buf; + PyObject *str = PyString_FromString("hello, world."); + PyObject *result; + + if (PyBuffer_FillInfo(&buf, NULL, PyString_AsString(str), 13, 0, 0)) { + return NULL; + } + + /* Check a few things we want to have happened. 
+ */ + if (buf.buf != PyString_AsString(str)) { + PyErr_SetString(PyExc_ValueError, "buf field not initialized"); + return NULL; + } + + if (buf.len != 13) { + PyErr_SetString(PyExc_ValueError, "len field not initialized"); + return NULL; + } + + if (buf.obj != NULL) { + PyErr_SetString(PyExc_ValueError, "obj field not initialized"); + return NULL; + } + + /* Give back a new string to the caller, constructed from data in the + * Py_buffer. + */ + if (!(result = PyString_FromStringAndSize(buf.buf, buf.len))) { + return NULL; + } + + /* Free that string we allocated above. result does not share storage with + * it. + */ + Py_DECREF(str); + + return result; + """)]) + result = module.fillinfo() + assert "hello, world." == result + + + def test_fillWithObject(self): + """ + PyBuffer_FillInfo populates the C{buf}, C{length}, and C{obj} fields of + the Py_buffer passed to it and increments the reference count of the + object. + """ + module = self.import_extension('foo', [ + ("fillinfo", "METH_VARARGS", + """ + Py_buffer buf; + PyObject *str = PyString_FromString("hello, world."); + PyObject *result; + + if (PyBuffer_FillInfo(&buf, str, PyString_AsString(str), 13, 0, 0)) { + return NULL; + } + + /* Get rid of our own reference to the object, but the Py_buffer should + * still have a reference. + */ + Py_DECREF(str); + + /* Give back a new string to the caller, constructed from data in the + * Py_buffer. It better still be valid. + */ + if (!(result = PyString_FromStringAndSize(buf.buf, buf.len))) { + return NULL; + } + + /* Now the data in the Py_buffer is really no longer needed, get rid of it + *(could use PyBuffer_Release here, but that would drag in more code than + * necessary). + */ + Py_DECREF(buf.obj); + + /* Py_DECREF can't directly signal error to us, but if it makes a reference + * count go negative, it will set an error. + */ + if (PyErr_Occurred()) { + return NULL; + } + + return result; + """)]) + result = module.fillinfo() + assert "hello, world." 
== result + + +class AppTestPyBuffer_Release(AppTestCpythonExtensionBase): + """ + PyBuffer_Release releases the resources held by a Py_buffer. + """ + def test_decrefObject(self): + """ + The PyObject referenced by Py_buffer.obj has its reference count + decremented by PyBuffer_Release. + """ + module = self.import_extension('foo', [ + ("release", "METH_VARARGS", + """ + Py_buffer buf; + buf.obj = PyString_FromString("release me!"); + buf.buf = PyString_AsString(buf.obj); + buf.len = PyString_Size(buf.obj); + + /* The Py_buffer owns the only reference to that string. Release the + * Py_buffer and the string should be released as well. + */ + PyBuffer_Release(&buf); + + Py_RETURN_NONE; + """)]) + assert module.release() is None + From commits-noreply at bitbucket.org Mon Apr 25 15:38:16 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 15:38:16 +0200 (CEST) Subject: [pypy-svn] pypy pyarg-parsebuffer-new: Merged. Message-ID: <20110425133816.44304282B9D@codespeak.net> Author: Armin Rigo Branch: pyarg-parsebuffer-new Changeset: r43588:778fbbe69baa Date: 2011-04-25 15:37 +0200 http://bitbucket.org/pypy/pypy/changeset/778fbbe69baa/ Log: Merged. 
From commits-noreply at bitbucket.org Mon Apr 25 15:39:19 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 15:39:19 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac) kill another obsolete file, project-ideas.rst Message-ID: <20110425133919.120AF282B9D@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43589:8c803a17e9da Date: 2011-04-25 15:38 +0200 http://bitbucket.org/pypy/pypy/changeset/8c803a17e9da/ Log: (cfbolz,lac) kill another obsolete file, project-ideas.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst deleted file mode 100644 --- a/pypy/doc/project-ideas.rst +++ /dev/null @@ -1,91 +0,0 @@ -Independent project ideas relating to PyPy -========================================== - -PyPy allows experimentation in many directions -- indeed facilitating -experimentation in language implementation was one of the main -motivations for the project. This page is meant to collect some ideas -of experiments that the core developers have not had time to perform -yet and also do not require too much in depth knowledge to get started -with. - -Feel free to suggest new ideas and discuss them in #pypy on the freenode IRC -network or the pypy-dev mailing list (see the home_ page). - ------------ - -.. contents:: - - - -JIT back-ends --------------------------------- - -PyPy's Just-In-Time compiler relies on backends for actual code -generation. We have so far a 32-bit Intel backend, and a CLI one. There is -Summer of Code project for 64bit (x86_64) backend, but other options -(ARM, llvm) remain open. - -.. _distribution: -.. _persistence: - -Extensions of the Python language ---------------------------------- - -+----------------------------------------------------------------------+ -| :NOTE: | -| | -| The ideas in this paragraph are marked as "experimental". We may | -| or may not be interested in helping you out. 
You are warned :-) | -| | -+----------------------------------------------------------------------+ - -One of the advantages of PyPy's implementation is that the Python-level type -of an object and its implementation are completely independent. This should -allow a much more intuitive interface to, for example, objects that are backed -by a persistent store. The `transparent proxy`_ objects are a key step in this -direction; now all that remains is to implement the interesting bits :-) - -An example project might be to implement functionality akin to the `ZODB's -Persistent class`_, without the need for the _p_changed hacks, and in pure -Python code (should be relatively easy on top of transparent proxy). - -Another example would be to implement a multi-CPU extension that internally -uses several processes and uses transparent proxies to share object views. - -Other ideas are to do something interesting with sandboxing_; or to -work more on the Stackless_ features (e.g. integrate it with the JIT); -or revive the logic object space, which tried to bring unification-like -features to Python. - -.. _sandboxing: sandbox.html -.. _Stackless: stackless.html - - -Other languages ---------------- - -Improve one of the `existing interpreters`__, or start a new one. -Experiment with the JIT compiler generator. - -.. __: http://codespeak.net/svn/pypy/lang/ - - -Or else... ----------- - -...or whatever else interests you! - -Feel free to mention your interest and discuss these ideas on the `pypy-dev -mailing list`_ or on the #pypy channel on irc.freenode.net. -You can also have a look around our documentation_. - - -.. _`efficient propagators for specialized finite domains`: http://codespeak.net/svn/pypy/extradoc/soc-2006/constraints.txt -.. _`object spaces`: objspace.html -.. _`code templating solution`: http://codespeak.net/svn/pypy/extradoc/soc-2006/code-templating.txt - -.. _documentation: docindex.html -.. _home: index.html -.. 
_`pypy-dev mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev -.. _`ZODB's Persistent class`: http://www.zope.org/Documentation/Books/ZDG/current/Persistence.stx -.. _`transparent proxy`: objspace-proxies.html#tproxy diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -182,13 +182,11 @@ sprint. Coming to a sprint is usually also the best way to get into PyPy development. -If you want to start on your own, take a look at the list of `project -suggestions`_. If you get stuck or need advice, `contact us`_. Usually IRC is +If you get stuck or need advice, `contact us`_. Usually IRC is the most immediate way to get feedback (at least during some parts of the day; many PyPy developers are in Europe) and the `mailing list`_ is better for long discussions. -.. _`project suggestions`: project-ideas.html .. _`contact us`: index.html .. _`mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -49,9 +49,6 @@ `papers, talks and related projects`_ lists presentations and related projects as well as our published papers. -`ideas for PyPy related projects`_ which might be a good way to get -into PyPy. - `PyPy video documentation`_ is a page linking to the videos (e.g. of talks and introductions) that are available. @@ -163,7 +160,6 @@ .. _`EU reports`: index-report.html .. _`Technical reports`: index-report.html .. _`summary`: http://codespeak.net:8099/summary -.. _`ideas for PyPy related projects`: project-ideas.html .. _`Nightly builds and benchmarks`: http://tuatara.cs.uni-duesseldorf.de/benchmark.html .. _`directory reference`: .. 
_`rlib`: rlib.html diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -16,8 +16,6 @@ old_news.rst - project-ideas.rst - rffi.rst sandbox.rst From commits-noreply at bitbucket.org Mon Apr 25 16:00:43 2011 From: commits-noreply at bitbucket.org (l.diekmann) Date: Mon, 25 Apr 2011 16:00:43 +0200 (CEST) Subject: [pypy-svn] pypy new-dict-proxy: Fixed bug in W_TypeObject.get_module: we cannot directly read values from w_dict anymore Message-ID: <20110425140043.0C2CE282B9D@codespeak.net> Author: Lukas Diekmann Branch: new-dict-proxy Changeset: r43590:ab926f846f39 Date: 2011-04-25 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/ab926f846f39/ Log: Fixed bug in W_TypeObject.get_module: we cannot directly read values from w_dict anymore diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -459,15 +459,15 @@ def get_module(w_self): space = w_self.space if w_self.is_heaptype() and '__module__' in w_self.dict_w: - return w_self.dict_w['__module__'] + return w_self.getdictvalue(space, '__module__') else: # for non-heap types, CPython checks for a module.name in the # type name. That's a hack, so we're allowed to use a different # hack... 
if ('__module__' in w_self.dict_w and - space.is_true(space.isinstance(w_self.dict_w['__module__'], + space.is_true(space.isinstance(w_self.getdictvalue(space, '__module__'), space.w_str))): - return w_self.dict_w['__module__'] + return w_self.getdictvalue(space, '__module__') return space.wrap('__builtin__') def get_module_type_name(w_self): diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -172,3 +172,8 @@ setattr(a, "a%s" % i, i) cache_counter = __pypy__.method_cache_counter("x") assert cache_counter[0] == 0 # 0 hits, because all the attributes are new + + def test_get_module_from_namedtuple(self): + # this used to crash + from collections import namedtuple + assert namedtuple("a", "b").__module__ From commits-noreply at bitbucket.org Mon Apr 25 16:15:36 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 16:15:36 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) kill old_news Message-ID: <20110425141536.9E8AD282B9D@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43591:5237becb659d Date: 2011-04-25 15:47 +0200 http://bitbucket.org/pypy/pypy/changeset/5237becb659d/ Log: (lac, cfbolz) kill old_news diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -11,7 +11,7 @@ behavior of all objects in a running program is easy to implement on top of PyPy. 
-Here is what we implemented so far, in historical order: +Here is what we have implemented so far, in historical order: * *Thunk Object Space*: lazily computed objects, computing only when an operation is performed on them; lazy functions, computing their result diff --git a/pypy/doc/old_news.rst b/pypy/doc/old_news.rst deleted file mode 100644 --- a/pypy/doc/old_news.rst +++ /dev/null @@ -1,306 +0,0 @@ -The PyPy project aims at producing a flexible and fast Python_ -implementation. The guiding idea is to translate a Python-level -description of the Python language itself to lower level languages. -Rumors have it that the secret goal is being faster-than-C which is -nonsense, isn't it? `more...`_ - -.. _Python: http://www.python.org/doc/current/ref/ref.html -.. _`more...`: architecture.html#mission-statement - - -Leysin Winter Sports Sprint, 12th - 19th January 2008 -================================================================== - -.. raw:: html - -
- -The next PyPy sprint will be held in Leysin, Switzerland, for -the fifth time. The overall idea of the sprint is to continue -working on making PyPy ready for general use. - -.. raw:: html - -
- -The proposed topics are: ctypes, JIT, testing, LLVM. This is -a fully public sprint, so newcomers and other topics are -welcome. And like previous winters, the main side goal is to -have fun in winter sports :-) See the `sprint announcement`__ -for details. - -.. raw:: html - -   -
- -.. __: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2008/announcement.html - - -PyPy blog started -================= - -A few days ago some of the PyPy developers started a `PyPy Status Blog`_. Let's -see how this works out. *(November 13th, 2007)* - -.. _`PyPy Status Blog`: http://morepypy.blogspot.com - - -PyPy/Squeak Sprint in Bern finished -=================================== - -The Bern sprint, being the first Squeak-PyPy-collaboration-sprint is finished. -The week was very intense and productive, see `Bern Sprint Summary blog post`_ -for a list of things we accomplished. We covered most of what happened during -the sprint in quite some detail on the `PyPy Squeak blog`_. The sprint was -hosted by the Software Composition Group of the University of Bern from the -22nd to the 26th of October 2007. - -.. _`Bern sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/bern2007/announce.html -.. _`people that are known to come`: http://codespeak.net/pypy/extradoc/sprintinfo/bern2007/people.html -.. _`Bern Sprint Summary blog post`: http://pypysqueak.blogspot.com/2007/10/bern-sprint-finished-summary.html -.. _`PyPy Squeak blog`: http://pypysqueak.blogspot.com - - - -PyPy Sprint in Gothenburg: 19nd-25th November 2007 -================================================================== - - -The next post-EU-project PyPy sprint will be in Gothenburg, Sweden. It will -focus on cleaning up the PyPy codebase and making it ready for the next round -of improvements. It is a "public" sprint but it will probably be more suitable -for people already somewhat acquainted with PyPy. For more information see the -`Gothenburg sprint announcement`_ or a list of the `people that are known to -come to Gothenburg`_. - -.. _`Gothenburg sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2007/announce.html -.. 
_`people that are known to come to Gothenburg`: http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2007/people.html - - - - -PyPy Sprint at EuroPython, Vilnius is finished -================================================================== - -The sprint at the last EuroPython_ conference in Vilnius from the 9th to -the 11th of July, 2007 is finished. For more information -see the `Vilnius sprint announcement`_. - - -.. _EuroPython: http://europython.org -.. _`Vilnius sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2007/announcement.html - - -Review passed with flying colours -================================= - -On the 31st of May 2007 the PyPy project was reviewed by the EU -Commission in Brussels. Reviewers were Roel Wuyts, Unversité Libre de -Bruxelles and Aki Lumiaho, Ramboll, Finland. Present was also our -Project Officer, Charles McMillan. After 6 hours of presentations of -the various aspects of the project, it only took the reviewers a few -minutes to decide that the project was accepted, without any further -work being required. Professor Wuyts, who has dynamic programming -languages as his main field of research was very enthusiastic about -the entire project and the results with the Just In Time Compiler -Generator in particular. He offered his help in establishing -collaborations with the communities around Prolog, Smalltalk, Lisp and -other dynamic languages, as well as giving hints on how to get our -results most widely publicized. - -The preparations for the review left the team rather exhausted so -development progress will be rather slow until the sprint at -Europython in the second week of July. - -PyPy EU funding period over, Review ahead -=========================================================== - -The 28 month EU project period of PyPy is over and new things are to come! -On 11th May we `submitted last documents`_ to the European Union and are now -heading towards a 31st May Review Meeting in Bruxelles. 
The `PyPy EU Final -Activity Report`_ summarizes what we did and what we have in mind -on technical, scientific and community levels. It also contains reflections -and recommendations possibly interesting to other projects aiming at -EU funded Open Source research. *(12th May, 2007)* - -.. _`submitted last documents`: http://codespeak.net/pypy/dist/pypy/doc/index-report.html -.. _`PyPy EU Final Activity Report`: http://codespeak.net/pypy/extradoc/eu-report/PYPY-EU-Final-Activity-Report.pdf - -PyPy 1.0: JIT compiler generator, optimizations and more -================================================================== - -We are proud to release PyPy 1.0.0, our sixth public release (Download_). See -the `release announcement `__ to read about the -many new features in this release, especially the results of our -JIT generation technology. See also our detailed instructions on -how to `get started`_. *(March 27th, 2007)* - -.. _Download: getting-started.html#just-the-facts -.. _`get started`: getting-started.html - - - - -PyPy Trillke Sprints (25-28th Feb and 1-5th March 2007) finished -================================================================== - -Both of the sprints that mark the end of the EU period are over. There were very -good results, both on a `report level`_ as well as on a `technical level`_. -The sprint also had a good discussion about the future of PyPy after the EU -project ends, see the `mail Armin wrote`_ and `the meeting's minutes`_. You can -also look at the pictures that `Carl Friedrich`_ and that `Lene took`_ during -the sprint or read the `sprint announcement`_. *(March 10th, 2007)* - -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/announcement.html -.. _`report level`: http://codespeak.net/pipermail/pypy-dev/2007q1/003578.html -.. _`technical level`: http://codespeak.net/pipermail/pypy-dev/2007q1/003579.html -.. _`Carl Friedrich`: http://codespeak.net/~cfbolz/hildesheim3-sprint-pictures/ -.. 
_`Lene took`: http://codespeak.net/~lene/trillke-sprint-web/Page1.html -.. _`mail Armin wrote`: http://codespeak.net/pipermail/pypy-dev/2007q1/003577.html -.. _`the meeting's minutes`: http://codespeak.net/svn/pypy/extradoc/minute/post-eu-structure.txt - - - - -PyPy 0.99.0: optimizations, backends, new object spaces and more -================================================================== - -We are proud to release PyPy 0.99.0, our fifth public release. See -the `release announcement `__ to read about the -many new features in this release. See also our detailed instructions on -how to `get started`_. *(February 17th, 2007)* - -.. _`get started`: getting-started.html - - -py lib 0.9.0: py.test, distributed execution, greenlets and more -================================================================== - -Our development support and testing library was publically released, see the -`0.9 release announcement `__ -and its extensive `online documentation `__. -*(February 15th, 2007)* - - - -Leysin Winter Sports Sprint, 8th - 14th January 2007 -================================================================== - -.. raw:: html - -
- -The PyPy Leysin sprint is over. We worked hard on various topics, including -preparing the upcoming py-lib and PyPy releases. For more details, see the -`Leysin sprint report`_, the `Leysin announcement`_ and the -`list of people present`_. - - -.. raw:: html - -
- -.. _`Leysin announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/announcement.html -.. _`Leysin sprint report`: http://codespeak.net/pipermail/pypy-dev/2007q1/003481.html -.. _`list of people present`: http://codespeak.net/svn/pypy/extradoc/sprintinfo/leysin-winter-2007/people.txt - - -Massive Parallelism and Translation Aspects -======================================================== - -Our next big `EU report`_ about Stackless features, optimizations, and -memory management is finished. You can download it `as pdf`_. - -.. _`EU report`: index-report.html -.. _`as pdf`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf - - -Duesseldorf sprint #2, 30th October - 5th November over -================================================================== - -The Duesseldorf sprint is over. It was a very productive sprint with work done -in various areas. Read the `sprint report`_ for a detailed description of what -was achieved and the `full announcement`_ for various details. - -.. _`full announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/announce.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q4/003396.html - - - -Dynamic Languages Symposium (OOPSLA, 23rd October) -================================================================== - -We will present a paper at the `Dynamic Languages Symposium`_ describing -`PyPy's approach to virtual machine construction`_. The DLS is a -one-day forum within OOPSLA'06 (Portland, Oregon, USA). The paper is a -motivated overview of the annotation/rtyping translation tool-chain, -with experimental results. - -As usual, terminology with PyPy is delicate :-) Indeed, the title is -both correct and misleading - it does not describe "the" PyPy virtual -machine, since we have never hand-written one. This paper focuses on -how we are generating such VMs, not what they do. - -.. 
_`Dynamic Languages Symposium`: http://www.oopsla.org/2006/submission/tracks/dynamic_languages_symposium.html -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf - - - -Summer of PyPy: Calls for proposals open now! -================================================================== - -Happily, we are able to offer students mentoring and full sprint -participant's funding if we receive a proposal outlining an -interesting project related to PyPy and its development tools. This -follows up on the "Summer of Code" campaign from Google but is -completely independent from it and also works differently. -See the full call for details: - - http://codespeak.net/pypy/dist/pypy/doc/summer-of-pypy.html - - -Ireland sprint 21st-27th August -================================================================== - -The last PyPy sprint happened in the nice city of -Limerick in Ireland from 21st till 27th August. -The main focus of the sprint was on JIT compiler works, -various optimization works, porting extension modules, -infrastructure works like a build tool for PyPy and -extended (distributed) testing. -Read the full `announcement`_ for more details. - -.. _`announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ireland-2006/announce.html - -Release of PyPy video documentation -================================================================== - -The PyPy team is happy to announce that the first bunch of PyPy videos -can now be downloaded from: - -http://codespeak.net/pypy/dist/pypy/doc/video-index.html - -The videos introduce involved people and contain different talks, tutorials and -interviews and can be downloaded via bittorrent. **29th June 2006** - -PyPy 0.9.0 -================================================================== - -We are proud to release PyPy 0.9.0, our fourth public release. See -the `release announcement `__ to read about the -many new features in this release. 
- -PyPy and Summer of Code 2006 -================================================================== - -PyPy will again mentor students through Google's `Summer of Code`_ -campaign. Three students will kick-off their work on PyPy by -participating in the Duesseldorf sprint. They will be exploring a -back-end for Microsoft.NET, work on ways to build web applications -with Javascript code (in this case by translating RPython to -Javascript) and porting some CPython modules to use ctypes. Welcome to -the team! - -.. _`Summer of Code`: http://code.google.com/soc/psf/about.html - diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -12,10 +12,6 @@ distribution.rst - objspace-proxies.rst - - old_news.rst - rffi.rst sandbox.rst From commits-noreply at bitbucket.org Mon Apr 25 16:15:40 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 16:15:40 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) kill maemo, svn-help, obsolete parts of the coding guide and the development method Message-ID: <20110425141540.23BA3282C2B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43592:17114b27cd15 Date: 2011-04-25 16:09 +0200 http://bitbucket.org/pypy/pypy/changeset/17114b27cd15/ Log: (lac, cfbolz) kill maemo, svn-help, obsolete parts of the coding guide and the development method diff --git a/pypy/doc/svn-help.rst b/pypy/doc/svn-help.rst deleted file mode 100644 --- a/pypy/doc/svn-help.rst +++ /dev/null @@ -1,153 +0,0 @@ - -Installing subversion for PyPy -============================== - -Jens-Uwe Mager has prepared some installation files which should -help you to install subversion on your computer. - -+ Download Unix source tarball or prepackaged versions_ for MacOS, Windows, FreeBSD and Linux - -+ Additional information for Windows users: - - * See Microsoft website_ if you have .DLL issues. 
- - * Windows Installer file for Tortoise SVN (like Tortoise CVS) GUI_ - (Pick the UNICODE version for Windows 2000 and XP and - see Win_ 2000, NT if you have problems loading it.) - -+ Local copy of MacOS_ X binary tar ball - (This requires at least OS X 10.3) - -+ Debian instructions below... - -Getting started ------------------ - -If you're just getting started with subversion, here's a simple how-to. -For complete information, you can go read the subversion guide_. - -**Download and install the appropriate installation file of subversion above.** - -For linux: - -download the tarball. unzip and untar it. Then type *./configure*. Then, as root, *make* followed by *make install*. Voila ... a subversion client. - -For Debian users:: - - $ apt-get install subversion-tools - -People using Debian *stable* first need to add the following line to ``/etc/apt/sources.list`` (thanks backports_!):: - - deb http://fs.cs.fhm.edu/mirror/backports.org/debian stable subversion - -Note that you can always go look at the files online_ with your browser, located at: http://codespeak.net/svn/pypy/trunk -But, you'll want to check out your own local copies to work on. - -Check out and Check in ----------------------------- - -In order to get the sourcecode and docs downloaded onto your drive, open a shell or commandline and type:: - - $ svn co http://codespeak.net/svn/pypy/trunk - -If you are behind a dump proxy this may or may not work; see below. - -Once you've got the files checked out to your own system, you can use your favorite text editor to change to files. Be sure to read the coding-guide_ and other documentation files before doing a lot of work on the source code. Before doing any work, make sure you're using the most recent update with:: - - $ svn up - -this will update whichever subdirectory you're in (doc or src). 
- -When you're ready to **check in** a file, - -cd to your local checked out sourcecode directory, and if necessary, copy the file over from wherever you worked on it:: - - $ cp ~/mydir/filename.ext filename.ext - -If you're adding a brand-new file:: - - $ svn add filename.ext - -Then, to **commit** it:: - - $ svn ci -m "your comments about what changes your committing" - $ your password: (this may not be necessary) - -You'll see something like the following:: - - Adding goals/stringcomp.py - Transmitting file data . - Committed revision 578. - -or:: - - Sending coding-guide.txt - Transmitting file data . - Committed revision 631. - -Check online on the `svn-commit archives`_ and you'll see your revision. Feel free to add a documentation file on any major changes you've made! - -.. _`svn-commit archives`: http://codespeak.net/pipermail/pypy-svn/ - -Some other useful subversion tricks: --------------------------------------- - -**Be sure to remember ``svn`` in the commandline in the following commands.** - -``$ svn mv filename.ext`` - to move or rename a file - -``$ svn rm filename.ext`` - to remove (delete) a file - -``$ svn status`` - will let you know what changes you've made compared to the current repository version - -``$ svn revert filename.ext`` - will fix problems if you deleted or moved a file without telling svn. - -``$ svn cleanup`` - last resort to fix it if you've got a totally messed up local copy. - Use this if you see error messages about ``locked`` files that you can't fix otherwise. - -Circumventing proxies ----------------------------- - -Some proxies don't let extended HTTP commands through. If you have an -error complaining about a bad request, you should use https: instead of -http: in the subversion URL. This will make use of SSL encryption, which -cannot be intercepted by proxies. 
- -Alternatively, if you want to change your proxy configuration, see the -subversion FAQ: http://subversion.tigris.org/faq.html#proxy - -How to Avoid Line-ending Hell ------------------------------ - -We will assume that whenever you create a .txt or a .py file, you would -like other people to be able to read it with the line endings their -OS prefers, even if that is different from the one your OS likes. This -could occasionally be wrong -- say when you are specifically testing -that code you are writing handles line endings properly -- but this is -what you want by default. Binary files, on the other hand, should be -stored exactly as is. This has to be set on every client. Here is how: - -In your home directory edit .subversion/config and comment in :: - - [miscellany] - enable-auto-props = yes - - [auto-props] - *.txt = svn:eol-style=native - *.py = svn:eol-style=native - - -.. _website: http://support.microsoft.com/default.aspx?scid=kb%3Ben-us%3B259403 -.. _GUI: http://tortoisesvn.tigris.org/servlets/ProjectDocumentList?folderID=616 -.. _MacOS: http://codespeak.net/~jum/svn-1.4.0-darwin-ppc.tar.gz -.. _versions: http://subversion.tigris.org/project_packages.html -.. _Win: http://www.microsoft.com/downloads/details.aspx?displaylang=en&FamilyID=4B6140F9-2D36-4977-8FA1-6F8A0F5DCA8F -.. _guide: http://svnbook.red-bean.com/book.html#svn-ch-1 -.. _backports: http://www.backports.org -.. _online: http://codespeak.net/svn/pypy/trunk/ -.. _coding-guide: coding-guide.html diff --git a/pypy/doc/dev_method.rst b/pypy/doc/dev_method.rst --- a/pypy/doc/dev_method.rst +++ b/pypy/doc/dev_method.rst @@ -20,7 +20,7 @@ Main tools for achieving this is: * py.test - automated testing - * Subversion - version control + * Mercurial - version control * Transparent communication and documentation (mailinglists, IRC, tutorials etc etc) @@ -237,124 +237,3 @@ interested in using sprints as away of making contact with active developers (Python/compiler design etc)! 
-If you have questions about our sprints and EU-funding - please send an email -to pypy-funding at codespeak.net, our mailinglist for project coordination. - -Previous sprints? -+++++++++++++++++ - -The PyPy team has been sprinting on the following occasions:: - - * Hildesheim Feb 2003 - * Gothenburg May 2003 - * Europython/Louvain-La-Neuve June 2003 - * Berlin Sept 2003 - * Amsterdam Dec 2003 - * Europython/Gothenburg June 2004 - * Vilnius Nov 2004 - * Leysin Jan 2005 - * PyCon/Washington March 2005 - * Europython/Gothenburg June 2005 - * Hildesheim July 2005 - * Heidelberg Aug 2005 - * Paris Oct 2005 - * Gothenburg Dec 2005 - * Mallorca Jan 2006 - * PyCon/Dallas Feb 2006 - * Louvain-La-Neuve March 2006 - * Leysin April 2006 - * Tokyo April 2006 - * Düsseldorf June 2006 - * Europython/Geneva July 2006 - * Limerick Aug 2006 - * Düsseldorf Oct 2006 - * Leysin Jan 2007 - * Hildesheim Feb 2007 - -People who have participated and contributed during our sprints and thus -contributing to PyPy (if we have missed someone here - please contact us -so we can correct it): - - Armin Rigo - Holger Krekel - Samuele Pedroni - Christian Tismer - Laura Creighton - Jacob Hallén - Michael Hudson - Richard Emslie - Anders Chrigström - Alex Martelli - Ludovic Aubry - Adrien DiMascio - Nicholas Chauvat - Niklaus Haldimann - Anders Lehmann - Carl Friedrich Bolz - Eric Van Riet Paap - Stephan Diel - Dinu Gherman - Jens-Uwe Mager - Marcus Denker - Bert Freudenberg - Gunther Jantzen - Henrion Benjamin - Godefroid Chapelle - Anna Ravenscroft - Tomek Meka - Jonathan David Riehl - Patrick Maupain - Etienne Posthumus - Nicola Paolucci - Albertas Agejevas - Marius Gedminas - Jesus Cea Avion - Olivier Dormond - Jacek Generowicz - Brian Dorsey - Guido van Rossum - Bob Ippolito - Alan McIntyre - Lutz Paelike - Michael Chermside - Beatrice Düring - Boris Feigin - Amaury Forgeot d'Arc - Andrew Thompson - Valentino Volonghi - Aurelien Campeas - Stephan Busemann - Johan Hahn - Gerald Klix - Gene Oden - 
Josh Gilbert - Geroge Paci - Martin Blais - Stuart Williams - Jiwon Seo - Michael Twomey - Wanja Saatkamp - Alexandre Fayolle - Raphaël Collet - Grégoire Dooms - Sanghyeon Seo - Yutaka Niibe - Yusei Tahara - George Toshida - Koichi Sasada - Guido Wesdorp - Maciej Fijalkowski - Antonio Cuni - Lawrence Oluyede - Fabrizio Milo - Alexander Schremmer - David Douard - Michele Frettoli - Simon Burton - Aaron Bingham - Pieter Zieschang - Sad Rejeb - Brian Sutherland - Georg Brandl - - diff --git a/pypy/doc/maemo.rst b/pypy/doc/maemo.rst deleted file mode 100644 --- a/pypy/doc/maemo.rst +++ /dev/null @@ -1,187 +0,0 @@ -How to run PyPy on top of maemo platform -======================================== - -This howto explains how to use Scratchbox_ to cross-compile PyPy's -Python Interpreter to an `Internet-Tablet-OS`_, more specifically -the Maemo_ platform. This howto should work well for getting -a usable Python Interpreter for Nokia's N810_ device. - -setup cross-compilation environment -------------------------------------- - -The main steps are to install scratchbox and the Maemo SDK. Please refer -to Nokia's `INSTALL.txt`_ for more detail. - -Adjust linux kernel settings -+++++++++++++++++++++++++++++++++ - -In order to install and run scratchbox you will need to adjust -your Linux kernel settings. Note that the VDSO setting may -crash your computer - if that is the case, try running without -this setting. 
You can try it like this:: - - $ echo 4096 | sudo tee /proc/sys/vm/mmap_min_addr - $ echo 0 | sudo tee /proc/sys/vm/vdso_enabled - -If that works fine for you (on some machines the vdso setting can freeze machines) -you can make the changes permanent by editing ``/etc/sysctl.conf`` to contain:: - - vm.vdso_enabled = 0 - vm.mmap_min_addr = 4096 - -install scratchbox packages -+++++++++++++++++++++++++++++++++ - -Download - - http://repository.maemo.org/stable/diablo/maemo-scratchbox-install_4.1.1.sh - -and run this script as root:: - - $ sh maemo-scratchbox-install_4.1.1.sh -s /scratchbox -u ACCOUNTNAME - -The script will automatically download Debian packages or tarballs -and pre-configure a scratchbox environment with so called "devkits" -and "toolchains" for performing cross-compilation. It's fine -and recommended to use your linux account name as a scratchbox -ACCOUNTNAME. - -It also sets up an "sbox" group on your system and makes you -a member - giving the right to login to a scratchbox environment. - -testing that scratchbox environment works -+++++++++++++++++++++++++++++++++++++++++++++++ - -Login freshly to your Linux account in order to activate -your membership in the "sbox" unix group and then type:: - - $ /scratchbox/login - -This should warn you with something like "sb-conf: no current -target" because we have not yet created a cross-compilation -target. - -Note that Scratchbox starts daemon services which -can be controlled via:: - - /scratchbox/sbin/sbox_ctl start|stop - - -Installing the Maemo SDK -+++++++++++++++++++++++++++++++ - -To mimic the specific N810_ environment we now install the Maemo-SDK. -This will create an target within our new scratchbox environment -that we then use to compile PyPy. - -Make sure that you are a member of the "sbox" group - this might -require logging out and in again. 
- -Then, download - - http://repository.maemo.org/stable/diablo/maemo-sdk-install_4.1.1.sh - -and execute it with user privileges:: - - $ sh maemo-sdk-install_4.1.1.sh - -When being asked select the default "Runtime + Dev" packages. You do not need -Closed source Nokia binaries for PyPy. This installation -script will download "rootstraps" and create so called -"targets" and preselect the "DIABLO_ARMEL" target for ARM -compilation. Within the targets a large number of packages -will be pre-installed resulting in a base scratchbox -environment that is usable for cross compilation of PyPy. - -Customizing the DIABLO_ARMEL target for PyPy -++++++++++++++++++++++++++++++++++++++++++++++++ - -As PyPy does not yet provide a debian package description -file for use on Maemo, we have to install some dependencies manually -into our Scratchbox target environment. - -1. Go into your scratchbox by executing ``/scratchbox/login`` - (this should bring you to a shell with the DIABLO_ARMEL target) - -2. Add these lines to ``/etc/apt/sources.list``:: - - deb http://repository.maemo.org/extras/ diablo free non-free - deb http://repository.maemo.org/extras-devel/ diablo free non-free - - NOTE: if you have an older version of Maemo on your device you - can try substitute "chinook" for "diablo" in the above lines - and/or update your firmware. You can probably see which version - you are using by looking at the other content of the ``sources.list``. - -3. Perform ``apt-get update``. - -4. Install some necessary packages:: - - apt-get install python2.5-dev libffi4-dev zlib1g-dev libbz2-dev libgc-dev libncurses5-dev - - The "libgc-dev" package is only needed if you want to use the Boehm - garbage collector. - -5. Leave the scratchbox shell again with ``exit``. - - -Translating PyPy for the Maemo platform ------------------------------------------- - -You at least need "gcc" and "libc-dev" packages on your host system -to compile pypy. 
The scratchbox and its DIABLO_ARMEL target contains -its own copies of GCC, various C libraries and header files -which pypy needs for successful cross-compilation. - -Now, on the host system, perform a subversion checkout of PyPy:: - - svn co https://codespeak.net/svn/pypy/trunk pypy-trunk - -Several svn revisions since the 60000's are known to work and -the last manually tested one is currently 65011. - -Change to the ``pypy-trunk/pypy/translator/goal`` directory and execute:: - - python translate.py --platform=maemo --opt=3 - -You need to run translate.py using Python 2.5. This will last some 30-60 -minutes on most machines. For compiling the C source code PyPy's tool chain -will use our scratchbox/Maemo cross-compilation environment. - -When this step succeeds, your ``goal`` directory will contain a binary called -``pypy-c`` which is executable on the Maemo device. To run this binary -on your device you need to also copy some support files. A good way to -perform copies to your device is to install OpenSSH on the -mobile device and use "scp" or rsync for transferring files. - -You can just copy your whole pypy-trunk directory over to your mobile -device - however, only these should be needed:: - - lib/pypy1.2/lib_pypy - lib/pypy1.2/lib-python - pypy/translator/goal/pypy-c - -It is necessary that the ``pypy-c`` can find a "lib-python" and "lib_pypy" directory -if you want to successfully startup the interpreter on the device. - -Start ``pypy-c`` on the device. If you see an error like "setupterm: could not find terminal" -you probably need to perform this install on the device:: - - apt-get install ncurses-base - -Eventually you should see something like:: - - Nokia-N810-51-3:~/pypy/trunk# ./pypy-c - Python Python 2.5.2 (pypy 1.0.0 build 59527) on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``E09 2K @CAA:85?'' - >>>> - - -.. _N810: http://en.wikipedia.org/wiki/Nokia_N810 -.. 
_`Internet-Tablet-OS`: http://en.wikipedia.org/wiki/Internet_Tablet_OS -.. _Maemo: http://www.maemo.org -.. _Scratchbox: http://www.scratchbox.org -.. _`INSTALL.txt`: http://tablets-dev.nokia.com/4.1/INSTALL.txt - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -4,8 +4,6 @@ .. contents:: - - This document describes coding requirements and conventions for working with the PyPy code base. Please read it carefully and ask back any questions you might have. The document does not talk @@ -878,12 +876,6 @@ branching/copying is a cheap operation with subversion, as it takes constant time irrespective of the size of the tree. -- To learn more about how to use subversion read `this document`_. - -.. _`this document`: svn-help.html - - - .. _`using development tracker`: Using the development bug/feature tracker diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,35 +7,16 @@ .. 
toctree:: + + discussion/cli-optimizations.rst + discussion/distribution-implementation.rst + discussion/distribution-newattempt.rst + discussion/distribution-roadmap.rst + discussion/distribution.rst + discussion/finalizer-order.rst + discussion/howtoimplementpickling.rst + discussion/improve-rpython.rst + discussion/outline-external-ootype.rst + discussion/VM-integration.rst - discussion/GC-performance.rst - discussion/VM-integration.rst - discussion/chained_getattr.rst - discussion/cli-optimizations.rst - discussion/cmd-prompt-translation.rst - discussion/compiled-swamp.rst - discussion/ctypes_modules.rst - discussion/ctypes_todo.rst - discussion/distribution.rst - discussion/distribution-implementation.rst - discussion/distribution-newattempt.rst - discussion/distribution-roadmap.rst - discussion/emptying-the-malloc-zoo.rst - discussion/finalizer-order.rst - discussion/gc.rst - discussion/howtoimplementpickling.rst - discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/oz-thread-api.rst - discussion/paper-wishlist.rst - discussion/parsing-ideas.rst - discussion/pypy_metaclasses_in_cl.rst - discussion/removing-stable-compiler.rst - discussion/security-ideas.rst - discussion/somepbc-refactoring-plan.rst - discussion/summer-of-pypy-pytest.rst - discussion/testing-zope.rst - discussion/thoughts_string_interning.rst - discussion/translation-swamp.rst - discussion/use_case_of_logic.rst diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -5,26 +5,12 @@ .. doc-index: This needs merging somehow -.. svn-help.rst: Needs merging/replacing with hg stuff: - - .. 
toctree:: distribution.rst - rffi.rst - - sandbox.rst - - statistic/index.rst - - docindex.rst - - svn-help.rst - dot-net.rst - maemo.rst From commits-noreply at bitbucket.org Mon Apr 25 16:15:42 2011 From: commits-noreply at bitbucket.org (lac) Date: Mon, 25 Apr 2011 16:15:42 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) kill old codespeak doc style sheets, and navigation used by them. Message-ID: <20110425141542.41FAF282C2B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43593:e03921e948fd Date: 2011-04-25 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/e03921e948fd/ Log: (lac, cfbolz) kill old codespeak doc style sheets, and navigation used by them. diff --git a/pypy/doc/statistic/style.css b/pypy/doc/statistic/style.css deleted file mode 100644 --- a/pypy/doc/statistic/style.css +++ /dev/null @@ -1,1083 +0,0 @@ -body,body.editor,body.body { - font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif; - background: White; - color: Black; -} - -a, a.reference { - text-decoration: none; -} -a[href]:hover { text-decoration: underline; } - -img { - border: none; - vertical-align: middle; -} - -p, div.text { - text-align: left; - line-height: 1.5em; - margin: 0.5em 0em 0em 0em; -} - - - -p a:active { - color: Red; - background-color: transparent; -} - -p img { - border: 0; - margin: 0; -} - -img.inlinephoto { - padding: 0; - padding-right: 1em; - padding-top: 0.7em; - float: left; -} - -hr { - clear: both; - height: 1px; - color: #8CACBB; - background-color: transparent; -} - - -ul { - line-height: 1.5em; - /*list-style-image: url("bullet.gif"); */ - margin-left: 1.5em; - padding:0; -} - -ol { - line-height: 1.5em; - margin-left: 1.5em; - padding:0; -} - -ul a, ol a { - text-decoration: underline; -} - -dl { -} - -dt { - font-weight: bold; -} - -dd { - line-height: 1.5em; - margin-bottom: 1em; -} - -blockquote { - font-family: Times, "Times New Roman", serif; - font-style: italic; - font-size: 
120%; -} - -code { - color: Black; - /*background-color: #dee7ec;*/ - background-color: #cccccc; -} - -pre { - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; - overflow: auto; -} - - -.netscape4 { - display: none; -} - -/* main page styles */ - -/*a[href]:hover { color: black; text-decoration: underline; } -a[href]:link { color: black; text-decoration: underline; } -a[href] { color: black; text-decoration: underline; } -*/ - -span.menu_selected { - color: black; - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; - background-color: #cccccc; -} - - -a.menu { - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; -} - -a.menu[href]:visited, a.menu[href]:link{ - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; -} - -a.menu[href]:hover { - /*color: black;*/ -} - -div.project_title{ - /*border-spacing: 20px;*/ - font: 160% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: center; - padding-bottom: 0.3em; -} - -a.wikicurrent { - font: 100% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: middle; -} - - -table.body { - border: 0; - /*padding: 0; - border-spacing: 0px; - border-collapse: separate; - */ -} - -td.page-header-left { - padding: 5px; - /*border-bottom: 1px solid #444444;*/ -} - -td.page-header-top { - padding: 0; - - /*border-bottom: 1px solid #444444;*/ -} - -td.sidebar { - padding: 1 0 0 1; -} - -td.sidebar p.classblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeee; -} - -td.sidebar p.userblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeff; -} - -td.content { - padding: 1 5 1 5; - vertical-align: top; - width: 100%; -} - -p.ok-message { - background-color: #22bb22; - 
padding: 5 5 5 5; - color: white; - font-weight: bold; -} -p.error-message { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} - -p:first-child { - margin: 0 ; - padding: 0; -} - -/* style for forms */ -table.form { - padding: 2; - border-spacing: 0px; - border-collapse: separate; -} - -table.form th { - color: #333388; - text-align: right; - vertical-align: top; - font-weight: normal; -} -table.form th.header { - font-weight: bold; - background-color: #eeeeff; - text-align: left; -} - -table.form th.required { - font-weight: bold; -} - -table.form td { - color: #333333; - empty-cells: show; - vertical-align: top; -} - -table.form td.optional { - font-weight: bold; - font-style: italic; -} - -table.form td.html { - color: #777777; -} - -/* style for lists */ -table.list { - border-spacing: 0px; - border-collapse: separate; - vertical-align: top; - padding-top: 0; - width: 100%; -} - -table.list th { - padding: 0 4 0 4; - color: #404070; - background-color: #eeeeff; - border-right: 1px solid #404070; - border-top: 1px solid #404070; - border-bottom: 1px solid #404070; - vertical-align: top; - empty-cells: show; -} -table.list th a[href]:hover { color: #404070 } -table.list th a[href]:link { color: #404070 } -table.list th a[href] { color: #404070 } -table.list th.group { - background-color: #f4f4ff; - text-align: center; - font-size: 120%; -} - -table.list td { - padding: 0 4 0 4; - border: 0 2 0 2; - border-right: 1px solid #404070; - color: #404070; - background-color: white; - vertical-align: top; - empty-cells: show; -} - -table.list tr.normal td { - background-color: white; - white-space: nowrap; -} - -table.list tr.alt td { - background-color: #efefef; - white-space: nowrap; -} - -table.list td:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list th:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list tr.navigation th { - 
text-align: right; -} -table.list tr.navigation th:first-child { - border-right: none; - text-align: left; -} - - -/* style for message displays */ -table.messages { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.messages th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.messages th { - font-weight: bold; - color: black; - text-align: left; - border-bottom: 1px solid #afafaf; -} - -table.messages td { - font-family: monospace; - background-color: #efefef; - border-bottom: 1px solid #afafaf; - color: black; - empty-cells: show; - border-right: 1px solid #afafaf; - vertical-align: top; - padding: 2 5 2 5; -} - -table.messages td:first-child { - border-left: 1px solid #afafaf; - border-right: 1px solid #afafaf; -} - -/* style for file displays */ -table.files { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.files th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.files th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -table.files td { - font-family: monospace; - empty-cells: show; -} - -/* style for history displays */ -table.history { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.history th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; - font-size: 100%; -} - -table.history th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; - font-size: 90%; -} - -table.history td { - font-size: 90%; - vertical-align: top; - empty-cells: show; -} - - -/* style for class list */ -table.classlist { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classlist th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - 
font-weight: bold; - background-color: white; - color: #707040; -} - -table.classlist th { - font-weight: bold; - text-align: left; -} - - -/* style for class help display */ -table.classhelp { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classhelp th { - font-weight: bold; - text-align: left; - color: #707040; -} - -table.classhelp td { - padding: 2 2 2 2; - border: 1px solid black; - text-align: left; - vertical-align: top; - empty-cells: show; -} - - -/* style for "other" displays */ -table.otherinfo { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.otherinfo th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.otherinfo th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -input { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - -select { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - - -a.nonexistent { - color: #FF2222; -} -a.nonexistent:visited { - color: #FF2222; -} -a.external { - color: #AA6600; -} - -/* -dl,ul,ol { - margin-top: 1pt; -} -tt,pre { - font-family: Lucida Console,Courier New,Courier,monotype; - font-size: 12pt; -} -pre.code { - margin-top: 8pt; - margin-bottom: 8pt; - background-color: #FFFFEE; - white-space:pre; - border-style:solid; - border-width:1pt; - border-color:#999999; - color:#111111; - padding:5px; - width:100%; -} -*/ -div.diffold { - background-color: #FFFF80; - border-style:none; - border-width:thin; - width:100%; -} -div.diffnew { - background-color: #80FF80; - border-style:none; - border-width:thin; - width:100%; -} -div.message { - margin-top: 6pt; - background-color: #E8FFE8; - border-style:solid; - border-width:1pt; - 
border-color:#999999; - color:#440000; - padding:5px; - width:100%; -} -strong.highlight { - background-color: #FFBBBB; -/* as usual, NetScape fucks up with innocent CSS - border-color: #FFAAAA; - border-style: solid; - border-width: 1pt; -*/ -} - -table.navibar { - background-color: #C8C8C8; - border-spacing: 3px; -} -td.navibar { - background-color: #E8E8E8; - vertical-align: top; - text-align: right; - padding: 0px; -} - -div.pagename { - font-size: 140%; - color: blue; - text-align: center; - font-weight: bold; - background-color: white; - padding: 0 ; -} - -a.wikiaction, input.wikiaction { - color: black; - text-decoration: None; - text-align: center; - color: black; - /*border: 1px solid #3ba6ec; */ - margin: 4px; - padding: 5; - padding-bottom: 0; - white-space: nowrap; -} - -a.wikiaction[href]:hover { - color: black; - text-decoration: none; - /*background-color: #dddddd; */ -} - -span.wikiuserpref { - padding-top: 1em; - font-size: 120%; -} - -div.wikitrail { - vertical-align: bottom; - /*font-size: -1;*/ - padding-top: 1em; - display: none; -} - -div.wikiaction { - vertical-align: middle; - /*border-bottom: 1px solid #8cacbb;*/ - padding-bottom:1em; - text-align: left; - width: 100%; -} - -div.wikieditmenu { - text-align: right; -} - -form.wikiedit { - border: 1px solid #8cacbb; - background-color: #f0f0f0; - background-color: #fabf00; - padding: 1em; - padding-right: 0em; -} - -div.legenditem { - padding-top: 0.5em; - padding-left: 0.3em; -} - -span.wikitoken { - background-color: #eeeeee; -} - - -div#contentspace h1:first-child, div.heading:first-child { - padding-top: 0; - margin-top: 0; -} -div#contentspace h2:first-child { - padding-top: 0; - margin-top: 0; -} - -/* heading and paragraph text */ - -div.heading, h1 { - font-family: Verdana, Helvetica, Arial, sans-serif; - background-color: #58b3ef; - background-color: #FFFFFF; - /*color: #4893cf;*/ - color: black; - padding-top: 1.0em; - padding-bottom:0.2em; - text-align: left; - margin-top: 0em; - 
/*margin-bottom:8pt;*/ - font-weight: bold; - font-size: 115%; - border-bottom: 1px solid #8CACBB; -} - - -h1, h2, h3, h4, h5, h6 { - color: Black; - clear: left; - font: 100% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 1em; - padding-bottom: 0.2em; - /*border-bottom: 1px solid #8CACBB;*/ -} -/* h1,h2 { padding-top: 0; }*/ - - -h1 { font-size: 145%; } -h2 { font-size: 135%; } -h3 { font-size: 125%; } -h4 { font-size: 120%; } -h5 { font-size: 110%; } -h6 { font-size: 80%; } - -h1 a { text-decoration: None;} - -div.exception { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -pre.exception { - font-size: 110%; - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; -} - -/* defines for navgiation bar (documentation) */ - - -div.direntry { - padding-top: 0.3em; - padding-bottom: 0.3em; - margin-right: 1em; - font-weight: bold; - background-color: #dee7ec; - font-size: 110%; -} - -div.fileentry { - font-family: Verdana, Helvetica, Arial, sans-serif; - padding-bottom: 0.3em; - white-space: nowrap; - line-height: 150%; -} - -a.fileentry { - white-space: nowrap; -} - - -span.left { - text-align: left; -} -span.right { - text-align: right; -} - -div.navbar { - /*margin: 0;*/ - font-size: 80% /*smaller*/; - font-weight: bold; - text-align: left; - /* position: fixed; */ - top: 100pt; - left: 0pt; /* auto; */ - width: 120pt; - /* right: auto; - right: 0pt; 2em; */ -} - - -div.history a { - /* font-size: 70%; */ -} - -div.wikiactiontitle { - font-weight: bold; -} - -/* REST defines */ - -div.document { - margin: 0; -} - -h1.title { - margin: 0; -} - -td.toplist { - vertical-align: top; -} - -img#pyimg { - position: absolute; - top: 4px; - left: 4px; -} - -img#extraimg { - position: absolute; - right: 14px; - top: 4px; -} - -div#navspace { - position: absolute; - top: 130px; - left: 11px; - font-size: 100%; - width: 150px; - 
overflow: hidden; /* scroll; */ -} - -div#metaspace { - position: absolute; - top: 40px; - left: 170px; -} - -div#errorline { - position: relative; - top: 5px; - float: right; -} - -div#contentspace { - position: absolute; - /* font: 120% "Times New Roman", serif;*/ - font: 110% Verdana, Helvetica, Arial, sans-serif; - top: 130px; - left: 170px; - margin-right: 5px; -} - -div#menubar { -/* width: 400px; */ - float: left; -} - -/* for the documentation page */ -div#docinfoline { - position: relative; - top: 5px; - left: 0px; - - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 1em; - color: black; - /*border-width: 1pt; - border-style: solid;*/ - -} - -div#docnavlist { - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 2em; - color: black; - border-width: 1pt; - /*border-style: solid;*/ -} - - -/* text markup */ - -div.listtitle { - color: Black; - clear: left; - font: 120% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 0em; - padding-bottom: 0.2em; - margin-right: 0.5em; - border-bottom: 1px solid #8CACBB; -} - -div.actionbox h3 { - padding-top: 0; - padding-right: 0.5em; - padding-left: 0.5em; - background-color: #fabf00; - text-align: center; - border: 1px solid black; /* 8cacbb; */ -} - -div.actionbox a { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; -} - -div.actionbox a.history { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; - font-size: 90%; -} - -div.actionbox { - margin-bottom: 2em; - padding-bottom: 1em; - overflow: hidden; /* scroll; */ -} - -/* taken from docutils (oh dear, a bit senseless) */ -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - - -/* -:Author: David Goodger 
-:Contact: goodger at users.sourceforge.net -:date: $Date: 2003/01/22 22:26:48 $ -:version: $Revision: 1.29 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ -/* -.first { - margin-top: 0 } - -.last { - margin-bottom: 0 } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.address { - margin-bottom: 0 ; - margin-top: 0 ; - font-family: serif ; - font-size: 100% } - 
-pre.line-block { - font-family: serif ; - font-size: 100% } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.interpreted { - font-family: sans-serif } - -span.option { - white-space: nowrap } - -span.option-argument { - font-style: italic } - -span.pre { - white-space: pre } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: top } - -th.docinfo-name, th.field-name { - font-weight: bold ; - text-align: left ; - white-space: nowrap } - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - font-size: 100% } - -tt { - background-color: #eeeeee } - -ul.auto-toc { - list-style-type: none } -*/ - -div.section { - margin-top: 1.0em ; -} diff --git a/pypy/doc/style.css b/pypy/doc/style.css deleted file mode 100644 --- a/pypy/doc/style.css +++ /dev/null @@ -1,1091 +0,0 @@ -body,body.editor,body.body { - font: 90% "Times New Roman", Arial, Verdana, Helvetica, serif; - background: White; - color: Black; -} - -a, a.reference { - text-decoration: none; -} -a[href]:hover { text-decoration: underline; } - -img { - border: none; - vertical-align: middle; -} - -p, div.text { - text-align: left; - line-height: 1.5em; - margin: 0.5em 0em 0em 0em; -} - - - -p a:active { - color: Red; - background-color: transparent; -} - -p img { - border: 0; - margin: 0; -} - -img.inlinephoto { - padding: 0; - padding-right: 1em; - padding-top: 0.7em; - float: left; -} - -hr { - clear: both; - height: 1px; - color: #8CACBB; - background-color: transparent; -} - - -ul { - 
line-height: 1.5em; - /*list-style-image: url("bullet.gif"); */ - margin-left: 1.5em; - padding:0; -} - -ol { - line-height: 1.5em; - margin-left: 1.5em; - padding:0; -} - -ul a, ol a { - text-decoration: underline; -} - -dl { -} - -dt { - font-weight: bold; -} - -dd { - line-height: 1.5em; - margin-bottom: 1em; -} - -blockquote { - font-family: Times, "Times New Roman", serif; - font-style: italic; - font-size: 120%; -} - -code { - color: Black; - /*background-color: #dee7ec;*/ - background-color: #cccccc; -} - -pre { - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; - overflow: auto; -} - - -.netscape4 { - display: none; -} - -/* main page styles */ - -/*a[href]:hover { color: black; text-decoration: underline; } -a[href]:link { color: black; text-decoration: underline; } -a[href] { color: black; text-decoration: underline; } -*/ - -span.menu_selected { - color: black; - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; - background-color: #cccccc; -} - - -a.menu { - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; -} - -a.menu[href]:visited, a.menu[href]:link{ - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; -} - -a.menu[href]:hover { - /*color: black;*/ -} - -div.project_title{ - /*border-spacing: 20px;*/ - font: 160% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: center; - padding-bottom: 0.3em; -} - -a.wikicurrent { - font: 100% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: middle; -} - - -table.body { - border: 0; - /*padding: 0; - border-spacing: 0px; - border-collapse: separate; - */ -} - -td.page-header-left { - padding: 5px; - /*border-bottom: 1px solid #444444;*/ -} - -td.page-header-top { - padding: 0; - - /*border-bottom: 1px solid #444444;*/ -} - 
-td.sidebar { - padding: 1 0 0 1; -} - -td.sidebar p.classblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeee; -} - -td.sidebar p.userblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeff; -} - -td.content { - padding: 1 5 1 5; - vertical-align: top; - width: 100%; -} - -p.ok-message { - background-color: #22bb22; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -p.error-message { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} - -p:first-child { - margin: 0 ; - padding: 0; -} - -/* style for forms */ -table.form { - padding: 2; - border-spacing: 0px; - border-collapse: separate; -} - -table.form th { - color: #333388; - text-align: right; - vertical-align: top; - font-weight: normal; -} -table.form th.header { - font-weight: bold; - background-color: #eeeeff; - text-align: left; -} - -table.form th.required { - font-weight: bold; -} - -table.form td { - color: #333333; - empty-cells: show; - vertical-align: top; -} - -table.form td.optional { - font-weight: bold; - font-style: italic; -} - -table.form td.html { - color: #777777; -} - -/* style for lists */ -table.list { - border-spacing: 0px; - border-collapse: separate; - vertical-align: top; - padding-top: 0; - width: 100%; -} - -table.list th { - padding: 0 4 0 4; - color: #404070; - background-color: #eeeeff; - border-right: 1px solid #404070; - border-top: 1px solid #404070; - border-bottom: 1px solid #404070; - vertical-align: top; - empty-cells: show; -} -table.list th a[href]:hover { color: #404070 } -table.list th a[href]:link { color: #404070 } -table.list th a[href] { color: #404070 } -table.list th.group { - background-color: #f4f4ff; - text-align: center; - font-size: 120%; -} - -table.list td { - padding: 0 4 0 4; - border: 0 2 0 2; - border-right: 1px solid #404070; - color: #404070; - background-color: white; - vertical-align: top; - empty-cells: 
show; -} - -table.list tr.normal td { - background-color: white; - white-space: nowrap; -} - -table.list tr.alt td { - background-color: #efefef; - white-space: nowrap; -} - -table.list td:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list th:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list tr.navigation th { - text-align: right; -} -table.list tr.navigation th:first-child { - border-right: none; - text-align: left; -} - - -/* style for message displays */ -table.messages { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.messages th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.messages th { - font-weight: bold; - color: black; - text-align: left; - border-bottom: 1px solid #afafaf; -} - -table.messages td { - font-family: monospace; - background-color: #efefef; - border-bottom: 1px solid #afafaf; - color: black; - empty-cells: show; - border-right: 1px solid #afafaf; - vertical-align: top; - padding: 2 5 2 5; -} - -table.messages td:first-child { - border-left: 1px solid #afafaf; - border-right: 1px solid #afafaf; -} - -/* style for file displays */ -table.files { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.files th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.files th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -table.files td { - font-family: monospace; - empty-cells: show; -} - -/* style for history displays */ -table.history { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.history th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; - font-size: 100%; -} - 
-table.history th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; - font-size: 90%; -} - -table.history td { - font-size: 90%; - vertical-align: top; - empty-cells: show; -} - - -/* style for class list */ -table.classlist { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classlist th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.classlist th { - font-weight: bold; - text-align: left; -} - - -/* style for class help display */ -table.classhelp { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classhelp th { - font-weight: bold; - text-align: left; - color: #707040; -} - -table.classhelp td { - padding: 2 2 2 2; - border: 1px solid black; - text-align: left; - vertical-align: top; - empty-cells: show; -} - - -/* style for "other" displays */ -table.otherinfo { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.otherinfo th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.otherinfo th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -input { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - -select { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - - -a.nonexistent { - color: #FF2222; -} -a.nonexistent:visited { - color: #FF2222; -} -a.external { - color: #AA6600; -} - -/* -dl,ul,ol { - margin-top: 1pt; -} -tt,pre { - font-family: Lucida Console,Courier New,Courier,monotype; - font-size: 12pt; -} -pre.code { - margin-top: 8pt; - margin-bottom: 8pt; - background-color: #FFFFEE; - white-space:pre; - 
border-style:solid; - border-width:1pt; - border-color:#999999; - color:#111111; - padding:5px; - width:100%; -} -*/ -div.diffold { - background-color: #FFFF80; - border-style:none; - border-width:thin; - width:100%; -} -div.diffnew { - background-color: #80FF80; - border-style:none; - border-width:thin; - width:100%; -} -div.message { - margin-top: 6pt; - background-color: #E8FFE8; - border-style:solid; - border-width:1pt; - border-color:#999999; - color:#440000; - padding:5px; - width:100%; -} -strong.highlight { - background-color: #FFBBBB; -/* as usual, NetScape fucks up with innocent CSS - border-color: #FFAAAA; - border-style: solid; - border-width: 1pt; -*/ -} - -table.navibar { - background-color: #C8C8C8; - border-spacing: 3px; -} -td.navibar { - background-color: #E8E8E8; - vertical-align: top; - text-align: right; - padding: 0px; -} - -div.pagename { - font-size: 140%; - color: blue; - text-align: center; - font-weight: bold; - background-color: white; - padding: 0 ; -} - -a.wikiaction, input.wikiaction { - color: black; - text-decoration: None; - text-align: center; - color: black; - /*border: 1px solid #3ba6ec; */ - margin: 4px; - padding: 5; - padding-bottom: 0; - white-space: nowrap; -} - -a.wikiaction[href]:hover { - color: black; - text-decoration: none; - /*background-color: #dddddd; */ -} - -span.wikiuserpref { - padding-top: 1em; - font-size: 120%; -} - -div.wikitrail { - vertical-align: bottom; - /*font-size: -1;*/ - padding-top: 1em; - display: none; -} - -div.wikiaction { - vertical-align: middle; - /*border-bottom: 1px solid #8cacbb;*/ - padding-bottom:1em; - text-align: left; - width: 100%; -} - -div.wikieditmenu { - text-align: right; -} - -form.wikiedit { - border: 1px solid #8cacbb; - background-color: #f0f0f0; - background-color: #fabf00; - padding: 1em; - padding-right: 0em; -} - -div.legenditem { - padding-top: 0.5em; - padding-left: 0.3em; -} - -span.wikitoken { - background-color: #eeeeee; -} - - -div#contentspace h1:first-child, 
div.heading:first-child { - padding-top: 0; - margin-top: 0; -} -div#contentspace h2:first-child { - padding-top: 0; - margin-top: 0; -} - -/* heading and paragraph text */ - -div.heading, h1 { - font-family: Verdana, Helvetica, Arial, sans-serif; - background-color: #58b3ef; - background-color: #FFFFFF; - /*color: #4893cf;*/ - color: black; - padding-top: 1.0em; - padding-bottom:0.2em; - text-align: left; - margin-top: 0em; - /*margin-bottom:8pt;*/ - font-weight: bold; - font-size: 115%; - border-bottom: 1px solid #8CACBB; -} - - -h1, h2, h3, h4, h5, h6 { - color: Black; - clear: left; - font: 100% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 1em; - padding-bottom: 0.2em; - /*border-bottom: 1px solid #8CACBB;*/ -} -/* h1,h2 { padding-top: 0; }*/ - - -h1 { font-size: 145%; } -h2 { font-size: 135%; } -h3 { font-size: 125%; } -h4 { font-size: 120%; } -h5 { font-size: 110%; } -h6 { font-size: 80%; } - -h1 a { text-decoration: None;} - -div.exception { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -pre.exception { - font-size: 110%; - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; -} - -/* defines for navgiation bar (documentation) */ - - -div.direntry { - padding-top: 0.3em; - padding-bottom: 0.3em; - margin-right: 1em; - font-weight: bold; - background-color: #dee7ec; - font-size: 110%; -} - -div.fileentry { - font-family: Verdana, Helvetica, Arial, sans-serif; - padding-bottom: 0.3em; - white-space: nowrap; - line-height: 150%; -} - -a.fileentry { - white-space: nowrap; -} - - -span.left { - text-align: left; -} -span.right { - text-align: right; -} - -div.navbar { - /*margin: 0;*/ - font-size: 80% /*smaller*/; - font-weight: bold; - text-align: left; - /* position: fixed; */ - top: 100pt; - left: 0pt; /* auto; */ - width: 120pt; - /* right: auto; - right: 0pt; 2em; */ -} - - -div.history a { - /* font-size: 70%; 
*/ -} - -div.wikiactiontitle { - font-weight: bold; -} - -/* REST defines */ - -div.document { - margin: 0; -} - -h1.title { - margin: 0; -} - -td.toplist { - vertical-align: top; -} - -img#pyimg { - position: absolute; - top: 0px; - left: 20px; - margin: 20px; -} - -img#extraimg { - position: absolute; - right: 14px; - top: 4px; -} - -div#navspace { - position: absolute; - top: 130px; - left: 11px; - font-size: 100%; - width: 150px; - overflow: hidden; /* scroll; */ -} - -div#metaspace { - position: absolute; - top: 40px; - left: 210px; -} - -div#errorline { - position: relative; - top: 5px; - float: right; -} - -div#contentspace { - position: absolute; - /* font: 120% "Times New Roman", serif;*/ - font: 110% Verdana, Helvetica, Arial, sans-serif; - top: 140px; - left: 130px; - margin-right: 140px; -} - -div#menubar { -/* width: 400px; */ - float: left; -} - -/* for the documentation page */ -div#docinfoline { - position: relative; - top: 5px; - left: 0px; - - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 1em; - color: black; - /*border-width: 1pt; - border-style: solid;*/ - -} - -div#docnavlist { - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 2em; - color: black; - border-width: 1pt; - /*border-style: solid;*/ -} - - -/* text markup */ - -div.listtitle { - color: Black; - clear: left; - font: 120% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 0em; - padding-bottom: 0.2em; - margin-right: 0.5em; - border-bottom: 1px solid #8CACBB; -} - -div.actionbox h3 { - padding-top: 0; - padding-right: 0.5em; - padding-left: 0.5em; - background-color: #fabf00; - text-align: center; - border: 1px solid black; /* 8cacbb; */ -} - -div.actionbox a { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; -} - -div.actionbox a.history { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; - font-size: 90%; -} - -div.actionbox { - 
margin-bottom: 2em; - padding-bottom: 1em; - overflow: hidden; /* scroll; */ -} - -/* taken from docutils (oh dear, a bit senseless) */ -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - - -/* -:Author: David Goodger -:Contact: goodger at users.sourceforge.net -:date: $Date: 2003/01/22 22:26:48 $ -:version: $Revision: 1.29 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ -/* -.first { - margin-top: 0 } - -.last { - margin-bottom: 0 } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: 
red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.address { - margin-bottom: 0 ; - margin-top: 0 ; - font-family: serif ; - font-size: 100% } - -pre.line-block { - font-family: serif ; - font-size: 100% } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.interpreted { - font-family: sans-serif } - -span.option { - white-space: nowrap } - -span.option-argument { - font-style: italic } - -span.pre { - white-space: pre } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: top } - -th.docinfo-name, th.field-name { - font-weight: bold ; - text-align: left ; - white-space: nowrap } - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - font-size: 100% } - -tt { - background-color: #eeeeee } - -ul.auto-toc { - list-style-type: none } -*/ - -div.section { - margin-top: 1.0em ; -} - -div.abstract { - margin: 2em 4em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } diff --git a/pypy/doc/navlist b/pypy/doc/navlist deleted file mode 100644 --- a/pypy/doc/navlist +++ /dev/null @@ -1,9 +0,0 @@ -[ - 'architecture.html', - 'getting-started.html', - 'coding-guide.html', - 'objspace.html', - 'translation.html', -# 'misc.html', - 
'theory.html', -] From commits-noreply at bitbucket.org Mon Apr 25 16:25:26 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 16:25:26 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Updated lots of old links from codespeak's svn to bitbucket's hg Message-ID: <20110425142526.367F4282B9D@codespeak.net> Author: Dario Bertini Branch: documentation-cleanup Changeset: r43594:74b4a53a3ea3 Date: 2011-04-25 16:14 +0200 http://bitbucket.org/pypy/pypy/changeset/74b4a53a3ea3/ Log: Updated lots of old links from codespeak's svn to bitbucket's hg diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -263,7 +263,7 @@ If you start an untranslated Python interpreter via:: - python pypy-svn/pypy/bin/py.py + python pypy/bin/py.py If you press on the console you enter the interpreter-level console, a @@ -397,15 +397,15 @@ .. _`Dot Graphviz`: http://www.graphviz.org/ .. _Pygame: http://www.pygame.org/ -.. _pyopcode.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/pyopcode.py -.. _eval.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/eval.py -.. _pyframe.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/pyframe.py -.. _function.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/function.py -.. _argument.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/argument.py -.. _baseobjspace.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/baseobjspace.py -.. _module.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/module.py -.. _mixedmodule.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/mixedmodule.py -.. _typedef.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/typedef.py +.. _pyopcode.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/pyopcode.py +.. _eval.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/eval.py +.. 
_pyframe.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/pyframe.py +.. _function.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/function.py +.. _argument.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/argument.py +.. _baseobjspace.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/baseobjspace.py +.. _module.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/module.py +.. _mixedmodule.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/mixedmodule.py +.. _typedef.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/typedef.py .. _Standard object space: objspace.html#the-standard-object-space .. _objspace.py: ../../../../pypy/objspace/std/objspace.py .. _thunk: ../../../../pypy/objspace/thunk.py diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -60,7 +60,7 @@ .. _`development bug/feature tracker`: https://codespeak.net/issue/pypy-dev/ .. _here: http://tismerysoft.de/pypy/irc-logs/pypy .. _`sprint mailing list`: http://codespeak.net/mailman/listinfo/pypy-sprint -.. _`subversion commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn +.. _`commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn .. _`development mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html .. _`Documentation`: docindex.html diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -640,7 +640,7 @@ PyPy has a C compiler, for one thing -- and is usually where new features are implemented first. -.. _`EU report about translation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. 
_`EU report about translation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf A Historical Note diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -241,9 +241,9 @@ .. _`documentation index`: docindex.html .. _`getting-started`: getting-started.html -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf +.. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`the translation document`: translation.html -.. _`Compiling dynamic language implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling dynamic language implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf .. _`Technical reports`: index-report.html .. _`getting started`: getting-started.html diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.rst --- a/pypy/doc/_ref.rst +++ b/pypy/doc/_ref.rst @@ -104,4 +104,3 @@ .. _`translator/jvm/`: ../../../../pypy/translator/jvm .. _`translator/stackless/`: ../../../../pypy/translator/stackless .. _`translator/tool/`: ../../../../pypy/translator/tool -.. _`translator/js/`: http://codespeak.net/svn/pypy/branch/oo-jit/pypy/translator/js/ diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -57,19 +57,19 @@ for Python`_, A. Rigo -.. _bibtex: http://codespeak.net/svn/pypy/extradoc/talk/bibtex.bib +.. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. 
_`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf -.. _`How to *not* write Virtual Machines for Dynamic Languages`: http://codespeak.net/svn/pypy/extradoc/talk/dyla2007/dyla.pdf -.. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009/bolz-tracing-jit.pdf -.. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://codespeak.net/svn/user/cfbolz/jitpl/thesis/final-master.pdf +.. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf +.. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf +.. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://www.iam.unibe.ch/~verwaest/pygirl.pdf .. _`Representation-Based Just-in-Time Specialization and the Psyco Prototype for Python`: http://psyco.sourceforge.net/psyco-pepm-a.ps.gz .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 -.. 
_`Automatic generation of JIT compilers for dynamic languages in .NET`: http://codespeak.net/svn/pypy/extradoc/talk/ecoop2009/main.pdf -.. _`Core Object Optimization Results`: http://codespeak.net/svn/pypy/extradoc/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf +.. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf +.. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf .. _`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf @@ -122,7 +122,7 @@ * `PyCon 2008`_. -.. __: http://codespeak.net/svn/pypy/extradoc/talk/s3-2008/talk.pdf +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/s3-2008/talk.pdf Talks in 2007 @@ -157,9 +157,9 @@ * `Warsaw 2007`_. -.. __: http://codespeak.net/svn/pypy/extradoc/talk/roadshow-ibm/ -.. __: http://codespeak.net/svn/pypy/extradoc/talk/roadshow-google/Pypy_architecture.pdf -.. __: http://codespeak.net/svn/pypy/extradoc/talk/dls2007/rpython-talk.pdf +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/roadshow-ibm/ +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/roadshow-google/Pypy_architecture.pdf +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2007/rpython-talk.pdf Talks in 2006 @@ -257,36 +257,36 @@ .. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf .. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf .. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html -.. _`Sprinting the PyPy way`: http://codespeak.net/svn/pypy/extradoc/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf +.. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf .. 
_`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf -.. _`EuroPython talks 2009`: http://codespeak.net/svn/pypy/extradoc/talk/ep2009/ -.. _`PyCon talks 2009`: http://codespeak.net/svn/pypy/extradoc/talk/pycon2009/ -.. _`Wroclaw (Poland) presentation`: http://codespeak.net/svn/pypy/extradoc/talk/wroclaw2009/talk.pdf +.. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf +.. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ +.. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ +.. _`Wroclaw (Poland) presentation`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/wroclaw2009/talk.pdf .. _`PyPy talk at OpenBossa 09`: http://morepypy.blogspot.com/2009/03/pypy-talk-at-openbossa-09.html -.. _`at SFI 08`: http://codespeak.net/svn/pypy/extradoc/talk/sfi2008/ -.. _`at PyCon Poland 08`: http://codespeak.net/svn/pypy/extradoc/talk/pyconpl-2008/talk.pdf -.. _`The PyPy Project and You`: http://codespeak.net/svn/pypy/extradoc/talk/osdc2008/osdc08.pdf -.. _`EuroPython talks 2008`: http://codespeak.net/svn/pypy/extradoc/talk/ep2008/ +.. _`at SFI 08`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/sfi2008/ +.. _`at PyCon Poland 08`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pyconpl-2008/talk.pdf +.. _`The PyPy Project and You`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/osdc2008/osdc08.pdf +.. _`EuroPython talks 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2008/ .. _`Maemo summit`: http://morepypy.blogspot.com/2008/09/pypypython-at-maemo-summit.html -.. _`PyCon UK 2008 - JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-uk-2008/jit/pypy-vm.pdf -.. 
_`PyCon UK 2008 - Status`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-uk-2008/status/status.pdf -.. _`PyCon Italy 2008`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-italy-2008/pypy-vm.pdf -.. _`RuPy 2008`: http://codespeak.net/svn/pypy/extradoc/talk/rupy2008/ -.. _`RuPy 2007`: http://codespeak.net/svn/pypy/extradoc/talk/rupy2007/ -.. _`PyCon 2008`: http://codespeak.net/svn/pypy/extradoc/talk/pycon2008/ -.. _`ESUG 2007`: http://codespeak.net/svn/pypy/extradoc/talk/esug2007/ -.. _`Bern (Switzerland) 2007`: http://codespeak.net/svn/pypy/extradoc/talk/bern2007/ -.. _`PyCon UK 2007`: http://codespeak.net/svn/pypy/extradoc/talk/pyconuk07/ -.. _Dresden: http://codespeak.net/svn/pypy/extradoc/talk/dresden/ -.. _`EuroPython 2007`: http://codespeak.net/svn/pypy/extradoc/talk/ep2007/ -.. _`Bad Honnef 2007`: http://codespeak.net/svn/pypy/extradoc/talk/badhonnef2007/talk.pdf -.. _`Dzug talk`: http://codespeak.net/svn/pypy/extradoc/talk/dzug2007/dzug2007.txt -.. _`PyCon 2007`: http://codespeak.net/svn/pypy/extradoc/talk/pycon2007/ -.. _`PyCon - Uno 2007`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-uno2007/pycon07.pdf -.. _`Warsaw 2007`: http://codespeak.net/svn/pypy/extradoc/talk/warsaw2007/ -.. _`Warsaw 2006`: http://codespeak.net/svn/pypy/extradoc/talk/warsaw2006/ -.. _`Tokyo 2006`: http://codespeak.net/svn/pypy/extradoc/talk/tokyo/ +.. _`PyCon UK 2008 - JIT`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-uk-2008/jit/pypy-vm.pdf +.. _`PyCon UK 2008 - Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-uk-2008/status/status.pdf +.. _`PyCon Italy 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-italy-2008/pypy-vm.pdf +.. _`RuPy 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/rupy2008/ +.. _`RuPy 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/rupy2007/ +.. _`PyCon 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2008/ +.. _`ESUG 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/esug2007/ +.. 
_`Bern (Switzerland) 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bern2007/ +.. _`PyCon UK 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pyconuk07/ +.. _Dresden: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dresden/ +.. _`EuroPython 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2007/ +.. _`Bad Honnef 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/badhonnef2007/talk.pdf +.. _`Dzug talk`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dzug2007/dzug2007.txt +.. _`PyCon 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2007/ +.. _`PyCon - Uno 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-uno2007/pycon07.pdf +.. _`Warsaw 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/warsaw2007/ +.. _`Warsaw 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/warsaw2006/ +.. _`Tokyo 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/tokyo/ Related projects diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -152,7 +152,7 @@ ./translate.py -Ojit --backend=cli targetpypystandalone.py -.. _`branch/cli-jit`: http://codespeak.net/svn/pypy/branch/cli-jit/ +.. _`branch/cli-jit`: https://bitbucket.org/pypy/pypy/src/tip .. _`Ph.D. thesis`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf The executable and all its dependencies will be stored in the diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -158,7 +158,7 @@ .. _`object spaces`: objspace.html .. _`interpreter optimizations`: interpreter-optimizations.html .. _`translation`: translation.html -.. _`dynamic-language translation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. 
_`dynamic-language translation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf .. _`low-level encapsulation`: low-level-encapsulation.html .. _`translation aspects`: translation-aspects.html .. _`configuration documentation`: config/ diff --git a/pypy/doc/release-1.4.0beta.rst b/pypy/doc/release-1.4.0beta.rst --- a/pypy/doc/release-1.4.0beta.rst +++ b/pypy/doc/release-1.4.0beta.rst @@ -33,4 +33,4 @@ Cheers, The PyPy team -.. _`list of patches`: http://codespeak.net/svn/pypy/trunk/pypy/module/cpyext/patches/ +.. _`list of patches`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/cpyext/patches/ diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -530,6 +530,6 @@ -.. _`Prolog interpreter`: http://codespeak.net/svn/pypy/lang/prolog/ +.. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _parsing: ../../../../pypy/rlib/parsing/ .. _`json format`: http://www.json.org diff --git a/pypy/doc/maemo.rst b/pypy/doc/maemo.rst --- a/pypy/doc/maemo.rst +++ b/pypy/doc/maemo.rst @@ -133,12 +133,12 @@ its own copies of GCC, various C libraries and header files which pypy needs for successful cross-compilation. -Now, on the host system, perform a subversion checkout of PyPy:: +Now, on the host system, perform a mercurial clone of PyPy:: - svn co https://codespeak.net/svn/pypy/trunk pypy-trunk + hg clone ssh://hg at bitbucket.org/pypy/pypy -Several svn revisions since the 60000's are known to work and -the last manually tested one is currently 65011. +Several revisions since about 9d7b7ecb9144 are known to work and +the last manually tested one is currently 7f267e4b7861. 
Change to the ``pypy-trunk/pypy/translator/goal`` directory and execute:: diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -140,35 +140,35 @@ `D14.1 Report about Milestone/Phase 1`_ describes what happened in the PyPy project during the first year of EU funding (December 2004 - December 2005) -.. _`PyPy EU Final Activity Report`: http://codespeak.net/pypy/extradoc/eu-report/PYPY-EU-Final-Activity-Report.pdf -.. _`D01.2-4 Project Organization`: http://codespeak.net/pypy/extradoc/eu-report/D01.2-4_Project_Organization-2007-03-28.pdf -.. _`D02.1 Development Tools and Website`: http://codespeak.net/pypy/extradoc/eu-report/D02.1_Development_Tools_and_Website-2007-03-21.pdf -.. _`D02.2 Release Scheme`: http://codespeak.net/svn/pypy/extradoc/eu-report/D02.2_Release_Scheme-2007-03-30.pdf -.. _`D02.3 Testing Tool`: http://codespeak.net/pypy/extradoc/eu-report/D02.3_Testing_Framework-2007-03-23.pdf -.. _`D03.1 Extension Compiler`: http://codespeak.net/pypy/extradoc/eu-report/D03.1_Extension_Compiler-2007-03-21.pdf -.. _`D04.1 Partial Python Implementation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.1_Partial_Python_Implementation_on_top_of_CPython.pdf -.. _`D04.2 Complete Python Implementation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.2_Complete_Python_Implementation_on_top_of_CPython.pdf -.. _`D04.3 Parser and Bytecode Compiler`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.3_Report_about_the_parser_and_bytecode_compiler.pdf -.. _`D04.4 PyPy as a Research Tool`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.4_Release_PyPy_as_a_research_tool.pdf -.. _`D05.1 Compiling Dynamic Language Implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf -.. 
_`D05.2 A Compiled Version of PyPy`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.2_A_compiled,_self-contained_version_of_PyPy.pdf -.. _`D05.3 Implementation with Translation Aspects`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.3_Publish_on_implementation_with_translation_aspects.pdf -.. _`D05.4 Encapsulating Low Level Aspects`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.4_Publish_on_encapsulating_low_level_language_aspects.pdf -.. _`D06.1 Core Object Optimization Results`: http://codespeak.net/svn/pypy/extradoc/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf -.. _`D08.2 JIT Compiler Architecture`: http://codespeak.net/pypy/extradoc/eu-report/D08.2_JIT_Compiler_Architecture-2007-05-01.pdf -.. _`D08.1 JIT Compiler Release`: http://codespeak.net/pypy/extradoc/eu-report/D08.1_JIT_Compiler_Release-2007-04-30.pdf -.. _`D09.1 Constraint Solving and Semantic Web`: http://codespeak.net/pypy/extradoc/eu-report/D09.1_Constraint_Solving_and_Semantic_Web-2007-05-11.pdf -.. _`D10.1 Aspect-Oriented, Design-by-Contract Programming and RPython static checking`: http://codespeak.net/pypy/extradoc/eu-report/D10.1_Aspect_Oriented_Programming_in_PyPy-2007-03-22.pdf -.. _`D11.1 PyPy for Embedded Devices`: http://codespeak.net/pypy/extradoc/eu-report/D11.1_PyPy_for_Embedded_Devices-2007-03-26.pdf -.. _`D12.1 High-Level-Backends and Feature Prototypes`: http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf -.. _`D13.1 Integration and Configuration`: http://codespeak.net/pypy/extradoc/eu-report/D13.1_Integration_and_Configuration-2007-03-30.pdf -.. _`D14.1 Report about Milestone/Phase 1`: http://codespeak.net/svn/pypy/extradoc/eu-report/D14.1_Report_about_Milestone_Phase_1.pdf -.. 
_`D14.2 Tutorials and Guide Through the PyPy Source Code`: http://codespeak.net/pypy/extradoc/eu-report/D14.2_Tutorials_and_Guide_Through_the_PyPy_Source_Code-2007-03-22.pdf -.. _`D14.3 Report about Milestone/Phase 2`: http://codespeak.net/pypy/extradoc/eu-report/D14.3_Report_about_Milestone_Phase_2-final-2006-08-03.pdf -.. _`D14.4 PyPy-1.0 Milestone report`: http://codespeak.net/pypy/extradoc/eu-report/D14.4_Report_About_Milestone_Phase_3-2007-05-01.pdf -.. _`D14.5 Documentation of the development process`: http://codespeak.net/pypy/extradoc/eu-report/D14.5_Documentation_of_the_development_process-2007-03-30.pdf +.. _`PyPy EU Final Activity Report`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/PYPY-EU-Final-Activity-Report.pdf +.. _`D01.2-4 Project Organization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D01.2-4_Project_Organization-2007-03-28.pdf +.. _`D02.1 Development Tools and Website`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D02.1_Development_Tools_and_Website-2007-03-21.pdf +.. _`D02.2 Release Scheme`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D02.2_Release_Scheme-2007-03-30.pdf +.. _`D02.3 Testing Tool`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D02.3_Testing_Framework-2007-03-23.pdf +.. _`D03.1 Extension Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D03.1_Extension_Compiler-2007-03-21.pdf +.. _`D04.1 Partial Python Implementation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.1_Partial_Python_Implementation_on_top_of_CPython.pdf +.. _`D04.2 Complete Python Implementation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.2_Complete_Python_Implementation_on_top_of_CPython.pdf +.. _`D04.3 Parser and Bytecode Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.3_Report_about_the_parser_and_bytecode_compiler.pdf +.. 
_`D04.4 PyPy as a Research Tool`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.4_Release_PyPy_as_a_research_tool.pdf +.. _`D05.1 Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`D05.2 A Compiled Version of PyPy`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.2_A_compiled,_self-contained_version_of_PyPy.pdf +.. _`D05.3 Implementation with Translation Aspects`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.3_Publish_on_implementation_with_translation_aspects.pdf +.. _`D05.4 Encapsulating Low Level Aspects`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.4_Publish_on_encapsulating_low_level_language_aspects.pdf +.. _`D06.1 Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf +.. _`D07.1 Massive Parallelism and Translation Aspects`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`D08.2 JIT Compiler Architecture`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D08.2_JIT_Compiler_Architecture-2007-05-01.pdf +.. _`D08.1 JIT Compiler Release`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D08.1_JIT_Compiler_Release-2007-04-30.pdf +.. _`D09.1 Constraint Solving and Semantic Web`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D09.1_Constraint_Solving_and_Semantic_Web-2007-05-11.pdf +.. _`D10.1 Aspect-Oriented, Design-by-Contract Programming and RPython static checking`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D10.1_Aspect_Oriented_Programming_in_PyPy-2007-03-22.pdf +.. _`D11.1 PyPy for Embedded Devices`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D11.1_PyPy_for_Embedded_Devices-2007-03-26.pdf +.. 
_`D12.1 High-Level-Backends and Feature Prototypes`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf +.. _`D13.1 Integration and Configuration`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D13.1_Integration_and_Configuration-2007-03-30.pdf +.. _`D14.1 Report about Milestone/Phase 1`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.1_Report_about_Milestone_Phase_1.pdf +.. _`D14.2 Tutorials and Guide Through the PyPy Source Code`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.2_Tutorials_and_Guide_Through_the_PyPy_Source_Code-2007-03-22.pdf +.. _`D14.3 Report about Milestone/Phase 2`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.3_Report_about_Milestone_Phase_2-final-2006-08-03.pdf +.. _`D14.4 PyPy-1.0 Milestone report`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.4_Report_About_Milestone_Phase_3-2007-05-01.pdf +.. _`D14.5 Documentation of the development process`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.5_Documentation_of_the_development_process-2007-03-30.pdf -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf +.. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -707,9 +707,9 @@ If you want to change a module or test contained in ``lib-python/2.5.2`` then make sure that you copy the file to our ``lib-python/modified-2.5.2`` -directory first. In subversion commandline terms this reads:: +directory first. 
In mercurial commandline terms this reads:: - svn cp lib-python/2.5.2/somemodule.py lib-python/modified-2.5.2/ + hg cp lib-python/2.5.2/somemodule.py lib-python/modified-2.5.2/ and subsequently you edit and commit ``lib-python/modified-2.5.2/somemodule.py``. This copying operation is @@ -860,29 +860,23 @@ - write good log messages because several people are reading the diffs. -- if you add (text/py) files to the repository then please run - pypy/tool/fixeol in that directory. This will make sure - that the property 'svn:eol-style' is set to native which - allows checkin/checkout in native line-ending format. +- What was previously called ``trunk`` is called the ``default`` branch in + mercurial. Branches in mercurial are always pushed together with the rest + of the repository. To create a ``try1`` branch (assuming that a branch named + ``try1`` doesn't already exists) you should do:: -- branching (aka "svn copy") of source code should usually - happen at ``svn/pypy/trunk`` level in order to have a full - self-contained pypy checkout for each branch. For branching - a ``try1`` branch you would for example do:: + hg branch try1 + + The branch will be recorded in the repository only after a commit. To switch + back to the default branch:: + + hg update default + + For further details use the help or refer to the `official wiki`_:: + + hg help branch - svn cp http://codespeak.net/svn/pypy/trunk \ - http://codespeak.net/svn/pypy/branch/try1 - - This allows to checkout the ``try1`` branch and receive a - self-contained working-copy for the branch. Note that - branching/copying is a cheap operation with subversion, as it - takes constant time irrespective of the size of the tree. - -- To learn more about how to use subversion read `this document`_. - -.. _`this document`: svn-help.html - - +.. _`official wiki`: http://mercurial.selenic.com/wiki/Branch .. 
_`using development tracker`: @@ -905,30 +899,13 @@ If you are not a commiter then you can still `register with the tracker`_ easily. -modifying Issues from svn commit messages +modifying Issues from hg commit messages ----------------------------------------- -If you are committing something related to -an issue in the development tracker you -can correlate your login message to a tracker -item by following these rules: +XXX: to be written after migrating the issue tracker away from codespeak.net -- put the content of ``issueN STATUS`` on a single - new line - -- `N` must be an existing issue number from the `development tracker`_. - -- STATUS is one of:: - - unread - chatting - in-progress - testing - duplicate - resolved .. _`register with the tracker`: https://codespeak.net/issue/pypy-dev/user?@template=register -.. _`development tracker`: http://codespeak.net/issue/pypy-dev/ .. _`roundup`: http://roundup.sf.net diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: http://codespeak.net/svn/pypy/trunk/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: http://codespeak.net/svn/pypy/trunk/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,7 +68,7 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: http://codespeak.net/svn/pypy/trunk/pypy/rpython/extfunc.py +.. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py OO backends diff --git a/pypy/doc/translation-aspects.rst b/pypy/doc/translation-aspects.rst --- a/pypy/doc/translation-aspects.rst +++ b/pypy/doc/translation-aspects.rst @@ -475,7 +475,7 @@ .. [DLT] `Compiling dynamic language implementations`_, PyPy documentation (and EU deliverable D05.1), 2005 -.. _`Compiling dynamic language implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling dynamic language implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf .. [PVE] `Simple and Efficient Subclass Tests`_, Jonathan Bachrach, Draft submission to ECOOP-02, 2001 .. _`Simple and Efficient Subclass Tests`: http://people.csail.mit.edu/jrb/pve/pve.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -161,11 +161,12 @@ Currently, we have preliminary versions of a JavaScript interpreter (Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_ (Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_ -(produced during a sprint). `All of them`_ are unfinished at the moment. +(produced during a sprint). On the `PyPy "user" main page`_ there are also a +Scheme and Io implementation, all of these are unfinished at the moment. -.. _`Prolog interpreter`: http://codespeak.net/svn/pypy/lang/prolog/ +.. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 -.. _`All of them`: http://codespeak.net/svn/pypy/lang/ +.. _`PyPy "user" main page`: https://bitbucket.org/pypy/ Development @@ -263,7 +264,7 @@ .. 
[D05.1] Compiling Dynamic Language Implementations, Report from the PyPy project to the E.U., - http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf + https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf .. _`PyPy's RPython`: diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -269,7 +269,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: http://codespeak.net/svn/pypy/trunk/pypy/module/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: From commits-noreply at bitbucket.org Mon Apr 25 16:25:31 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 16:25:31 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110425142531.A97D6282C2C@codespeak.net> Author: Dario Bertini Branch: documentation-cleanup Changeset: r43595:5f6d08ddc76f Date: 2011-04-25 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/5f6d08ddc76f/ Log: merge heads diff --git a/pypy/doc/geninterp.rst b/pypy/doc/geninterp.rst deleted file mode 100644 --- a/pypy/doc/geninterp.rst +++ /dev/null @@ -1,190 +0,0 @@ -.. include:: throwaway.rst - -The Interpreter-Level backend ------------------------------ - -http://codespeak.net/pypy/trunk/pypy/translator/geninterplevel.py - -Motivation -++++++++++ - -PyPy often makes use of `application-level`_ helper methods. -The idea of the 'geninterplevel' backend is to automatically transform -such application level implementations to their equivalent representation -at interpreter level. Then, the RPython to C translation hopefully can -produce more efficient code than always re-interpreting these methods. 
- -One property of translation from application level Python to -Python is, that the produced code does the same thing as the -corresponding interpreted code, but no interpreter is needed -any longer to execute this code. - -.. _`application-level`: coding-guide.html#app-preferable - -Bootstrap issue -+++++++++++++++ - -One issue we had so far was of bootstrapping: some pieces of the -interpreter (e.g. exceptions) were written in geninterped code. -It is unclear how much of it is left, thought. - -That bootstrap issue is (was?) solved by invoking a new bytecode interpreter -which runs on FlowObjspace. FlowObjspace is complete without -complicated initialization. It is able to do abstract interpretation -of any Rpythonic code, without actually implementing anything. It just -records all the operations the bytecode interpreter would have done by -building flowgraphs for all the code. What the Python backend does is -just to produce correct Python code from these flowgraphs and return -it as source code. In the produced code Python operations recorded in -the original flowgraphs are replaced by calls to the corresponding -methods in the `object space`_ interface. - -.. _`object space`: objspace.html - -Example -+++++++ - -.. _implementation: ../../../../pypy/translator/geninterplevel.py - -Let's try a little example. You might want to look at the flowgraph that it -produces. Here, we directly run the Python translation and look at the -generated source. See also the header section of the implementation_ for the -interface:: - - >>> from pypy.translator.geninterplevel import translate_as_module - >>> entrypoint, source = translate_as_module(""" - ... - ... def g(n): - ... i = 0 - ... while n: - ... i = i + n - ... n = n - 1 - ... return i - ... - ... 
""") - -This call has invoked a PyPy bytecode interpreter running on FlowObjspace, -recorded every possible codepath into a flowgraph, and then rendered the -following source code:: - - #!/bin/env python - # -*- coding: LATIN-1 -*- - - def initapp2interpexec(space): - """NOT_RPYTHON""" - - def g(space, w_n_1): - goto = 3 # startblock - while True: - - if goto == 1: - v0 = space.is_true(w_n) - if v0 == True: - goto = 2 - else: - goto = 4 - - if goto == 2: - w_1 = space.add(w_0, w_n) - w_2 = space.sub(w_n, gi_1) - w_n, w_0 = w_2, w_1 - goto = 1 - continue - - if goto == 3: - w_n, w_0 = w_n_1, gi_0 - goto = 1 - continue - - if goto == 4: - return w_0 - - fastf_g = g - - g3dict = space.newdict() - gs___name__ = space.new_interned_str('__name__') - gs_app2interpexec = space.new_interned_str('app2interpexec') - space.setitem(g3dict, gs___name__, gs_app2interpexec) - gs_g = space.new_interned_str('g') - from pypy.interpreter import gateway - gfunc_g = space.wrap(gateway.interp2app(fastf_g, unwrap_spec=[gateway.ObjSpace, gateway.W_Root])) - space.setitem(g3dict, gs_g, gfunc_g) - gi_1 = space.wrap(1) - gi_0 = space.wrap(0) - return g3dict - -You see that actually a single function is produced: -``initapp2interpexec``. This is the function that you will call with a -space as argument. It defines a few functions and then does a number -of initialization steps, builds the global objects the function need, -and produces the PyPy function object ``gfunc_g``. - -The return value is ``g3dict``, which contains a module name and the -function we asked for. - -Let's have a look at the body of this code: The definition of ``g`` is -used as ``fast_g`` in the ``gateway.interp2app`` which constructs a -PyPy function object which takes care of argument unboxing (based on -the ``unwrap_spec``), and of invoking the original ``g``. - -We look at the definition of ``g`` itself which does the actual -computation. Comparing to the flowgraph, you see a code block for -every block in the graph. 
Since Python has no goto statement, the -jumps between the blocks are implemented by a loop that switches over -a ``goto`` variable. - -:: - - . if goto == 1: - v0 = space.is_true(w_n) - if v0 == True: - goto = 2 - else: - goto = 4 - -This is the implementation of the "``while n:``". There is no implicit state, -everything is passed over to the next block by initializing its -input variables. This directly resembles the nature of flowgraphs. -They are completely stateless. - - -:: - - . if goto == 2: - w_1 = space.add(w_0, w_n) - w_2 = space.sub(w_n, gi_1) - w_n, w_0 = w_2, w_1 - goto = 1 - continue - -The "``i = i + n``" and "``n = n - 1``" instructions. -You see how every instruction produces a new variable. -The state is again shuffled around by assigning to the -input variables ``w_n`` and ``w_0`` of the next target, block 1. - -Note that it is possible to rewrite this by re-using variables, -trying to produce nested blocks instead of the goto construction -and much more. The source would look much more like what we -used to write by hand. For the C backend, this doesn't make much -sense since the compiler optimizes it for us. For the Python interpreter it could -give a bit more speed. But this is a temporary format and will -get optimized anyway when we produce the executable. - -Interplevel Snippets in the Sources -+++++++++++++++++++++++++++++++++++ - -Code written in application space can consist of complete files -to be translated, or they -can be tiny snippets scattered all over a source file, similar -to our example from above. - -Translation of these snippets is done automatically and cached -in pypy/_cache with the modulename and the md5 checksum appended -to it as file name. If you have run your copy of pypy already, -this folder should exist and have some generated files in it. -These files consist of the generated code plus a little code -that auto-destructs the cached file (plus .pyc/.pyo versions) -if it is executed as __main__. 
On windows this means you can wipe -a cached code snippet clear by double-clicking it. Note also that -the auto-generated __init__.py file wipes the whole directory -when executed. diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -190,8 +190,7 @@ .. _`transformation`: **transformation** - Code that modifies flowgraphs to weave in `translation-aspects`_ - + Code that modifies flowgraphs to weave in translation aspects .. _`translation-time`: **translation-time** @@ -226,7 +225,6 @@ .. _`The RPython Typer`: translation.html#the-rpython-typer .. _`backends`: getting-started-dev.html#trying-out-the-translator .. _Tool: getting-started-dev.html#trying-out-the-translator -.. _`translation-aspects`: translation-aspects.html .. _`PyPy's garbage collectors`: garbage_collection.html .. _`Restricted Python`: coding-guide.html#restricted-python .. _PSF: http://www.python.org/psf/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -89,7 +89,6 @@ architecture.rst coding-guide.rst cpython_differences.rst - cleanup-todo.rst garbage_collection.rst interpreter.rst objspace.rst @@ -105,7 +104,6 @@ interpreter-optimizations.rst configuration.rst - low-level-encapsulation.rst parser.rst rlib.rst rtyper.rst diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -29,11 +29,8 @@ As of the 1.2 release, RPython_ programs can be translated into the following languages/platforms: C/POSIX, CLI/.NET -and Java/JVM (in addition, there's `a backend`_ that translates -`application-level`_ into `interpreter-level`_ code, but this is a special -case in several ways). +and Java/JVM. -.. _`a backend`: geninterp.html .. _`application-level`: coding-guide.html#application-level .. 
_`interpreter-level`: coding-guide.html#interpreter-level @@ -632,10 +629,6 @@ http://codespeak.net/pypy/trunk/pypy/translator/c/ -GenC is not really documented at the moment. The basic principle of creating -code from flow graphs is similar to the `Python back-end`_. See also -"Generating C code" in our `EU report about translation`_. - GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are implemented first. @@ -710,21 +703,6 @@ GenJVM is almost entirely the work of Niko Matsakis, who worked on it also as part of the Summer of PyPy program. -.. _`Python again`: -.. _`Python back-end`: - -The Interpreter-Level backend ------------------------------ - -http://codespeak.net/pypy/trunk/pypy/translator/geninterplevel.py - -Above, this backend was described as a "special case in several ways". One of -these ways is that the job it does is specific to PyPy's standard interpreter, -and the other is that it does not even use the annotator -- it works directly -the graphs produced by the Flow Object Space. - -See `geninterp's documentation `__. - .. _extfunccalls: External Function Calls diff --git a/pypy/doc/low-level-encapsulation.rst b/pypy/doc/low-level-encapsulation.rst deleted file mode 100644 --- a/pypy/doc/low-level-encapsulation.rst +++ /dev/null @@ -1,345 +0,0 @@ -.. include:: throwaway.rst - -============================================================ - Encapsulating low-level implementation aspects -============================================================ - -.. contents:: - - - -Abstract -======== - -It has always been a major goal of PyPy to not force implementation -decisions. 
This means that even after the implementation of the -standard interpreter [#]_ has been written we are still able to experiment -with different approaches to memory management or concurrency and to -target wildly different platforms such as the Java Virtual Machine or -a very memory-limited embedded environment. - -We do this by allowing the encapsulation of these low level aspects as -well defined parts of the translation process. - -In the following document, we give examples of aspects that have been -successfully encapsulated in more detail and contrast the potential of -our approach with CPython. - -.. [#] `standard interpreter`_ is our term for the code which - implements the Python language, i.e. the interpreter and the - standard object space. - - -Background -========== - -One of the better known significant modifications to CPython are -Christian Tismer's "stackless" patches [STK]_, which allow for far more -flexible control flow than the typical function call/return supported by -CPython. Originally implemented as a series of invasive patches, -Christian found that maintaining these patches as CPython itself was -further developed was time consuming to the point of no longer being -able to work on the new functionality that was the point of the -exercise. - -One solution would have been for the patches to become part of core -CPython but this was not done partly because the code that fully -enabled stackless required widespread modifications that made the code -harder to understand (as the "stackless" model contains control flow -that is not easily expressable in C, the implementation became much -less "natural" in some sense). 
- -With PyPy, however, it is possible to obtain this flexible control -flow whilst retaining transparent implementation code as the necessary -modifications can be implemented as a localized translation aspect, -and indeed this was done at the Paris sprint in a couple of days (as -compared to around six months for the original stackless patches). - -Of course, this is not the only aspect that can be so decided a -posteriori, during translation. - - -Translation aspects -=================== - -Our standard interpreter is implemented at a very high level of -abstraction. This has a number of happy consequences, among which is -enabling the encapsulation of language aspects as described in this -document. For example, the implementation code simply makes no -reference to memory management, which clearly gives the translator -complete freedom to decide about this aspect. This contrasts with -CPython where the decision to use reference counting is reflected tens -or even hundreds of times in each C source file in the codebase. - -As described in [ARCH]_, producing a Python implementation from the -source of our standard interpreter involves various stages: the -initialization code is run, the resulting code is annotated, typed and -finally translated. By the nature of the task, the encapsulation of -*low-level aspects* mainly affects the typer and the translation -process. At the coarsest level, the selection of target platform -involves writing a new backend -- still a significant task, but much -much easier than writing a complete implementation of Python! - -Other aspects affect different levels, as their needs require. The -remainder of this section describes a few aspects that we have -successfully encapsulated. - -An advantage of our approach is that any combination of aspects can be -freely selected, avoiding the problem of combinatorial explosion of -variants seen in manually written interpreters. 
- - -Stacklessness -------------- - -The stackless modifications are mostly implemented in the C backend, -with a single extra flow graph operation to influence some details of -the generated C code. The total changes only required about 300 lines -of source, vindicating our abstract approach. - -In stackless mode, the C backend generates functions that are -systematically extended with a small amount of bookkeeping code. This -allows the C code to save its own stack to the heap on demand, where it -can then be inspected, manipulated and eventually resumed. This is -described in more detail in [TA]_. In this way, unlimited (or more -precisely heap-limited) recursion is possible, even on operating systems -that limit the size of the C stack. Alternatively, a different saved -stack can be resumed, thus implementing soft context switches - -coroutines, or green threads with an appropriate scheduler. We reobtain -in this way all the major benefits of the original "stackless" patches. - -This effect requires a number of changes in each and every C function -that would be extremely tedious to write by hand: checking for the -signal triggering the saving of the stack, actually saving precisely the -currently active local variables, and when re-entering the function -check which variables are being restored and which call site is resumed. -In addition, a couple of global tables must be maintained to drive the -process. The key point is that we can fine-tune all these interactions -freely, without having to rewrite the whole code all the time but only -modifying the C backend (in addition, of course, to being able to change -at any time the high-level code that is the input of the translation -process). So far, this allowed us to find a style that does not hinder -the optimizations performed by the C compiler and so has only a minor -impact on performance in the normal case. 
- -Also note that the fact that the C stack can be fully saved into the -heap can tremendously simplify the portable implementation of garbage -collection: after the stack has been completely transferred to the heap, -there are no roots left on the stack. - - -Multiple Interpreters ---------------------- - -Another implementation detail that causes tension between functionality -and both code clarity and memory consumption in CPython is the issue of -multiple independent interpreters in the same process. In CPython there -is a partial implementation of this idea in the "interpreter state" API, -but the interpreters produced by this are not truly independent -- for -instance the dictionary that contains interned strings is implemented as -file-level static object, and is thus shared between the interpreters. -A full implementation of this idea would entirely eschew the use of file -level statics and place all interpreter-global data in some large -structure, which would hamper readability and maintainability. In -addition, in many situations it is necessary to determine which -interpreter a given object is "from" -- and this is not possible in -CPython largely because of the memory overhead that adding a 'interp' -pointer to all Python objects would create. - -In PyPy, all of our implementation code manipulates an explicit object -space instance, as described in [CODG]_. The situation of multiple -interpreters is thus handled automatically: if there is only one space -instance, it is regarded as a pre-constructed constant and the space -object pointer (though not its non-constant contents) disappears from -the produced source, i.e. from function arguments, local variables and -instance fields. If there are two or more such instances, a 'space' -attribute will be automatically added to all application objects (or -more precisely, it will not be removed by the translation process), the -best of both worlds. 
- - -Memory Management ------------------ - -As mentioned above, CPython's decision to use a garbage collector based -on reference counting is reflected throughout its source. In the -implementation code of PyPy, it is not, and in fact the standard -interpreter can currently be compiled to use a reference counted scheme -or the Boehm GC [BOEHM]_. Again, more details are in [TA]_. We also -have an experimental framework for developing custom exact GCs [GC]_, -but it is not yet integrated with the low-level translation back-ends. - -Another advantage of the aspect oriented approach shows itself most -clearly with this memory management aspect: that of correctness. -Although reference counting is a fairly simple scheme, writing code for -CPython requires that the programmer make a large number of -not-quite-trivial decisions about the refcounting code. Experience -suggests that mistakes will always creep in, leading to crashes or -leaks. While tools exist to help find these mistakes, it is surely -better to not write the reference count manipulations at all and this is -what PyPy's approach allows. Writing the code that emits the correct -reference count manipulations is surely harder than writing any one -piece of explicit refcounting code, but once it is done and tested, it -just works without further effort. - - -Concurrency ------------ - -The aspect of CPython's implementation that has probably caused more -discussion than any other mentioned here is that of the threading -model. Python has supported threads since version 1.5 with what is -commonly referred to as the "Global Interpreter Lock" or GIL; the -execution of bytecodes is serialized such that only one thread can be -executing Python code at one time. This has the benefit of being -relatively unintrusive and not too complex, but has the disadvantage -that multi-threaded, computation-bound Python code does not gain -performance on multi-processor machines. 
- -PyPy will offer the opportunity to experiment with different models, -although currently we only offer a version with no thread support and -another with a GIL-like model [TA]_. (We also plan to support soon -"green" software-only threads in the Stackless model described above, -but obviously this would not solve the multi-processor scalability -issue.) - -The future work in this direction is to collect the numerous possible -approaches that have between thought out along the years and -e.g. presented on the CPython development mailing list. Most of them -have never been tried out in CPython, for lack of necessary resources. -A number of them are clearly easy to try out in PyPy, at least in an -experimental version that would allow its costs to be assessed -- for -example, various forms of object-level locking. - - -Evaluation Strategy -------------------- - -Possibly the most radical aspect to tinker with is the evaluation -strategy. The thunk object space [OBJS]_ wraps the standard object -space to allow the production of "lazily computed objects", i.e. objects -whose values are only calculated when needed. It also allows global and -total replacement of one object with another. - -The thunk object space is mostly meant as an example of what our -approach can achieve -- the combination of side-effects and lazy -evaluation is not easy to understand. This demonstration is important -because this level of flexibility will be required to implement future -features along the lines of Prolog-style logic variables, transparent -persistency, object distribution across several machines, or -object-level security. - - -Experimental results -==================== - -All the aspects described in the previous chapter have been successfully -implemented and are available since the release 0.7 or 0.8 of PyPy. - -We have conducted preliminary experimental measures of the performance -impact of enabling each of these features in the compiled PyPy -interpreter. 
We present below the current results as of October 2005. -Most figures appear to vary from machine to machine. Given that the -generated code is large (it produce a binary of 5.6MB on a Linux -Pentium), there might be locality and code ordering issues that cause -important cache effects. - -We have not particularly optimized any of these aspects yet. Our goal -is primarily to prove that the whole approach is worthwhile, and rely on -future work and push for external contributions to implement -state-of-the-art techniques in each of these domains. - -Stacklessness - - Producing Stackless-style C code means that all the functions of the - PyPy interpreter that can be involved in recursions contain stack - bookkeeping code (leaf functions, functions calling only leaves, - etc. do not need to use this style). The current performance impact - is to make PyPy slower by about 8%. A couple of minor pending - optimizations could reduce this figure a bit. We expect the rest of - the performance impact to be mainly caused by the increase of size - of the generated executable (+28%). - -Multiple Interpreters - - A binary that allowed selection between two copies of the standard - object space with a command line switch was about 10% slower and - about 40% larger than the default. Most of the extra size is - likely accounted for by the duplication of the large amount of - prebuilt data involved in an instance of the standard object - space. - -Memory Management - - The [Boehm] GC is well-optimized and produces excellent results. By - comparison, using reference counting instead makes the interpreter - twice as slow. 
This is almost certainly due to the naive approach - to reference counting used so far, which updates the counter far - more often than strictly necessary; we also still have a lot of - objects that would theoretically not need a reference counter, - either because they are short-lived or because we can prove that - they are "owned" by another object and can share its lifetime. In - the long run, it will be interesting to see how far this figure can - be reduced, given past experiences with CPython which seem to show - that reference counting is a viable idea for Python interpreters. - -Concurrency - - No experimental data available so far. Just enabling threads - currently creates an overhead that hides the real costs of locking. - -Evaluation Strategy - - When translated to C code, the Thunk object space has a global - performance impact of 6%. The executable is 13% bigger (probably - due to the arguably excessive inlining we perform). - -We have described five aspects in this document, each currently with -two implementation choices, leading to 32 possible translations. We -have not measured the performance of each variant, but the few we have -tried suggests that the performance impacts are what one would expect, -e.g. a translated stackless binary using the thunk object space would -be expected to be about 1.06 x 1.08 ~= 1.14 times slower than the -default and was found to be 1.15 times slower. - - -Conclusion -========== - -Although still a work in progress, we believe that the successes we -have had in encapsulating implementation aspects justifies the -approach we have taken. In particular, the relative ease of -implementing the translation aspects described in this paper -- as -mentioned above, the stackless modifications took only a few days -- -means we are confident that it will be easily possible to encapsulate -implementation aspects we have not yet considered. - - -References -========== - -.. 
[ARCH] `Architecture Overview`_, PyPy documentation, 2003-2005 - -.. [BOEHM] `Boehm-Demers-Weiser garbage collector`_, a garbage collector - for C and C++, Hans Boehm, 1988-2004 - -.. [CODG] `Coding Guide`_, PyPy documentation, 2003-2005 - -.. [GC] `Garbage Collection`_, PyPy documentation, 2005 - -.. [OBJS] `Object Spaces`_, PyPy documentation, 2003-2005 - -.. [STK] `Stackless Python`_, a Python implementation that does not use - the C stack, Christian Tismer, 1999-2004 - -.. [TA] `Memory management and threading models as translation aspects`_, - PyPy documentation (and EU Deliverable D05.3), 2005 - -.. _`standard interpreter`: architecture.html#standard-interpreter -.. _`Architecture Overview`: architecture.html -.. _`Coding Guide`: coding-guide.html -.. _`Garbage Collection`: garbage_collection.html -.. _`Object Spaces`: objspace.html -.. _`Stackless Python`: http://www.stackless.com -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _`Memory management and threading models as translation aspects`: translation-aspects.html diff --git a/pypy/doc/externaltools.rst b/pypy/doc/externaltools.rst deleted file mode 100644 --- a/pypy/doc/externaltools.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. 
include:: throwaway.rst - -External tools&programs needed by PyPy -====================================== - -Tools needed for testing ------------------------- - -These tools are used in various ways by PyPy tests; if they are not found, -some tests might be skipped, so they need to be installed on every buildbot -slave to be sure we actually run all tests: - - - Mono (versions 1.2.1.1 and 1.9.1 known to work) - - - Java/JVM (preferably sun-jdk; version 1.6.0 known to work) - - - Jasmin >= 2.2 (copy it from wyvern, /usr/local/bin/jasmin and /usr/local/share/jasmin.jar) - - - gcc - - - make - - - Some libraries (these are Debian package names, adapt as needed): - - * ``python-dev`` - * ``python-ctypes`` - * ``libffi-dev`` - * ``libz-dev`` (for the optional ``zlib`` module) - * ``libbz2-dev`` (for the optional ``bz2`` module) - * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) - * ``libgc-dev`` (only when translating with `--opt=0, 1` or `size`) diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -49,9 +49,6 @@ `papers, talks and related projects`_ lists presentations and related projects as well as our published papers. -`ideas for PyPy related projects`_ which might be a good way to get -into PyPy. - `PyPy video documentation`_ is a page linking to the videos (e.g. of talks and introductions) that are available. diff --git a/pypy/doc/cleanup-todo.rst b/pypy/doc/cleanup-todo.rst deleted file mode 100644 --- a/pypy/doc/cleanup-todo.rst +++ /dev/null @@ -1,30 +0,0 @@ - -PyPy cleanup areas -================== - -This is a todo list that lists various areas of PyPy that should be cleaned up -(for whatever reason: less mess, less code duplication, etc). 
- -translation toolchain ---------------------- - - - low level backends should share more code - - all backends should have more consistent interfaces - - geninterp is a hack - - delegate finding type stuff like vtables etc to GC, cleaner interface for rtti, - simplify translator/c/gc.py - - clean up the tangle of including headers in the C backend - - make approach for loading modules more sane, mixedmodule capture - too many platform dependencies especially for pypy-cli - - review pdbplus, especially the graph commands, also in the light of - https://codespeak.net/issue/pypy-dev/issue303 and the fact that - we can have more than one translator/annotator around (with the - timeshifter) - -interpreter ------------ - - - review the things implemented at applevel whether they are performance- - critical - - - review CPython regression test suite, enable running tests, fix bugs diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst deleted file mode 100644 --- a/pypy/doc/project-ideas.rst +++ /dev/null @@ -1,91 +0,0 @@ -Independent project ideas relating to PyPy -========================================== - -PyPy allows experimentation in many directions -- indeed facilitating -experimentation in language implementation was one of the main -motivations for the project. This page is meant to collect some ideas -of experiments that the core developers have not had time to perform -yet and also do not require too much in depth knowledge to get started -with. - -Feel free to suggest new ideas and discuss them in #pypy on the freenode IRC -network or the pypy-dev mailing list (see the home_ page). - ------------ - -.. contents:: - - - -JIT back-ends --------------------------------- - -PyPy's Just-In-Time compiler relies on backends for actual code -generation. We have so far a 32-bit Intel backend, and a CLI one. There is -Summer of Code project for 64bit (x86_64) backend, but other options -(ARM, llvm) remain open. - -.. _distribution: -.. 
_persistence: - -Extensions of the Python language ---------------------------------- - -+----------------------------------------------------------------------+ -| :NOTE: | -| | -| The ideas in this paragraph are marked as "experimental". We may | -| or may not be interested in helping you out. You are warned :-) | -| | -+----------------------------------------------------------------------+ - -One of the advantages of PyPy's implementation is that the Python-level type -of an object and its implementation are completely independent. This should -allow a much more intuitive interface to, for example, objects that are backed -by a persistent store. The `transparent proxy`_ objects are a key step in this -direction; now all that remains is to implement the interesting bits :-) - -An example project might be to implement functionality akin to the `ZODB's -Persistent class`_, without the need for the _p_changed hacks, and in pure -Python code (should be relatively easy on top of transparent proxy). - -Another example would be to implement a multi-CPU extension that internally -uses several processes and uses transparent proxies to share object views. - -Other ideas are to do something interesting with sandboxing_; or to -work more on the Stackless_ features (e.g. integrate it with the JIT); -or revive the logic object space, which tried to bring unification-like -features to Python. - -.. _sandboxing: sandbox.html -.. _Stackless: stackless.html - - -Other languages ---------------- - -Improve one of the `existing interpreters`__, or start a new one. -Experiment with the JIT compiler generator. - -.. __: http://codespeak.net/svn/pypy/lang/ - - -Or else... ----------- - -...or whatever else interests you! - -Feel free to mention your interest and discuss these ideas on the `pypy-dev -mailing list`_ or on the #pypy channel on irc.freenode.net. -You can also have a look around our documentation_. - - -.. 
_`efficient propagators for specialized finite domains`: http://codespeak.net/svn/pypy/extradoc/soc-2006/constraints.txt -.. _`object spaces`: objspace.html -.. _`code templating solution`: http://codespeak.net/svn/pypy/extradoc/soc-2006/code-templating.txt - -.. _documentation: docindex.html -.. _home: index.html -.. _`pypy-dev mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev -.. _`ZODB's Persistent class`: http://www.zope.org/Documentation/Books/ZDG/current/Persistence.stx -.. _`transparent proxy`: objspace-proxies.html#tproxy diff --git a/pypy/doc/translation-aspects.rst b/pypy/doc/translation-aspects.rst deleted file mode 100644 --- a/pypy/doc/translation-aspects.rst +++ /dev/null @@ -1,481 +0,0 @@ -.. include:: throwaway.rst - -========================================================================================== -Memory management and threading models as translation aspects -- solutions and challenges -========================================================================================== - -.. contents:: - - -Introduction -============= - -One of the goals of the PyPy project is to have the memory and concurrency -models flexible and changeable without having to reimplement the -interpreter manually. In fact, PyPy, by the time of the 0.8 release contains code for memory -management and concurrency models which allows experimentation without -requiring early design decisions. This document describes many of the more -technical details of the current state of the implementation of the memory -object model, automatic memory management and concurrency models and describes -possible future developments. - - -The low level object model -=========================== - -One important part of the translation process is *rtyping* [DLT]_, [TR]_. -Before that step all objects in our flow graphs are annotated with types at the -level of the RPython type system which is still quite high-level and -target-independent. 
During rtyping they are transformed into objects that -match the model of the specific target platform. For C or C-like targets this -model consists of a set of C-like types like structures, arrays and functions -in addition to primitive types (integers, characters, floating point numbers). -This multi-stage approach gives a lot of flexibility in how a given object is -represented at the target's level. The RPython process can decide what -representation to use based on the type annotation and on the way the object is -used. - -In the following the structures used to represent RPython classes are described. -There is one "vtable" per RPython class, with the following structure: The root -class "object" has a vtable of the following type (expressed in a C-like -syntax):: - - struct object_vtable { - struct object_vtable* parenttypeptr; - RuntimeTypeInfo * rtti; - Signed subclassrange_min; - Signed subclassrange_max; - array { char } * name; - struct object * instantiate(); - } - -The structure members ``subclassrange_min`` and ``subclassrange_max`` are used -for subclass checking (see below). Every other class X, with parent Y, has the -structure:: - - struct vtable_X { - struct vtable_Y super; // inlined - ... // extra class attributes - } - -The extra class attributes usually contain function pointers to the methods -of that class, although the data class attributes (which are supported by the -RPython object model) are stored there. - -The type of the instances is:: - - struct object { // for instances of the root class - struct object_vtable* typeptr; - } - - struct X { // for instances of every other class - struct Y super; // inlined - ... // extra instance attributes - } - -The extra instance attributes are all the attributes of an instance. - -These structure layouts are quite similar to how classes are usually -implemented in C++. 
- -Subclass checking ------------------ - -The way we do subclass checking is a good example of the flexibility provided -by our approach: in the beginning we were using a naive linear lookup -algorithm. Since subclass checking is quite a common operation (it is also used -to check whether an object is an instance of a certain class), we wanted to -replace it with the more efficient relative numbering algorithm (see [PVE]_ for -an overview of techniques). This was a matter of changing just the appropriate -code of the rtyping process to calculate the class-ids during rtyping and -insert the necessary fields into the class structure. It would be similarly -easy to switch to another implementation. - -Identity hashes ---------------- - -In the RPython type system, class instances can be used as dictionary keys using -a default hash implementation based on identity, which in practice is -implemented using the memory address. This is similar to CPython's behavior -when no user-defined hash function is present. The annotator keeps track of the -classes for which this hashing is ever used. - -One of the peculiarities of PyPy's approach is that live objects are analyzed -by our translation toolchain. This leads to the presence of instances of RPython -classes that were built before the translation started. These are called -"pre-built constants" (PBCs for short). During rtyping, these instances must be -converted to the low level model. One of the problems with doing this is that -the standard hash implementation of Python is to take the id of an object, which - -is just the memory address. If the RPython program explicitly captures the -hash of a PBC by storing it (for example in the implementation of a data -structure) then the stored hash value will not match the value of the object's -address after translation. 
- -To prevent this the following strategy is used: for every class whose instances -are hashed somewhere in the program (either when storing them in a -dictionary or by calling the hash function) an extra field is introduced in the -structure used for the instances of that class. For PBCs of such a class this -field is used to store the memory address of the original object and new objects -have this field initialized to zero. The hash function for instances of such a -class stores the object's memory address in this field if it is zero. The -return value of the hash function is the content of the field. This means that -instances of such a class that are converted PBCs retain the hash values they -had before the conversion whereas new objects of the class have their memory -address as hash values. A strategy along these lines would in any case have been -required if we ever switch to using a copying garbage collector. - -Cached functions with PBC arguments ------------------------------------- - -As explained in [DLT]_ the annotated code can contain -functions from a finite set of PBCs to something else. The set itself has to be -finite but its content does not need to be provided explicitly but is discovered -as the annotation of the input argument by the annotator itself. This kind of -function is translated by recording the input-result relationship by calling -the function concretely at annotation time, and adding a field to the PBCs in -the set and emitting code reading that field instead of the function call. - -Changing the representation of an object ----------------------------------------- - -One example of the flexibility the RTyper provides is how we deal with lists. -Based on information gathered by the annotator the RTyper chooses between two -different list implementations. If a list never changes its size after creation, -a low-level array is used directly. 
For lists which might be resized, a -representation consisting of a structure with a pointer to an array is used, -together with over-allocation. - -We plan to use similar techniques to use tagged pointers instead of using boxing -to represent builtin types of the PyPy interpreter such as integers. This would -require attaching explicit hints to the involved classes. Field access would -then be translated to the corresponding masking operations. - - -Automatic Memory Management Implementations -============================================ - -The whole implementation of the PyPy interpreter assumes automatic memory -management, e.g. automatic reclamation of memory that is no longer used. The -whole analysis toolchain also assumes that memory management is being taken -care of -- only the backends have to concern themselves with that issue. For -backends that target environments that have their own garbage collector, like -.NET or Java, this is not an issue. For other targets like C -the backend has to produce code that uses some sort of garbage collection. - -This approach has several advantages. It makes it possible to target different -platforms, with and without integrated garbage collection. Furthermore, the -interpreter implementation is not complicated by the need to do explicit memory -management everywhere. Even more important the backend can optimize the memory -handling to fit a certain situation (like a machine with very restricted -memory) or completely replace the memory management technique or memory model -with a different one without the need to change source code. Additionally, -the backend can use information that was inferred by the rest of the toolchain -to improve the quality of memory management. - -Using the Boehm garbage collector ------------------------------------ - -Currently there are two different garbage collectors implemented in the C -backend (which is the most complete backend right now). 
One of them uses the -existing Boehm-Demers-Weiser garbage collector [BOEHM]_. For every memory -allocating operation in a low level flow graph the C backend introduces a call -to a function of the boehm collector which returns a suitable amount of memory. -Since the C backend has a lot of information available about the data structure -being allocated it can choose the memory allocation function out of the Boehm -API that fits best. For example, for objects that do not contain references to -other objects (e.g. strings) there is a special allocation function which -signals to the collector that it does not need to consider this memory when -tracing pointers. - -Using the Boehm collector has disadvantages as well. The problems stem from the -fact that the Boehm collector is conservative which means that it has to -consider every word in memory as a potential pointer. Since PyPy's toolchain -has complete knowledge of the placement of data in memory we can generate an -exact garbage collector that considers only genuine pointers. - -Using a simple reference counting garbage collector ----------------------------------------------------- - -The other implemented garbage collector is a simple reference counting scheme. -The C backend inserts a reference count field into every structure that has to be -handled by the garbage collector and puts increment and decrement operations -for this reference count into suitable places in the resulting C code. After -every reference decrement operation a check is performed whether the reference -count has dropped to zero. If this is the case the memory of the object will be -reclaimed after the reference counts of the objects the original object -refers to are decremented as well. - -The current placement of reference counter updates is far from optimal: The -reference counts are updated much more often than theoretically necessary (e.g. -sometimes a counter is increased and then immediately decreased again). 
-Objects passed into a function as arguments can almost always use a "trusted reference", -because the call-site is responsible to create a valid reference. -Furthermore some more analysis could show that some objects don't need a -reference counter at all because they either have a very short, foreseeable -life-time or because they live exactly as long as another object. - -Another drawback of the current reference counting implementation is that it -cannot deal with circular references, which is a fundamental flaw of reference -counting memory management schemes in general. CPython solves this problem by -having special code that handles circular garbage which PyPy lacks at the -moment. This problem has to be addressed in the future to make the reference -counting scheme a viable garbage collector. Since reference counting is quite -successfully used by CPython it will be interesting to see how far it can be -optimized for PyPy. - -Simple escape analysis to remove memory allocation ---------------------------------------------------- - -We also implemented a technique to reduce the amount of memory allocation. -Sometimes it is possible to deduce from the flow graphs that an object lives -exactly as long as the stack frame of the function it is allocated in. -This happens if no pointer to the object is stored into another object and if -no pointer to the object is returned from the function. If this is the case and -if the size of the object is known in advance the object can be allocated on -the stack. To achieve this, the object is "exploded", that means that for every -element of the structure a new variable is generated that is handed around in -the graph. Reads from elements of the structure are removed and just replaced -by one of the variables, writes by assignments to same. 
- -Since quite a lot of objects are allocated in small helper functions, this -simple approach which does not track objects across function boundaries only -works well in the presence of function inlining. - -A general garbage collection framework --------------------------------------- - -In addition to the garbage collectors implemented in the C backend we have also -started writing a more general toolkit for implementing exact garbage -collectors in Python. The general idea is to express the garbage collection -algorithms in Python as well and translate them as part of the translation -process to C code (or whatever the intended platform is). - -To be able to access memory in a low level manner there are special ``Address`` -objects that behave like pointers to memory and can be manipulated accordingly: -it is possible to read/write to the location they point to a variety of data -types and to do pointer arithmetic. These objects are translated to real -pointers and the appropriate operations. When run on top of CPython there is a -*memory simulator* that makes the address objects behave like they were -accessing real memory. In addition the memory simulator contains a number of -consistency checks that expose common memory handling errors like dangling -pointers, uninitialized memory, etc. - -At the moment we have three simple garbage collectors implemented for this -framework: a simple copying collector, a mark-and-sweep collector and a -deferred reference counting collector. These garbage collectors work when run on -top of the memory simulator, but at the moment it is not yet possible to translate -PyPy to C with them. This is because it is not easy to -find the root pointers that reside on the C stack -- both because the C stack layout is -heavily platform dependent, and also due to the possibility of roots that are not -only on the stack but also hiding in registers (which would give a problem for *moving -garbage collectors*). 
- -There are several possible solutions for this problem: One -of them is to not use C compilers to generate machine code, so that the stack -frame layout gets into our control. This is one of the tasks that need to be -tackled in phase 2, as directly generating assembly is needed anyway for a -just-in-time compiler. The other possibility (which would be much easier to -implement) is to move all the data away from the stack to the heap -before collecting garbage, as described in section "Stackless C code" below. - -Concurrency Model Implementations -============================================ - -At the moment we have implemented two different concurrency models, and the -option to not support concurrency at all -(another proof of the modularity of our approach): -threading with a global interpreter lock and a "stackless" model. - -No threading -------------- - -By default, multi-threading is not supported at all, which gives some small -benefits for single-threaded applications since even in the single-threaded -case there is some overhead if threading capabilities are built into -the interpreter. - -Threading with a Global Interpreter Lock ------------------------------------------- - -Right now, there is one non-trivial threading model implemented. It follows -the threading implementation of CPython and thus uses a global interpreter -lock. This lock prevents any two threads from interpreting python code at -the same time. The global interpreter lock is released around calls to blocking I/O -functions. This approach has a number of advantages: it gives very little -runtime penalty for single-threaded applications, makes many of the common uses -for threading possible, and it is relatively easy to implement and maintain. It has -the disadvantage that multiple threads cannot be distributed across multiple -processors. 
- -To make this threading-model usable for I/O-bound applications, the global -interpreter lock should be released around blocking external function calls -(which is also what CPython does). This has been partially implemented. - - -Stackless C code ------------------ - -"Stackless" C code is C code that only uses a bounded amount of -space in the C stack, and that can generally obtain explicit -control of its own stack. This is commonly known as "continuations", -or "continuation-passing style" code, although in our case we will limit -ourselves to single-shot continuations, i.e. continuations that are -captured and subsequently will be resumed exactly once. - -The technique we have implemented is based on the recurring idea -of emulating this style via exceptions: a specific program point can -generate a pseudo-exception whose purpose is to unwind the whole C stack -in a restartable way. More precisely, the "unwind" exception causes -the C stack to be saved into the heap in a compact and explicit -format, as described below. It is then possible to resume only the -innermost (most recent) frame of the saved stack -- allowing unlimited -recursion on OSes that limit the size of the C stack -- or to resume a -different previously-saved C stack altogether, thus implementing -coroutines or light-weight threads. - -In our case, exception handling is always explicit in the generated code: -the C backend puts a cheap check -after each call site to detect if the callee exited -normally or generated an exception. So when compiling functions in -stackless mode, the generated exception handling code special-cases the -new "unwind" exception. This exception causes the current function to -respond by saving its local variables to a heap structure (a linked list -of records, one per stack frame) and then propagating the exception -outwards. Eventually, at the end of the frame chain, the outermost -function is a manually-written dispatcher that catches the "unwind" -exception. 
- -At this point, the whole C stack is stored away in the heap. This is a -very interesting state in itself, because precisely there is no C stack -below the dispatcher -left. It is this which will allow us to write all the algorithms -in a portable way, that -normally require machine-specific code to inspect the stack, -in particular garbage collectors. - -To continue execution, the dispatcher can resume either the freshly saved or a -completely different stack. Moreover, it can resume directly the innermost -(most recent) saved frame in the heap chain, without having to resume all -intermediate frames first. This not only makes stack switches fast, but it -also allows the frame to continue to run on top of a clean C stack. When that -frame eventually exits normally, it returns to the dispatcher, which then -invokes the previous (parent) saved frame, and so on. We insert stack checks -before calls that can lead to recursion by detecting cycles in the call graph. -These stack checks copy the stack to the heap (by raising the special -exception) if it is about to grow deeper than a certain level. -As a different point of view, the C stack can also be considered as a cache -for the heap-based saved frames in this model. When we run out -of C stack space, we flush the cache. When the cache is empty, we fill it with -the next item from the heap. - -To give the translated program some amount of control over the -heap-based stack structures and over the top-level dispatcher that jumps -between them, there are a few "external" functions directly implemented -in C. These functions provide an elementary interface, on top of which -useful abstractions can be implemented, like: - -* coroutines: explicitly switching code, similar to Greenlets [GREENLET]_. - -* "tasklets": cooperatively-scheduled microthreads, as introduced in - Stackless Python [STK]_. - -* implicitly-scheduled (preemptive) microthreads, also known as green threads. 
- -An important property of the changes in all the generated C functions is -that they are written in a way that does only minimally degrade their performance in -the non-exceptional case. Most optimizations performed by C compilers, -like register allocation, continue to work... - -The following picture shows a graph function together with the modifications -necessary for the stackless style: the check whether the stack is too big and -should be unwound, the check whether we are in the process of currently storing -away the stack and the check whether the call to the function is not a regular -call but a reentry call. - -.. graphviz:: image/stackless_informal.dot - :scale: 70 - - -Future work -================ - -open challenges for phase 2: - -Garbage collection ------------------- - -One of the biggest missing features of our current garbage collectors is -finalization. At present finalizers are simply not invoked if an object is -freed by the garbage collector. Along the same lines weak references are not -supported yet. It should be possible to implement these with a reasonable -amount of effort for reference counting as well as the Boehm collector (which -provides the necessary hooks). - -Integrating the now simulated-only GC framework into the rtyping process and -the code generation will require considerable effort. It requires being able to -keep track of the GC roots which is hard to do with portable C code. One -solution would be to use the "stackless" code since it can move the stack -completely to the heap. We expect that we can implement GC read and write -barriers as function calls and rely on inlining to make them more efficient. - -We may also spend some time on improving the existing reference counting -implementation by removing unnecessary incref-decref pairs and identifying -trustworthy references. A bigger task would -be to add support for detecting circular references. 
- - -Threading model ---------------- - -One of the interesting possibilities that stackless offers is to implement *green -threading*. This would involve writing a scheduler and some preemption logic. - -We should also investigate other threading models based on operating system -threads with various granularities of locking for access of shared objects. - -Object model ------------- - -We also might want to experiment with more sophisticated structure inlining. -Sometimes it is possible to find out that one structure object -allocated on the heap lives exactly as long as another structure object on the -heap pointing to it. If this is the case it is possible to inline the first -object into the second. This saves the space of one pointer and avoids -pointer-chasing. - - -Conclusion -=========== - -As concretely shown with various detailed examples, our approach gives us -flexibility and lets us choose various aspects at translation time instead -of encoding them into the implementation itself. - -References -=========== - -.. [BOEHM] `Boehm-Demers-Weiser garbage collector`_, a garbage collector - for C and C++, Hans Boehm, 1988-2004 -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ - -.. [GREENLET] `Lightweight concurrent programming`_, py-lib Documentation 2003-2005 -.. _`Lightweight concurrent programming`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt - -.. [STK] `Stackless Python`_, a Python implementation that does not use - the C stack, Christian Tismer, 1999-2004 -.. _`Stackless Python`: http://www.stackless.com - -.. [TR] `Translation`_, PyPy documentation, 2003-2005 -.. _`Translation`: translation.html - -.. [LE] `Encapsulating low-level implementation aspects`_, - PyPy documentation (and EU deliverable D05.4), 2005 -.. _`Encapsulating low-level implementation aspects`: low-level-encapsulation.html - -.. 
[DLT] `Compiling dynamic language implementations`_, - PyPy documentation (and EU deliverable D05.1), 2005 -.. _`Compiling dynamic language implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf - -.. [PVE] `Simple and Efficient Subclass Tests`_, Jonathan Bachrach, Draft submission to ECOOP-02, 2001 -.. _`Simple and Efficient Subclass Tests`: http://people.csail.mit.edu/jrb/pve/pve.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -183,13 +183,11 @@ sprint. Coming to a sprint is usually also the best way to get into PyPy development. -If you want to start on your own, take a look at the list of `project -suggestions`_. If you get stuck or need advice, `contact us`_. Usually IRC is +If you get stuck or need advice, `contact us`_. Usually IRC is the most immediate way to get feedback (at least during some parts of the day; many PyPy developers are in Europe) and the `mailing list`_ is better for long discussions. -.. _`project suggestions`: project-ideas.html .. _`contact us`: index.html .. 
_`mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -12,24 +12,16 @@ distribution.rst - externaltools.rst - - geninterp.rst - objspace-proxies.rst old_news.rst - project-ideas.rst - rffi.rst sandbox.rst statistic/index.rst - translation-aspects.rst - docindex.rst svn-help.rst From commits-noreply at bitbucket.org Mon Apr 25 16:25:43 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 16:25:43 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110425142543.545B9282C1E@codespeak.net> Author: Dario Bertini Branch: documentation-cleanup Changeset: r43596:729f6067755b Date: 2011-04-25 16:24 +0200 http://bitbucket.org/pypy/pypy/changeset/729f6067755b/ Log: merge heads diff --git a/pypy/doc/statistic/style.css b/pypy/doc/statistic/style.css deleted file mode 100644 --- a/pypy/doc/statistic/style.css +++ /dev/null @@ -1,1083 +0,0 @@ -body,body.editor,body.body { - font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif; - background: White; - color: Black; -} - -a, a.reference { - text-decoration: none; -} -a[href]:hover { text-decoration: underline; } - -img { - border: none; - vertical-align: middle; -} - -p, div.text { - text-align: left; - line-height: 1.5em; - margin: 0.5em 0em 0em 0em; -} - - - -p a:active { - color: Red; - background-color: transparent; -} - -p img { - border: 0; - margin: 0; -} - -img.inlinephoto { - padding: 0; - padding-right: 1em; - padding-top: 0.7em; - float: left; -} - -hr { - clear: both; - height: 1px; - color: #8CACBB; - background-color: transparent; -} - - -ul { - line-height: 1.5em; - /*list-style-image: url("bullet.gif"); */ - margin-left: 1.5em; - padding:0; -} - -ol { - line-height: 1.5em; - margin-left: 1.5em; - padding:0; -} - -ul a, ol a { - text-decoration: underline; -} - -dl { -} - -dt { - font-weight: bold; -} - -dd { - line-height: 
1.5em; - margin-bottom: 1em; -} - -blockquote { - font-family: Times, "Times New Roman", serif; - font-style: italic; - font-size: 120%; -} - -code { - color: Black; - /*background-color: #dee7ec;*/ - background-color: #cccccc; -} - -pre { - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; - overflow: auto; -} - - -.netscape4 { - display: none; -} - -/* main page styles */ - -/*a[href]:hover { color: black; text-decoration: underline; } -a[href]:link { color: black; text-decoration: underline; } -a[href] { color: black; text-decoration: underline; } -*/ - -span.menu_selected { - color: black; - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; - background-color: #cccccc; -} - - -a.menu { - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; -} - -a.menu[href]:visited, a.menu[href]:link{ - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; -} - -a.menu[href]:hover { - /*color: black;*/ -} - -div.project_title{ - /*border-spacing: 20px;*/ - font: 160% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: center; - padding-bottom: 0.3em; -} - -a.wikicurrent { - font: 100% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: middle; -} - - -table.body { - border: 0; - /*padding: 0; - border-spacing: 0px; - border-collapse: separate; - */ -} - -td.page-header-left { - padding: 5px; - /*border-bottom: 1px solid #444444;*/ -} - -td.page-header-top { - padding: 0; - - /*border-bottom: 1px solid #444444;*/ -} - -td.sidebar { - padding: 1 0 0 1; -} - -td.sidebar p.classblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeee; -} - -td.sidebar p.userblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeff; -} - 
-td.content { - padding: 1 5 1 5; - vertical-align: top; - width: 100%; -} - -p.ok-message { - background-color: #22bb22; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -p.error-message { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} - -p:first-child { - margin: 0 ; - padding: 0; -} - -/* style for forms */ -table.form { - padding: 2; - border-spacing: 0px; - border-collapse: separate; -} - -table.form th { - color: #333388; - text-align: right; - vertical-align: top; - font-weight: normal; -} -table.form th.header { - font-weight: bold; - background-color: #eeeeff; - text-align: left; -} - -table.form th.required { - font-weight: bold; -} - -table.form td { - color: #333333; - empty-cells: show; - vertical-align: top; -} - -table.form td.optional { - font-weight: bold; - font-style: italic; -} - -table.form td.html { - color: #777777; -} - -/* style for lists */ -table.list { - border-spacing: 0px; - border-collapse: separate; - vertical-align: top; - padding-top: 0; - width: 100%; -} - -table.list th { - padding: 0 4 0 4; - color: #404070; - background-color: #eeeeff; - border-right: 1px solid #404070; - border-top: 1px solid #404070; - border-bottom: 1px solid #404070; - vertical-align: top; - empty-cells: show; -} -table.list th a[href]:hover { color: #404070 } -table.list th a[href]:link { color: #404070 } -table.list th a[href] { color: #404070 } -table.list th.group { - background-color: #f4f4ff; - text-align: center; - font-size: 120%; -} - -table.list td { - padding: 0 4 0 4; - border: 0 2 0 2; - border-right: 1px solid #404070; - color: #404070; - background-color: white; - vertical-align: top; - empty-cells: show; -} - -table.list tr.normal td { - background-color: white; - white-space: nowrap; -} - -table.list tr.alt td { - background-color: #efefef; - white-space: nowrap; -} - -table.list td:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list 
th:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list tr.navigation th { - text-align: right; -} -table.list tr.navigation th:first-child { - border-right: none; - text-align: left; -} - - -/* style for message displays */ -table.messages { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.messages th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.messages th { - font-weight: bold; - color: black; - text-align: left; - border-bottom: 1px solid #afafaf; -} - -table.messages td { - font-family: monospace; - background-color: #efefef; - border-bottom: 1px solid #afafaf; - color: black; - empty-cells: show; - border-right: 1px solid #afafaf; - vertical-align: top; - padding: 2 5 2 5; -} - -table.messages td:first-child { - border-left: 1px solid #afafaf; - border-right: 1px solid #afafaf; -} - -/* style for file displays */ -table.files { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.files th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.files th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -table.files td { - font-family: monospace; - empty-cells: show; -} - -/* style for history displays */ -table.history { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.history th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; - font-size: 100%; -} - -table.history th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; - font-size: 90%; -} - -table.history td { - font-size: 90%; - vertical-align: top; - empty-cells: show; -} - - -/* style for class list */ -table.classlist { - border-spacing: 0px; - border-collapse: 
separate; - width: 100%; -} - -table.classlist th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.classlist th { - font-weight: bold; - text-align: left; -} - - -/* style for class help display */ -table.classhelp { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classhelp th { - font-weight: bold; - text-align: left; - color: #707040; -} - -table.classhelp td { - padding: 2 2 2 2; - border: 1px solid black; - text-align: left; - vertical-align: top; - empty-cells: show; -} - - -/* style for "other" displays */ -table.otherinfo { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.otherinfo th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.otherinfo th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -input { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - -select { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - - -a.nonexistent { - color: #FF2222; -} -a.nonexistent:visited { - color: #FF2222; -} -a.external { - color: #AA6600; -} - -/* -dl,ul,ol { - margin-top: 1pt; -} -tt,pre { - font-family: Lucida Console,Courier New,Courier,monotype; - font-size: 12pt; -} -pre.code { - margin-top: 8pt; - margin-bottom: 8pt; - background-color: #FFFFEE; - white-space:pre; - border-style:solid; - border-width:1pt; - border-color:#999999; - color:#111111; - padding:5px; - width:100%; -} -*/ -div.diffold { - background-color: #FFFF80; - border-style:none; - border-width:thin; - width:100%; -} -div.diffnew { - background-color: #80FF80; - border-style:none; - border-width:thin; - 
width:100%; -} -div.message { - margin-top: 6pt; - background-color: #E8FFE8; - border-style:solid; - border-width:1pt; - border-color:#999999; - color:#440000; - padding:5px; - width:100%; -} -strong.highlight { - background-color: #FFBBBB; -/* as usual, NetScape fucks up with innocent CSS - border-color: #FFAAAA; - border-style: solid; - border-width: 1pt; -*/ -} - -table.navibar { - background-color: #C8C8C8; - border-spacing: 3px; -} -td.navibar { - background-color: #E8E8E8; - vertical-align: top; - text-align: right; - padding: 0px; -} - -div.pagename { - font-size: 140%; - color: blue; - text-align: center; - font-weight: bold; - background-color: white; - padding: 0 ; -} - -a.wikiaction, input.wikiaction { - color: black; - text-decoration: None; - text-align: center; - color: black; - /*border: 1px solid #3ba6ec; */ - margin: 4px; - padding: 5; - padding-bottom: 0; - white-space: nowrap; -} - -a.wikiaction[href]:hover { - color: black; - text-decoration: none; - /*background-color: #dddddd; */ -} - -span.wikiuserpref { - padding-top: 1em; - font-size: 120%; -} - -div.wikitrail { - vertical-align: bottom; - /*font-size: -1;*/ - padding-top: 1em; - display: none; -} - -div.wikiaction { - vertical-align: middle; - /*border-bottom: 1px solid #8cacbb;*/ - padding-bottom:1em; - text-align: left; - width: 100%; -} - -div.wikieditmenu { - text-align: right; -} - -form.wikiedit { - border: 1px solid #8cacbb; - background-color: #f0f0f0; - background-color: #fabf00; - padding: 1em; - padding-right: 0em; -} - -div.legenditem { - padding-top: 0.5em; - padding-left: 0.3em; -} - -span.wikitoken { - background-color: #eeeeee; -} - - -div#contentspace h1:first-child, div.heading:first-child { - padding-top: 0; - margin-top: 0; -} -div#contentspace h2:first-child { - padding-top: 0; - margin-top: 0; -} - -/* heading and paragraph text */ - -div.heading, h1 { - font-family: Verdana, Helvetica, Arial, sans-serif; - background-color: #58b3ef; - background-color: #FFFFFF; - 
/*color: #4893cf;*/ - color: black; - padding-top: 1.0em; - padding-bottom:0.2em; - text-align: left; - margin-top: 0em; - /*margin-bottom:8pt;*/ - font-weight: bold; - font-size: 115%; - border-bottom: 1px solid #8CACBB; -} - - -h1, h2, h3, h4, h5, h6 { - color: Black; - clear: left; - font: 100% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 1em; - padding-bottom: 0.2em; - /*border-bottom: 1px solid #8CACBB;*/ -} -/* h1,h2 { padding-top: 0; }*/ - - -h1 { font-size: 145%; } -h2 { font-size: 135%; } -h3 { font-size: 125%; } -h4 { font-size: 120%; } -h5 { font-size: 110%; } -h6 { font-size: 80%; } - -h1 a { text-decoration: None;} - -div.exception { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -pre.exception { - font-size: 110%; - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; -} - -/* defines for navgiation bar (documentation) */ - - -div.direntry { - padding-top: 0.3em; - padding-bottom: 0.3em; - margin-right: 1em; - font-weight: bold; - background-color: #dee7ec; - font-size: 110%; -} - -div.fileentry { - font-family: Verdana, Helvetica, Arial, sans-serif; - padding-bottom: 0.3em; - white-space: nowrap; - line-height: 150%; -} - -a.fileentry { - white-space: nowrap; -} - - -span.left { - text-align: left; -} -span.right { - text-align: right; -} - -div.navbar { - /*margin: 0;*/ - font-size: 80% /*smaller*/; - font-weight: bold; - text-align: left; - /* position: fixed; */ - top: 100pt; - left: 0pt; /* auto; */ - width: 120pt; - /* right: auto; - right: 0pt; 2em; */ -} - - -div.history a { - /* font-size: 70%; */ -} - -div.wikiactiontitle { - font-weight: bold; -} - -/* REST defines */ - -div.document { - margin: 0; -} - -h1.title { - margin: 0; -} - -td.toplist { - vertical-align: top; -} - -img#pyimg { - position: absolute; - top: 4px; - left: 4px; -} - -img#extraimg { - position: absolute; - right: 14px; - 
top: 4px; -} - -div#navspace { - position: absolute; - top: 130px; - left: 11px; - font-size: 100%; - width: 150px; - overflow: hidden; /* scroll; */ -} - -div#metaspace { - position: absolute; - top: 40px; - left: 170px; -} - -div#errorline { - position: relative; - top: 5px; - float: right; -} - -div#contentspace { - position: absolute; - /* font: 120% "Times New Roman", serif;*/ - font: 110% Verdana, Helvetica, Arial, sans-serif; - top: 130px; - left: 170px; - margin-right: 5px; -} - -div#menubar { -/* width: 400px; */ - float: left; -} - -/* for the documentation page */ -div#docinfoline { - position: relative; - top: 5px; - left: 0px; - - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 1em; - color: black; - /*border-width: 1pt; - border-style: solid;*/ - -} - -div#docnavlist { - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 2em; - color: black; - border-width: 1pt; - /*border-style: solid;*/ -} - - -/* text markup */ - -div.listtitle { - color: Black; - clear: left; - font: 120% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 0em; - padding-bottom: 0.2em; - margin-right: 0.5em; - border-bottom: 1px solid #8CACBB; -} - -div.actionbox h3 { - padding-top: 0; - padding-right: 0.5em; - padding-left: 0.5em; - background-color: #fabf00; - text-align: center; - border: 1px solid black; /* 8cacbb; */ -} - -div.actionbox a { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; -} - -div.actionbox a.history { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; - font-size: 90%; -} - -div.actionbox { - margin-bottom: 2em; - padding-bottom: 1em; - overflow: hidden; /* scroll; */ -} - -/* taken from docutils (oh dear, a bit senseless) */ -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - 
-ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - - -/* -:Author: David Goodger -:Contact: goodger at users.sourceforge.net -:date: $Date: 2003/01/22 22:26:48 $ -:version: $Revision: 1.29 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ -/* -.first { - margin-top: 0 } - -.last { - margin-bottom: 0 } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.label { - white-space: nowrap } - -p.topic-title { - 
font-weight: bold } - -pre.address { - margin-bottom: 0 ; - margin-top: 0 ; - font-family: serif ; - font-size: 100% } - -pre.line-block { - font-family: serif ; - font-size: 100% } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.interpreted { - font-family: sans-serif } - -span.option { - white-space: nowrap } - -span.option-argument { - font-style: italic } - -span.pre { - white-space: pre } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: top } - -th.docinfo-name, th.field-name { - font-weight: bold ; - text-align: left ; - white-space: nowrap } - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - font-size: 100% } - -tt { - background-color: #eeeeee } - -ul.auto-toc { - list-style-type: none } -*/ - -div.section { - margin-top: 1.0em ; -} diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -11,7 +11,7 @@ behavior of all objects in a running program is easy to implement on top of PyPy. -Here is what we implemented so far, in historical order: +Here is what we have implemented so far, in historical order: * *Thunk Object Space*: lazily computed objects, computing only when an operation is performed on them; lazy functions, computing their result diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -4,8 +4,6 @@ .. 
contents:: - - This document describes coding requirements and conventions for working with the PyPy code base. Please read it carefully and ask back any questions you might have. The document does not talk diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,35 +7,16 @@ .. toctree:: + + discussion/cli-optimizations.rst + discussion/distribution-implementation.rst + discussion/distribution-newattempt.rst + discussion/distribution-roadmap.rst + discussion/distribution.rst + discussion/finalizer-order.rst + discussion/howtoimplementpickling.rst + discussion/improve-rpython.rst + discussion/outline-external-ootype.rst + discussion/VM-integration.rst - discussion/GC-performance.rst - discussion/VM-integration.rst - discussion/chained_getattr.rst - discussion/cli-optimizations.rst - discussion/cmd-prompt-translation.rst - discussion/compiled-swamp.rst - discussion/ctypes_modules.rst - discussion/ctypes_todo.rst - discussion/distribution.rst - discussion/distribution-implementation.rst - discussion/distribution-newattempt.rst - discussion/distribution-roadmap.rst - discussion/emptying-the-malloc-zoo.rst - discussion/finalizer-order.rst - discussion/gc.rst - discussion/howtoimplementpickling.rst - discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/oz-thread-api.rst - discussion/paper-wishlist.rst - discussion/parsing-ideas.rst - discussion/pypy_metaclasses_in_cl.rst - discussion/removing-stable-compiler.rst - discussion/security-ideas.rst - discussion/somepbc-refactoring-plan.rst - discussion/summer-of-pypy-pytest.rst - discussion/testing-zope.rst - discussion/thoughts_string_interning.rst - discussion/translation-swamp.rst - discussion/use_case_of_logic.rst diff --git a/pypy/doc/svn-help.rst b/pypy/doc/svn-help.rst deleted file mode 100644 --- a/pypy/doc/svn-help.rst +++ /dev/null @@ -1,153 +0,0 @@ - -Installing subversion for PyPy 
-============================== - -Jens-Uwe Mager has prepared some installation files which should -help you to install subversion on your computer. - -+ Download Unix source tarball or prepackaged versions_ for MacOS, Windows, FreeBSD and Linux - -+ Additional information for Windows users: - - * See Microsoft website_ if you have .DLL issues. - - * Windows Installer file for Tortoise SVN (like Tortoise CVS) GUI_ - (Pick the UNICODE version for Windows 2000 and XP and - see Win_ 2000, NT if you have problems loading it.) - -+ Local copy of MacOS_ X binary tar ball - (This requires at least OS X 10.3) - -+ Debian instructions below... - -Getting started ------------------ - -If you're just getting started with subversion, here's a simple how-to. -For complete information, you can go read the subversion guide_. - -**Download and install the appropriate installation file of subversion above.** - -For linux: - -download the tarball. unzip and untar it. Then type *./configure*. Then, as root, *make* followed by *make install*. Voila ... a subversion client. - -For Debian users:: - - $ apt-get install subversion-tools - -People using Debian *stable* first need to add the following line to ``/etc/apt/sources.list`` (thanks backports_!):: - - deb http://fs.cs.fhm.edu/mirror/backports.org/debian stable subversion - -Note that you can always go look at the files online_ with your browser, located at: http://codespeak.net/svn/pypy/trunk -But, you'll want to check out your own local copies to work on. - -Check out and Check in ----------------------------- - -In order to get the sourcecode and docs downloaded onto your drive, open a shell or commandline and type:: - - $ svn co http://codespeak.net/svn/pypy/trunk - -If you are behind a dump proxy this may or may not work; see below. - -Once you've got the files checked out to your own system, you can use your favorite text editor to change to files. 
Be sure to read the coding-guide_ and other documentation files before doing a lot of work on the source code. Before doing any work, make sure you're using the most recent update with:: - - $ svn up - -this will update whichever subdirectory you're in (doc or src). - -When you're ready to **check in** a file, - -cd to your local checked out sourcecode directory, and if necessary, copy the file over from wherever you worked on it:: - - $ cp ~/mydir/filename.ext filename.ext - -If you're adding a brand-new file:: - - $ svn add filename.ext - -Then, to **commit** it:: - - $ svn ci -m "your comments about what changes your committing" - $ your password: (this may not be necessary) - -You'll see something like the following:: - - Adding goals/stringcomp.py - Transmitting file data . - Committed revision 578. - -or:: - - Sending coding-guide.txt - Transmitting file data . - Committed revision 631. - -Check online on the `svn-commit archives`_ and you'll see your revision. Feel free to add a documentation file on any major changes you've made! - -.. _`svn-commit archives`: http://codespeak.net/pipermail/pypy-svn/ - -Some other useful subversion tricks: --------------------------------------- - -**Be sure to remember ``svn`` in the commandline in the following commands.** - -``$ svn mv filename.ext`` - to move or rename a file - -``$ svn rm filename.ext`` - to remove (delete) a file - -``$ svn status`` - will let you know what changes you've made compared to the current repository version - -``$ svn revert filename.ext`` - will fix problems if you deleted or moved a file without telling svn. - -``$ svn cleanup`` - last resort to fix it if you've got a totally messed up local copy. - Use this if you see error messages about ``locked`` files that you can't fix otherwise. - -Circumventing proxies ----------------------------- - -Some proxies don't let extended HTTP commands through. 
If you have an -error complaining about a bad request, you should use https: instead of -http: in the subversion URL. This will make use of SSL encryption, which -cannot be intercepted by proxies. - -Alternatively, if you want to change your proxy configuration, see the -subversion FAQ: http://subversion.tigris.org/faq.html#proxy - -How to Avoid Line-ending Hell ------------------------------ - -We will assume that whenever you create a .txt or a .py file, you would -like other people to be able to read it with the line endings their -OS prefers, even if that is different from the one your OS likes. This -could occasionally be wrong -- say when you are specifically testing -that code you are writing handles line endings properly -- but this is -what you want by default. Binary files, on the other hand, should be -stored exactly as is. This has to be set on every client. Here is how: - -In your home directory edit .subversion/config and comment in :: - - [miscellany] - enable-auto-props = yes - - [auto-props] - *.txt = svn:eol-style=native - *.py = svn:eol-style=native - - -.. _website: http://support.microsoft.com/default.aspx?scid=kb%3Ben-us%3B259403 -.. _GUI: http://tortoisesvn.tigris.org/servlets/ProjectDocumentList?folderID=616 -.. _MacOS: http://codespeak.net/~jum/svn-1.4.0-darwin-ppc.tar.gz -.. _versions: http://subversion.tigris.org/project_packages.html -.. _Win: http://www.microsoft.com/downloads/details.aspx?displaylang=en&FamilyID=4B6140F9-2D36-4977-8FA1-6F8A0F5DCA8F -.. _guide: http://svnbook.red-bean.com/book.html#svn-ch-1 -.. _backports: http://www.backports.org -.. _online: http://codespeak.net/svn/pypy/trunk/ -.. _coding-guide: coding-guide.html diff --git a/pypy/doc/old_news.rst b/pypy/doc/old_news.rst deleted file mode 100644 --- a/pypy/doc/old_news.rst +++ /dev/null @@ -1,306 +0,0 @@ -The PyPy project aims at producing a flexible and fast Python_ -implementation. 
The guiding idea is to translate a Python-level -description of the Python language itself to lower level languages. -Rumors have it that the secret goal is being faster-than-C which is -nonsense, isn't it? `more...`_ - -.. _Python: http://www.python.org/doc/current/ref/ref.html -.. _`more...`: architecture.html#mission-statement - - -Leysin Winter Sports Sprint, 12th - 19th January 2008 -================================================================== - -.. raw:: html - -
- -The next PyPy sprint will be held in Leysin, Switzerland, for -the fifth time. The overall idea of the sprint is to continue -working on making PyPy ready for general use. - -.. raw:: html - -
- -The proposed topics are: ctypes, JIT, testing, LLVM. This is -a fully public sprint, so newcomers and other topics are -welcome. And like previous winters, the main side goal is to -have fun in winter sports :-) See the `sprint announcement`__ -for details. - -.. raw:: html - -   -
- -.. __: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2008/announcement.html - - -PyPy blog started -================= - -A few days ago some of the PyPy developers started a `PyPy Status Blog`_. Let's -see how this works out. *(November 13th, 2007)* - -.. _`PyPy Status Blog`: http://morepypy.blogspot.com - - -PyPy/Squeak Sprint in Bern finished -=================================== - -The Bern sprint, being the first Squeak-PyPy-collaboration-sprint is finished. -The week was very intense and productive, see `Bern Sprint Summary blog post`_ -for a list of things we accomplished. We covered most of what happened during -the sprint in quite some detail on the `PyPy Squeak blog`_. The sprint was -hosted by the Software Composition Group of the University of Bern from the -22nd to the 26th of October 2007. - -.. _`Bern sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/bern2007/announce.html -.. _`people that are known to come`: http://codespeak.net/pypy/extradoc/sprintinfo/bern2007/people.html -.. _`Bern Sprint Summary blog post`: http://pypysqueak.blogspot.com/2007/10/bern-sprint-finished-summary.html -.. _`PyPy Squeak blog`: http://pypysqueak.blogspot.com - - - -PyPy Sprint in Gothenburg: 19nd-25th November 2007 -================================================================== - - -The next post-EU-project PyPy sprint will be in Gothenburg, Sweden. It will -focus on cleaning up the PyPy codebase and making it ready for the next round -of improvements. It is a "public" sprint but it will probably be more suitable -for people already somewhat acquainted with PyPy. For more information see the -`Gothenburg sprint announcement`_ or a list of the `people that are known to -come to Gothenburg`_. - -.. _`Gothenburg sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2007/announce.html -.. 
_`people that are known to come to Gothenburg`: http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2007/people.html - - - - -PyPy Sprint at EuroPython, Vilnius is finished -================================================================== - -The sprint at the last EuroPython_ conference in Vilnius from the 9th to -the 11th of July, 2007 is finished. For more information -see the `Vilnius sprint announcement`_. - - -.. _EuroPython: http://europython.org -.. _`Vilnius sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2007/announcement.html - - -Review passed with flying colours -================================= - -On the 31st of May 2007 the PyPy project was reviewed by the EU -Commission in Brussels. Reviewers were Roel Wuyts, Unversité Libre de -Bruxelles and Aki Lumiaho, Ramboll, Finland. Present was also our -Project Officer, Charles McMillan. After 6 hours of presentations of -the various aspects of the project, it only took the reviewers a few -minutes to decide that the project was accepted, without any further -work being required. Professor Wuyts, who has dynamic programming -languages as his main field of research was very enthusiastic about -the entire project and the results with the Just In Time Compiler -Generator in particular. He offered his help in establishing -collaborations with the communities around Prolog, Smalltalk, Lisp and -other dynamic languages, as well as giving hints on how to get our -results most widely publicized. - -The preparations for the review left the team rather exhausted so -development progress will be rather slow until the sprint at -Europython in the second week of July. - -PyPy EU funding period over, Review ahead -=========================================================== - -The 28 month EU project period of PyPy is over and new things are to come! -On 11th May we `submitted last documents`_ to the European Union and are now -heading towards a 31st May Review Meeting in Bruxelles. 
The `PyPy EU Final -Activity Report`_ summarizes what we did and what we have in mind -on technical, scientific and community levels. It also contains reflections -and recommendations possibly interesting to other projects aiming at -EU funded Open Source research. *(12th May, 2007)* - -.. _`submitted last documents`: http://codespeak.net/pypy/dist/pypy/doc/index-report.html -.. _`PyPy EU Final Activity Report`: http://codespeak.net/pypy/extradoc/eu-report/PYPY-EU-Final-Activity-Report.pdf - -PyPy 1.0: JIT compiler generator, optimizations and more -================================================================== - -We are proud to release PyPy 1.0.0, our sixth public release (Download_). See -the `release announcement `__ to read about the -many new features in this release, especially the results of our -JIT generation technology. See also our detailed instructions on -how to `get started`_. *(March 27th, 2007)* - -.. _Download: getting-started.html#just-the-facts -.. _`get started`: getting-started.html - - - - -PyPy Trillke Sprints (25-28th Feb and 1-5th March 2007) finished -================================================================== - -Both of the sprints that mark the end of the EU period are over. There were very -good results, both on a `report level`_ as well as on a `technical level`_. -The sprint also had a good discussion about the future of PyPy after the EU -project ends, see the `mail Armin wrote`_ and `the meeting's minutes`_. You can -also look at the pictures that `Carl Friedrich`_ and that `Lene took`_ during -the sprint or read the `sprint announcement`_. *(March 10th, 2007)* - -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/announcement.html -.. _`report level`: http://codespeak.net/pipermail/pypy-dev/2007q1/003578.html -.. _`technical level`: http://codespeak.net/pipermail/pypy-dev/2007q1/003579.html -.. _`Carl Friedrich`: http://codespeak.net/~cfbolz/hildesheim3-sprint-pictures/ -.. 
_`Lene took`: http://codespeak.net/~lene/trillke-sprint-web/Page1.html -.. _`mail Armin wrote`: http://codespeak.net/pipermail/pypy-dev/2007q1/003577.html -.. _`the meeting's minutes`: http://codespeak.net/svn/pypy/extradoc/minute/post-eu-structure.txt - - - - -PyPy 0.99.0: optimizations, backends, new object spaces and more -================================================================== - -We are proud to release PyPy 0.99.0, our fifth public release. See -the `release announcement `__ to read about the -many new features in this release. See also our detailed instructions on -how to `get started`_. *(February 17th, 2007)* - -.. _`get started`: getting-started.html - - -py lib 0.9.0: py.test, distributed execution, greenlets and more -================================================================== - -Our development support and testing library was publically released, see the -`0.9 release announcement `__ -and its extensive `online documentation `__. -*(February 15th, 2007)* - - - -Leysin Winter Sports Sprint, 8th - 14th January 2007 -================================================================== - -.. raw:: html - -
- -The PyPy Leysin sprint is over. We worked hard on various topics, including -preparing the upcoming py-lib and PyPy releases. For more details, see the -`Leysin sprint report`_, the `Leysin announcement`_ and the -`list of people present`_. - - -.. raw:: html - -
- -.. _`Leysin announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/announcement.html -.. _`Leysin sprint report`: http://codespeak.net/pipermail/pypy-dev/2007q1/003481.html -.. _`list of people present`: http://codespeak.net/svn/pypy/extradoc/sprintinfo/leysin-winter-2007/people.txt - - -Massive Parallelism and Translation Aspects -======================================================== - -Our next big `EU report`_ about Stackless features, optimizations, and -memory management is finished. You can download it `as pdf`_. - -.. _`EU report`: index-report.html -.. _`as pdf`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf - - -Duesseldorf sprint #2, 30th October - 5th November over -================================================================== - -The Duesseldorf sprint is over. It was a very productive sprint with work done -in various areas. Read the `sprint report`_ for a detailed description of what -was achieved and the `full announcement`_ for various details. - -.. _`full announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/announce.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q4/003396.html - - - -Dynamic Languages Symposium (OOPSLA, 23rd October) -================================================================== - -We will present a paper at the `Dynamic Languages Symposium`_ describing -`PyPy's approach to virtual machine construction`_. The DLS is a -one-day forum within OOPSLA'06 (Portland, Oregon, USA). The paper is a -motivated overview of the annotation/rtyping translation tool-chain, -with experimental results. - -As usual, terminology with PyPy is delicate :-) Indeed, the title is -both correct and misleading - it does not describe "the" PyPy virtual -machine, since we have never hand-written one. This paper focuses on -how we are generating such VMs, not what they do. - -.. 
_`Dynamic Languages Symposium`: http://www.oopsla.org/2006/submission/tracks/dynamic_languages_symposium.html -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf - - - -Summer of PyPy: Calls for proposals open now! -================================================================== - -Happily, we are able to offer students mentoring and full sprint -participant's funding if we receive a proposal outlining an -interesting project related to PyPy and its development tools. This -follows up on the "Summer of Code" campaign from Google but is -completely independent from it and also works differently. -See the full call for details: - - http://codespeak.net/pypy/dist/pypy/doc/summer-of-pypy.html - - -Ireland sprint 21st-27th August -================================================================== - -The last PyPy sprint happened in the nice city of -Limerick in Ireland from 21st till 27th August. -The main focus of the sprint was on JIT compiler works, -various optimization works, porting extension modules, -infrastructure works like a build tool for PyPy and -extended (distributed) testing. -Read the full `announcement`_ for more details. - -.. _`announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ireland-2006/announce.html - -Release of PyPy video documentation -================================================================== - -The PyPy team is happy to announce that the first bunch of PyPy videos -can now be downloaded from: - -http://codespeak.net/pypy/dist/pypy/doc/video-index.html - -The videos introduce involved people and contain different talks, tutorials and -interviews and can be downloaded via bittorrent. **29th June 2006** - -PyPy 0.9.0 -================================================================== - -We are proud to release PyPy 0.9.0, our fourth public release. See -the `release announcement `__ to read about the -many new features in this release. 
- -PyPy and Summer of Code 2006 -================================================================== - -PyPy will again mentor students through Google's `Summer of Code`_ -campaign. Three students will kick-off their work on PyPy by -participating in the Duesseldorf sprint. They will be exploring a -back-end for Microsoft.NET, work on ways to build web applications -with Javascript code (in this case by translating RPython to -Javascript) and porting some CPython modules to use ctypes. Welcome to -the team! - -.. _`Summer of Code`: http://code.google.com/soc/psf/about.html - diff --git a/pypy/doc/maemo.rst b/pypy/doc/maemo.rst deleted file mode 100644 --- a/pypy/doc/maemo.rst +++ /dev/null @@ -1,187 +0,0 @@ -How to run PyPy on top of maemo platform -======================================== - -This howto explains how to use Scratchbox_ to cross-compile PyPy's -Python Interpreter to an `Internet-Tablet-OS`_, more specifically -the Maemo_ platform. This howto should work well for getting -a usable Python Interpreter for Nokia's N810_ device. - -setup cross-compilation environment -------------------------------------- - -The main steps are to install scratchbox and the Maemo SDK. Please refer -to Nokia's `INSTALL.txt`_ for more detail. - -Adjust linux kernel settings -+++++++++++++++++++++++++++++++++ - -In order to install and run scratchbox you will need to adjust -your Linux kernel settings. Note that the VDSO setting may -crash your computer - if that is the case, try running without -this setting. 
You can try it like this:: - - $ echo 4096 | sudo tee /proc/sys/vm/mmap_min_addr - $ echo 0 | sudo tee /proc/sys/vm/vdso_enabled - -If that works fine for you (on some machines the vdso setting can freeze machines) -you can make the changes permanent by editing ``/etc/sysctl.conf`` to contain:: - - vm.vdso_enabled = 0 - vm.mmap_min_addr = 4096 - -install scratchbox packages -+++++++++++++++++++++++++++++++++ - -Download - - http://repository.maemo.org/stable/diablo/maemo-scratchbox-install_4.1.1.sh - -and run this script as root:: - - $ sh maemo-scratchbox-install_4.1.1.sh -s /scratchbox -u ACCOUNTNAME - -The script will automatically download Debian packages or tarballs -and pre-configure a scratchbox environment with so called "devkits" -and "toolchains" for performing cross-compilation. It's fine -and recommended to use your linux account name as a scratchbox -ACCOUNTNAME. - -It also sets up an "sbox" group on your system and makes you -a member - giving the right to login to a scratchbox environment. - -testing that scratchbox environment works -+++++++++++++++++++++++++++++++++++++++++++++++ - -Login freshly to your Linux account in order to activate -your membership in the "sbox" unix group and then type:: - - $ /scratchbox/login - -This should warn you with something like "sb-conf: no current -target" because we have not yet created a cross-compilation -target. - -Note that Scratchbox starts daemon services which -can be controlled via:: - - /scratchbox/sbin/sbox_ctl start|stop - - -Installing the Maemo SDK -+++++++++++++++++++++++++++++++ - -To mimic the specific N810_ environment we now install the Maemo-SDK. -This will create an target within our new scratchbox environment -that we then use to compile PyPy. - -Make sure that you are a member of the "sbox" group - this might -require logging out and in again. 
- -Then, download - - http://repository.maemo.org/stable/diablo/maemo-sdk-install_4.1.1.sh - -and execute it with user privileges:: - - $ sh maemo-sdk-install_4.1.1.sh - -When being asked select the default "Runtime + Dev" packages. You do not need -Closed source Nokia binaries for PyPy. This installation -script will download "rootstraps" and create so called -"targets" and preselect the "DIABLO_ARMEL" target for ARM -compilation. Within the targets a large number of packages -will be pre-installed resulting in a base scratchbox -environment that is usable for cross compilation of PyPy. - -Customizing the DIABLO_ARMEL target for PyPy -++++++++++++++++++++++++++++++++++++++++++++++++ - -As PyPy does not yet provide a debian package description -file for use on Maemo, we have to install some dependencies manually -into our Scratchbox target environment. - -1. Go into your scratchbox by executing ``/scratchbox/login`` - (this should bring you to a shell with the DIABLO_ARMEL target) - -2. Add these lines to ``/etc/apt/sources.list``:: - - deb http://repository.maemo.org/extras/ diablo free non-free - deb http://repository.maemo.org/extras-devel/ diablo free non-free - - NOTE: if you have an older version of Maemo on your device you - can try substitute "chinook" for "diablo" in the above lines - and/or update your firmware. You can probably see which version - you are using by looking at the other content of the ``sources.list``. - -3. Perform ``apt-get update``. - -4. Install some necessary packages:: - - apt-get install python2.5-dev libffi4-dev zlib1g-dev libbz2-dev libgc-dev libncurses5-dev - - The "libgc-dev" package is only needed if you want to use the Boehm - garbage collector. - -5. Leave the scratchbox shell again with ``exit``. - - -Translating PyPy for the Maemo platform ------------------------------------------- - -You at least need "gcc" and "libc-dev" packages on your host system -to compile pypy. 
The scratchbox and its DIABLO_ARMEL target contains -its own copies of GCC, various C libraries and header files -which pypy needs for successful cross-compilation. - -Now, on the host system, perform a mercurial clone of PyPy:: - - hg clone ssh://hg at bitbucket.org/pypy/pypy - -Several revisions since about 9d7b7ecb9144 are known to work and -the last manually tested one is currently 7f267e4b7861. - -Change to the ``pypy-trunk/pypy/translator/goal`` directory and execute:: - - python translate.py --platform=maemo --opt=3 - -You need to run translate.py using Python 2.5. This will last some 30-60 -minutes on most machines. For compiling the C source code PyPy's tool chain -will use our scratchbox/Maemo cross-compilation environment. - -When this step succeeds, your ``goal`` directory will contain a binary called -``pypy-c`` which is executable on the Maemo device. To run this binary -on your device you need to also copy some support files. A good way to -perform copies to your device is to install OpenSSH on the -mobile device and use "scp" or rsync for transferring files. - -You can just copy your whole pypy-trunk directory over to your mobile -device - however, only these should be needed:: - - lib/pypy1.2/lib_pypy - lib/pypy1.2/lib-python - pypy/translator/goal/pypy-c - -It is necessary that the ``pypy-c`` can find a "lib-python" and "lib_pypy" directory -if you want to successfully startup the interpreter on the device. - -Start ``pypy-c`` on the device. If you see an error like "setupterm: could not find terminal" -you probably need to perform this install on the device:: - - apt-get install ncurses-base - -Eventually you should see something like:: - - Nokia-N810-51-3:~/pypy/trunk# ./pypy-c - Python Python 2.5.2 (pypy 1.0.0 build 59527) on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``E09 2K @CAA:85?'' - >>>> - - -.. _N810: http://en.wikipedia.org/wiki/Nokia_N810 -.. 
_`Internet-Tablet-OS`: http://en.wikipedia.org/wiki/Internet_Tablet_OS -.. _Maemo: http://www.maemo.org -.. _Scratchbox: http://www.scratchbox.org -.. _`INSTALL.txt`: http://tablets-dev.nokia.com/4.1/INSTALL.txt - - diff --git a/pypy/doc/dev_method.rst b/pypy/doc/dev_method.rst --- a/pypy/doc/dev_method.rst +++ b/pypy/doc/dev_method.rst @@ -20,7 +20,7 @@ Main tools for achieving this is: * py.test - automated testing - * Subversion - version control + * Mercurial - version control * Transparent communication and documentation (mailinglists, IRC, tutorials etc etc) @@ -237,124 +237,3 @@ interested in using sprints as away of making contact with active developers (Python/compiler design etc)! -If you have questions about our sprints and EU-funding - please send an email -to pypy-funding at codespeak.net, our mailinglist for project coordination. - -Previous sprints? -+++++++++++++++++ - -The PyPy team has been sprinting on the following occasions:: - - * Hildesheim Feb 2003 - * Gothenburg May 2003 - * Europython/Louvain-La-Neuve June 2003 - * Berlin Sept 2003 - * Amsterdam Dec 2003 - * Europython/Gothenburg June 2004 - * Vilnius Nov 2004 - * Leysin Jan 2005 - * PyCon/Washington March 2005 - * Europython/Gothenburg June 2005 - * Hildesheim July 2005 - * Heidelberg Aug 2005 - * Paris Oct 2005 - * Gothenburg Dec 2005 - * Mallorca Jan 2006 - * PyCon/Dallas Feb 2006 - * Louvain-La-Neuve March 2006 - * Leysin April 2006 - * Tokyo April 2006 - * Düsseldorf June 2006 - * Europython/Geneva July 2006 - * Limerick Aug 2006 - * Düsseldorf Oct 2006 - * Leysin Jan 2007 - * Hildesheim Feb 2007 - -People who have participated and contributed during our sprints and thus -contributing to PyPy (if we have missed someone here - please contact us -so we can correct it): - - Armin Rigo - Holger Krekel - Samuele Pedroni - Christian Tismer - Laura Creighton - Jacob Hallén - Michael Hudson - Richard Emslie - Anders Chrigström - Alex Martelli - Ludovic Aubry - Adrien DiMascio - Nicholas 
Chauvat - Niklaus Haldimann - Anders Lehmann - Carl Friedrich Bolz - Eric Van Riet Paap - Stephan Diel - Dinu Gherman - Jens-Uwe Mager - Marcus Denker - Bert Freudenberg - Gunther Jantzen - Henrion Benjamin - Godefroid Chapelle - Anna Ravenscroft - Tomek Meka - Jonathan David Riehl - Patrick Maupain - Etienne Posthumus - Nicola Paolucci - Albertas Agejevas - Marius Gedminas - Jesus Cea Avion - Olivier Dormond - Jacek Generowicz - Brian Dorsey - Guido van Rossum - Bob Ippolito - Alan McIntyre - Lutz Paelike - Michael Chermside - Beatrice Düring - Boris Feigin - Amaury Forgeot d'Arc - Andrew Thompson - Valentino Volonghi - Aurelien Campeas - Stephan Busemann - Johan Hahn - Gerald Klix - Gene Oden - Josh Gilbert - Geroge Paci - Martin Blais - Stuart Williams - Jiwon Seo - Michael Twomey - Wanja Saatkamp - Alexandre Fayolle - Raphaël Collet - Grégoire Dooms - Sanghyeon Seo - Yutaka Niibe - Yusei Tahara - George Toshida - Koichi Sasada - Guido Wesdorp - Maciej Fijalkowski - Antonio Cuni - Lawrence Oluyede - Fabrizio Milo - Alexander Schremmer - David Douard - Michele Frettoli - Simon Burton - Aaron Bingham - Pieter Zieschang - Sad Rejeb - Brian Sutherland - Georg Brandl - - diff --git a/pypy/doc/style.css b/pypy/doc/style.css deleted file mode 100644 --- a/pypy/doc/style.css +++ /dev/null @@ -1,1091 +0,0 @@ -body,body.editor,body.body { - font: 90% "Times New Roman", Arial, Verdana, Helvetica, serif; - background: White; - color: Black; -} - -a, a.reference { - text-decoration: none; -} -a[href]:hover { text-decoration: underline; } - -img { - border: none; - vertical-align: middle; -} - -p, div.text { - text-align: left; - line-height: 1.5em; - margin: 0.5em 0em 0em 0em; -} - - - -p a:active { - color: Red; - background-color: transparent; -} - -p img { - border: 0; - margin: 0; -} - -img.inlinephoto { - padding: 0; - padding-right: 1em; - padding-top: 0.7em; - float: left; -} - -hr { - clear: both; - height: 1px; - color: #8CACBB; - background-color: transparent; -} - 
- -ul { - line-height: 1.5em; - /*list-style-image: url("bullet.gif"); */ - margin-left: 1.5em; - padding:0; -} - -ol { - line-height: 1.5em; - margin-left: 1.5em; - padding:0; -} - -ul a, ol a { - text-decoration: underline; -} - -dl { -} - -dt { - font-weight: bold; -} - -dd { - line-height: 1.5em; - margin-bottom: 1em; -} - -blockquote { - font-family: Times, "Times New Roman", serif; - font-style: italic; - font-size: 120%; -} - -code { - color: Black; - /*background-color: #dee7ec;*/ - background-color: #cccccc; -} - -pre { - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; - overflow: auto; -} - - -.netscape4 { - display: none; -} - -/* main page styles */ - -/*a[href]:hover { color: black; text-decoration: underline; } -a[href]:link { color: black; text-decoration: underline; } -a[href] { color: black; text-decoration: underline; } -*/ - -span.menu_selected { - color: black; - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; - background-color: #cccccc; -} - - -a.menu { - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; - padding-right: 0.3em; -} - -a.menu[href]:visited, a.menu[href]:link{ - /*color: #3ba6ec; */ - font: 120% Verdana, Helvetica, Arial, sans-serif; - text-decoration: none; -} - -a.menu[href]:hover { - /*color: black;*/ -} - -div.project_title{ - /*border-spacing: 20px;*/ - font: 160% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: center; - padding-bottom: 0.3em; -} - -a.wikicurrent { - font: 100% Verdana, Helvetica, Arial, sans-serif; - color: #3ba6ec; - vertical-align: middle; -} - - -table.body { - border: 0; - /*padding: 0; - border-spacing: 0px; - border-collapse: separate; - */ -} - -td.page-header-left { - padding: 5px; - /*border-bottom: 1px solid #444444;*/ -} - -td.page-header-top { - padding: 0; - - /*border-bottom: 1px solid #444444;*/ -} - 
-td.sidebar { - padding: 1 0 0 1; -} - -td.sidebar p.classblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeee; -} - -td.sidebar p.userblock { - padding: 0 5 0 5; - margin: 1 1 1 1; - border: 1px solid #444444; - background-color: #eeeeff; -} - -td.content { - padding: 1 5 1 5; - vertical-align: top; - width: 100%; -} - -p.ok-message { - background-color: #22bb22; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -p.error-message { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} - -p:first-child { - margin: 0 ; - padding: 0; -} - -/* style for forms */ -table.form { - padding: 2; - border-spacing: 0px; - border-collapse: separate; -} - -table.form th { - color: #333388; - text-align: right; - vertical-align: top; - font-weight: normal; -} -table.form th.header { - font-weight: bold; - background-color: #eeeeff; - text-align: left; -} - -table.form th.required { - font-weight: bold; -} - -table.form td { - color: #333333; - empty-cells: show; - vertical-align: top; -} - -table.form td.optional { - font-weight: bold; - font-style: italic; -} - -table.form td.html { - color: #777777; -} - -/* style for lists */ -table.list { - border-spacing: 0px; - border-collapse: separate; - vertical-align: top; - padding-top: 0; - width: 100%; -} - -table.list th { - padding: 0 4 0 4; - color: #404070; - background-color: #eeeeff; - border-right: 1px solid #404070; - border-top: 1px solid #404070; - border-bottom: 1px solid #404070; - vertical-align: top; - empty-cells: show; -} -table.list th a[href]:hover { color: #404070 } -table.list th a[href]:link { color: #404070 } -table.list th a[href] { color: #404070 } -table.list th.group { - background-color: #f4f4ff; - text-align: center; - font-size: 120%; -} - -table.list td { - padding: 0 4 0 4; - border: 0 2 0 2; - border-right: 1px solid #404070; - color: #404070; - background-color: white; - vertical-align: top; - empty-cells: 
show; -} - -table.list tr.normal td { - background-color: white; - white-space: nowrap; -} - -table.list tr.alt td { - background-color: #efefef; - white-space: nowrap; -} - -table.list td:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list th:first-child { - border-left: 1px solid #404070; - border-right: 1px solid #404070; -} - -table.list tr.navigation th { - text-align: right; -} -table.list tr.navigation th:first-child { - border-right: none; - text-align: left; -} - - -/* style for message displays */ -table.messages { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.messages th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.messages th { - font-weight: bold; - color: black; - text-align: left; - border-bottom: 1px solid #afafaf; -} - -table.messages td { - font-family: monospace; - background-color: #efefef; - border-bottom: 1px solid #afafaf; - color: black; - empty-cells: show; - border-right: 1px solid #afafaf; - vertical-align: top; - padding: 2 5 2 5; -} - -table.messages td:first-child { - border-left: 1px solid #afafaf; - border-right: 1px solid #afafaf; -} - -/* style for file displays */ -table.files { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.files th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.files th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -table.files td { - font-family: monospace; - empty-cells: show; -} - -/* style for history displays */ -table.history { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.history th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; - font-size: 100%; -} - 
-table.history th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; - font-size: 90%; -} - -table.history td { - font-size: 90%; - vertical-align: top; - empty-cells: show; -} - - -/* style for class list */ -table.classlist { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classlist th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.classlist th { - font-weight: bold; - text-align: left; -} - - -/* style for class help display */ -table.classhelp { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.classhelp th { - font-weight: bold; - text-align: left; - color: #707040; -} - -table.classhelp td { - padding: 2 2 2 2; - border: 1px solid black; - text-align: left; - vertical-align: top; - empty-cells: show; -} - - -/* style for "other" displays */ -table.otherinfo { - border-spacing: 0px; - border-collapse: separate; - width: 100%; -} - -table.otherinfo th.header{ - padding-top: 10px; - border-bottom: 1px solid gray; - font-weight: bold; - background-color: white; - color: #707040; -} - -table.otherinfo th { - border-bottom: 1px solid #afafaf; - font-weight: bold; - text-align: left; -} - -input { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - -select { - border: 1px solid #8cacbb; - color: Black; - background-color: white; - vertical-align: middle; - margin-bottom: 1px; /* IE bug fix */ - padding: 0.1em; -} - - -a.nonexistent { - color: #FF2222; -} -a.nonexistent:visited { - color: #FF2222; -} -a.external { - color: #AA6600; -} - -/* -dl,ul,ol { - margin-top: 1pt; -} -tt,pre { - font-family: Lucida Console,Courier New,Courier,monotype; - font-size: 12pt; -} -pre.code { - margin-top: 8pt; - margin-bottom: 8pt; - background-color: #FFFFEE; - white-space:pre; - 
border-style:solid; - border-width:1pt; - border-color:#999999; - color:#111111; - padding:5px; - width:100%; -} -*/ -div.diffold { - background-color: #FFFF80; - border-style:none; - border-width:thin; - width:100%; -} -div.diffnew { - background-color: #80FF80; - border-style:none; - border-width:thin; - width:100%; -} -div.message { - margin-top: 6pt; - background-color: #E8FFE8; - border-style:solid; - border-width:1pt; - border-color:#999999; - color:#440000; - padding:5px; - width:100%; -} -strong.highlight { - background-color: #FFBBBB; -/* as usual, NetScape fucks up with innocent CSS - border-color: #FFAAAA; - border-style: solid; - border-width: 1pt; -*/ -} - -table.navibar { - background-color: #C8C8C8; - border-spacing: 3px; -} -td.navibar { - background-color: #E8E8E8; - vertical-align: top; - text-align: right; - padding: 0px; -} - -div.pagename { - font-size: 140%; - color: blue; - text-align: center; - font-weight: bold; - background-color: white; - padding: 0 ; -} - -a.wikiaction, input.wikiaction { - color: black; - text-decoration: None; - text-align: center; - color: black; - /*border: 1px solid #3ba6ec; */ - margin: 4px; - padding: 5; - padding-bottom: 0; - white-space: nowrap; -} - -a.wikiaction[href]:hover { - color: black; - text-decoration: none; - /*background-color: #dddddd; */ -} - -span.wikiuserpref { - padding-top: 1em; - font-size: 120%; -} - -div.wikitrail { - vertical-align: bottom; - /*font-size: -1;*/ - padding-top: 1em; - display: none; -} - -div.wikiaction { - vertical-align: middle; - /*border-bottom: 1px solid #8cacbb;*/ - padding-bottom:1em; - text-align: left; - width: 100%; -} - -div.wikieditmenu { - text-align: right; -} - -form.wikiedit { - border: 1px solid #8cacbb; - background-color: #f0f0f0; - background-color: #fabf00; - padding: 1em; - padding-right: 0em; -} - -div.legenditem { - padding-top: 0.5em; - padding-left: 0.3em; -} - -span.wikitoken { - background-color: #eeeeee; -} - - -div#contentspace h1:first-child, 
div.heading:first-child { - padding-top: 0; - margin-top: 0; -} -div#contentspace h2:first-child { - padding-top: 0; - margin-top: 0; -} - -/* heading and paragraph text */ - -div.heading, h1 { - font-family: Verdana, Helvetica, Arial, sans-serif; - background-color: #58b3ef; - background-color: #FFFFFF; - /*color: #4893cf;*/ - color: black; - padding-top: 1.0em; - padding-bottom:0.2em; - text-align: left; - margin-top: 0em; - /*margin-bottom:8pt;*/ - font-weight: bold; - font-size: 115%; - border-bottom: 1px solid #8CACBB; -} - - -h1, h2, h3, h4, h5, h6 { - color: Black; - clear: left; - font: 100% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 1em; - padding-bottom: 0.2em; - /*border-bottom: 1px solid #8CACBB;*/ -} -/* h1,h2 { padding-top: 0; }*/ - - -h1 { font-size: 145%; } -h2 { font-size: 135%; } -h3 { font-size: 125%; } -h4 { font-size: 120%; } -h5 { font-size: 110%; } -h6 { font-size: 80%; } - -h1 a { text-decoration: None;} - -div.exception { - background-color: #bb2222; - padding: 5 5 5 5; - color: white; - font-weight: bold; -} -pre.exception { - font-size: 110%; - padding: 1em; - border: 1px solid #8cacbb; - color: Black; - background-color: #dee7ec; - background-color: #cccccc; -} - -/* defines for navgiation bar (documentation) */ - - -div.direntry { - padding-top: 0.3em; - padding-bottom: 0.3em; - margin-right: 1em; - font-weight: bold; - background-color: #dee7ec; - font-size: 110%; -} - -div.fileentry { - font-family: Verdana, Helvetica, Arial, sans-serif; - padding-bottom: 0.3em; - white-space: nowrap; - line-height: 150%; -} - -a.fileentry { - white-space: nowrap; -} - - -span.left { - text-align: left; -} -span.right { - text-align: right; -} - -div.navbar { - /*margin: 0;*/ - font-size: 80% /*smaller*/; - font-weight: bold; - text-align: left; - /* position: fixed; */ - top: 100pt; - left: 0pt; /* auto; */ - width: 120pt; - /* right: auto; - right: 0pt; 2em; */ -} - - -div.history a { - /* font-size: 70%; 
*/ -} - -div.wikiactiontitle { - font-weight: bold; -} - -/* REST defines */ - -div.document { - margin: 0; -} - -h1.title { - margin: 0; -} - -td.toplist { - vertical-align: top; -} - -img#pyimg { - position: absolute; - top: 0px; - left: 20px; - margin: 20px; -} - -img#extraimg { - position: absolute; - right: 14px; - top: 4px; -} - -div#navspace { - position: absolute; - top: 130px; - left: 11px; - font-size: 100%; - width: 150px; - overflow: hidden; /* scroll; */ -} - -div#metaspace { - position: absolute; - top: 40px; - left: 210px; -} - -div#errorline { - position: relative; - top: 5px; - float: right; -} - -div#contentspace { - position: absolute; - /* font: 120% "Times New Roman", serif;*/ - font: 110% Verdana, Helvetica, Arial, sans-serif; - top: 140px; - left: 130px; - margin-right: 140px; -} - -div#menubar { -/* width: 400px; */ - float: left; -} - -/* for the documentation page */ -div#docinfoline { - position: relative; - top: 5px; - left: 0px; - - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 1em; - color: black; - /*border-width: 1pt; - border-style: solid;*/ - -} - -div#docnavlist { - /*background-color: #dee7ec; */ - padding: 5pt; - padding-bottom: 2em; - color: black; - border-width: 1pt; - /*border-style: solid;*/ -} - - -/* text markup */ - -div.listtitle { - color: Black; - clear: left; - font: 120% Verdana, Helvetica, Arial, sans-serif; - margin: 0; - padding-left: 0em; - padding-top: 0em; - padding-bottom: 0.2em; - margin-right: 0.5em; - border-bottom: 1px solid #8CACBB; -} - -div.actionbox h3 { - padding-top: 0; - padding-right: 0.5em; - padding-left: 0.5em; - background-color: #fabf00; - text-align: center; - border: 1px solid black; /* 8cacbb; */ -} - -div.actionbox a { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; -} - -div.actionbox a.history { - display: block; - padding-bottom: 0.5em; - padding-top: 0.5em; - margin-left: 0.5em; - font-size: 90%; -} - -div.actionbox { - 
margin-bottom: 2em; - padding-bottom: 1em; - overflow: hidden; /* scroll; */ -} - -/* taken from docutils (oh dear, a bit senseless) */ -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - - -/* -:Author: David Goodger -:Contact: goodger at users.sourceforge.net -:date: $Date: 2003/01/22 22:26:48 $ -:version: $Revision: 1.29 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ -/* -.first { - margin-top: 0 } - -.last { - margin-bottom: 0 } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: 
red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.address { - margin-bottom: 0 ; - margin-top: 0 ; - font-family: serif ; - font-size: 100% } - -pre.line-block { - font-family: serif ; - font-size: 100% } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.interpreted { - font-family: sans-serif } - -span.option { - white-space: nowrap } - -span.option-argument { - font-style: italic } - -span.pre { - white-space: pre } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: top } - -th.docinfo-name, th.field-name { - font-weight: bold ; - text-align: left ; - white-space: nowrap } - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - font-size: 100% } - -tt { - background-color: #eeeeee } - -ul.auto-toc { - list-style-type: none } -*/ - -div.section { - margin-top: 1.0em ; -} - -div.abstract { - margin: 2em 4em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } diff --git a/pypy/doc/navlist b/pypy/doc/navlist deleted file mode 100644 --- a/pypy/doc/navlist +++ /dev/null @@ -1,9 +0,0 @@ -[ - 'architecture.html', - 'getting-started.html', - 'coding-guide.html', - 'objspace.html', - 'translation.html', -# 'misc.html', - 
'theory.html', -] diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -5,30 +5,12 @@ .. doc-index: This needs merging somehow -.. svn-help.rst: Needs merging/replacing with hg stuff: - - .. toctree:: distribution.rst - objspace-proxies.rst - - old_news.rst - - rffi.rst - - sandbox.rst - - statistic/index.rst - - docindex.rst - - svn-help.rst - dot-net.rst - maemo.rst From commits-noreply at bitbucket.org Mon Apr 25 16:36:47 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 16:36:47 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: fixes issue 688 of not optimizing out repeated getfield_gc_pure on structures with multiple immutable fields Message-ID: <20110425143647.7B4AB282B9D@codespeak.net> Author: Hakan Ardo Branch: post-release-1.5 Changeset: r43597:87c68a7494a8 Date: 2011-04-25 16:36 +0200 http://bitbucket.org/pypy/pypy/changeset/87c68a7494a8/ Log: fixes issue 688 of not optimizing out repeated getfield_gc_pure on structures with multiple immutable fields diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2102,6 +2102,45 @@ self.check_loops(int_rshift=1, everywhere=True) + def test_bug688_multiple_immutable_fields(self): + myjitdriver = JitDriver(greens=[], reds=['counter','context']) + + class Tag: + pass + class InnerContext(): + _immutable_fields_ = ['variables','local_names'] + def __init__(self, variables): + self.variables = variables + self.local_names = [0] + + def store(self): + self.local_names[0] = 1 + + def retrieve(self): + variables = hint(self.variables, promote=True) + result = self.local_names[0] + if result == 0: + return -1 + else: + return -1 + def build(): + context = InnerContext(Tag()) + + context.store() + + counter = 0 + while True: + myjitdriver.jit_merge_point(context=context, counter = counter) + 
context.retrieve() + context.retrieve() + + counter += 1 + if counter > 10: + return 7 + assert self.meta_interp(build, []) == 7 + self.check_loops(getfield_gc_pure=0) + self.check_loops(getfield_gc_pure=2, everywhere=True) + class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -59,7 +59,8 @@ def find_rewritable_bool(self, op, args): try: oldopnum = opboolinvers[op.getopnum()] - targs = [args[0], args[1], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]], + None)) if self.try_boolinvers(op, targs): return True except KeyError: @@ -67,7 +68,8 @@ try: oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL - targs = [args[1], args[0], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) oldop = self.optimizer.pure_operations.get(targs, None) if oldop is not None and oldop.getdescr() is op.getdescr(): self.make_equal_to(op.result, self.getvalue(oldop.result)) @@ -77,7 +79,8 @@ try: oldopnum = opboolinvers[opboolreflex[op.getopnum()]] - targs = [args[1], args[0], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) if self.try_boolinvers(op, targs): return True except KeyError: diff --git a/pypy/jit/metainterp/optimizeutil.py b/pypy/jit/metainterp/optimizeutil.py --- a/pypy/jit/metainterp/optimizeutil.py +++ b/pypy/jit/metainterp/optimizeutil.py @@ -99,7 +99,9 @@ make_sure_not_resized(args) res = 0x345678 for arg in args: - if isinstance(arg, history.Const): + if arg is None: + y = 17 + elif isinstance(arg, history.Const): y = arg._get_hash_() else: y = compute_identity_hash(arg) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py 
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -220,7 +220,7 @@ self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op def has_pure_result(self, opnum, args, descr): - op = ResOperation(opnum, args, None) + op = ResOperation(opnum, args, None, descr) key = self.optimizer.make_args_key(op) op = self.optimizer.pure_operations.get(key, None) if op is None: @@ -482,7 +482,7 @@ def make_args_key(self, op): n = op.numargs() - args = [None] * (n + 1) + args = [None] * (n + 2) for i in range(n): arg = op.getarg(i) try: @@ -493,6 +493,7 @@ arg = value.get_key_box() args[i] = arg args[n] = ConstInt(op.getopnum()) + args[n+1] = op.getdescr() return args def optimize_default(self, op): diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -219,6 +219,7 @@ same before and after translation, except for RPython instances on the lltypesystem. """ + assert x is not None result = object.__hash__(x) try: x.__dict__['__precomputed_identity_hash'] = result From commits-noreply at bitbucket.org Mon Apr 25 16:38:45 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 16:38:45 +0200 (CEST) Subject: [pypy-svn] pypy default: Removed fixeol (it was an old svn leftover) Message-ID: <20110425143845.4D475282B9D@codespeak.net> Author: Dario Bertini Branch: Changeset: r43598:f3dbe0c32633 Date: 2011-04-25 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/f3dbe0c32633/ Log: Removed fixeol (it was an old svn leftover) diff --git a/pypy/tool/fixeol b/pypy/tool/fixeol deleted file mode 100755 --- a/pypy/tool/fixeol +++ /dev/null @@ -1,109 +0,0 @@ -#! /usr/bin/env python -""" -This script walks over the files and subdirs of the specified directories -('.' 
by default), and changes the svn properties to match the PyPy guidelines: - - svn:ignore includes '*.pyc' and '*.pyo' for all directories - svn:eol-style is 'native' for *.py and *.txt files - -""" - -import sys, os -import autopath -import py - -forbidden = range(0,32) -forbidden.remove(9) # tab -forbidden.remove(10) # lf -forbidden.remove(12) # ff, ^L -forbidden.remove(13) # cr - - -def looksbinary(data, forbidden = [chr(i) for i in forbidden]): - "Check if some data chunk appears to be binary." - for c in forbidden: - if c in data: - return True - return False - -def can_set_eol_style(path): - "check to see if we could set eol-style on the path." - data = path.read(mode='rb') - if looksbinary(data): - print "%s looks like a binary, ignoring" % path - return False - original = data - data = data.replace('\r\n', '\n') - data = data.replace('\r', '\n') - data = data.replace('\n', os.linesep) - if data != original: - print "*"*30 - print "---> %s <---" % path - print ("WARNING: the file content was modified " - "by fixing the EOL style.") - print "*"*30 - #return False - path.write(data, mode='wb') - return True - return True - -def checkeolfile(path): - return path.ext in ('.txt', '.py', '.asc', '.h', '.c') - -def fixdirectory(path): - print "+ checking directory", path, - fns = path.listdir(checkeolfile) - if fns: - ignores = path.propget('svn:ignore') - newignores = ignores - l = ignores.split('\n') - for x in ('*.pyc', '*.pyo'): - if x not in l: - l.append(x) - newignores = "\n".join(l) - print ", setting ignores", newignores - path.propset('svn:ignore', newignores) - else: - print - for fn in fns: - fixfile(fn) - - for x in path.listdir(lambda x: x.check(dir=1, versioned=True)): - if x.check(link=1): - continue - fixdirectory(x) - -def fixfile(path): - x = path.localpath.relto(py.path.local()) - if not x: - x = path.localpath - print "checking", x, - if path.check(versioned=0): - return False - oldprop = path.propget('svn:eol-style') - if oldprop: - print 
"eol-style already set (%r)" %(oldprop, ) - else: - if can_set_eol_style(path): - print "setting eol-style native" - path.propset('svn:eol-style', 'native') - else: - print "cannot set eol-style" - -if __name__ == '__main__': - if len(sys.argv) > 1: - for fname in sys.argv[1:]: - paths = [py.path.svnwc(x) for x in sys.argv[1:]] - else: - paths = [py.path.svnwc()] - - for path in paths: - if path.check(link=1): - print 'ignoring link', path - elif path.check(dir=1): - fixdirectory(path) - elif path.check(file=1): - fixfile(path) - else: - print "ignoring", path - From commits-noreply at bitbucket.org Mon Apr 25 17:08:00 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 17:08:00 +0200 (CEST) Subject: [pypy-svn] pypy default: Backed out changeset dc4f7684d9b3 Message-ID: <20110425150800.8A98736C208@codespeak.net> Author: Dario Bertini Branch: Changeset: r43599:289d772b4cff Date: 2011-04-25 17:06 +0200 http://bitbucket.org/pypy/pypy/changeset/289d772b4cff/ Log: Backed out changeset dc4f7684d9b3 (reintegrated the greenlet module) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.py @@ -0,0 +1,1 @@ +from _stackless import greenlet From commits-noreply at bitbucket.org Mon Apr 25 17:08:02 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 17:08:02 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110425150802.8130C282BEB@codespeak.net> Author: Dario Bertini Branch: Changeset: r43600:840d77bf3659 Date: 2011-04-25 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/840d77bf3659/ Log: merge heads diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.py @@ -0,0 +1,1 @@ +from _stackless import greenlet From commits-noreply at bitbucket.org Mon Apr 25 17:11:36 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 17:11:36 +0200 (CEST) Subject: 
[pypy-svn] pypy documentation-cleanup: Updated link to the greenlet documentation Message-ID: <20110425151136.2EB79282B9D@codespeak.net> Author: Dario Bertini Branch: documentation-cleanup Changeset: r43601:4112d0db16de Date: 2011-04-25 17:10 +0200 http://bitbucket.org/pypy/pypy/changeset/4112d0db16de/ Log: Updated link to the greenlet documentation diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -592,7 +592,7 @@ .. _`Stackless Python`: http://www.stackless.com -.. _`documentation of the greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt +.. _`documentation of the greenlets`: http://packages.python.org/greenlet/ .. _`Stackless Transform`: translation.html#the-stackless-transform .. include:: _ref.rst From commits-noreply at bitbucket.org Mon Apr 25 17:22:02 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 17:22:02 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): fix the auto-generation to generate links to the code on Message-ID: <20110425152202.8765C282B9D@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43602:07acca39c902 Date: 2011-04-25 17:21 +0200 http://bitbucket.org/pypy/pypy/changeset/07acca39c902/ Log: (lac, cfbolz): fix the auto-generation to generate links to the code on bitbucket. use the auto-generation in more places. also, check that _ref.rst is included where it needs to be. 
diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -152,11 +152,11 @@ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher - in pyopcode.py_, frame and code objects in eval.py_ and pyframe.py_, - function objects and argument passing in function.py_ and argument.py_, - the object space interface definition in baseobjspace.py_, modules in - module.py_ and mixedmodule.py_. Core types supporting the bytecode - interpreter are defined in typedef.py_. + in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, + function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, + the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, and input data files that allow it to parse both Python 2.3 and 2.4 @@ -170,17 +170,17 @@ resulting binary interactively much more pleasant. * `pypy/objspace/std`_ contains the `Standard object space`_. The main file - is objspace.py_. For each type, the files ``xxxtype.py`` and + is `pypy/interpreter/objspace.py`_. For each type, the files ``xxxtype.py`` and ``xxxobject.py`` contain respectively the definition of the type and its (default) implementation. -* `pypy/objspace`_ contains a few other object spaces: the thunk_, - trace_ and flow_ object spaces. The latter is a relatively short piece +* `pypy/objspace`_ contains a few other object spaces: the `pypy/objspace/thunk.py`_, + `pypy/objspace/trace`_ and `pypy/objspace/flow`_ object spaces. 
The latter is a relatively short piece of code that builds the control flow graphs when the bytecode interpreter runs in it. * `pypy/translator`_ contains the code analysis and generation stuff. - Start reading from translator.py_, from which it should be easy to follow + Start reading from translator.py, from which it should be easy to follow the pieces of code involved in the various translation phases. * `pypy/annotation`_ contains the data model for the type annotation that @@ -397,21 +397,7 @@ .. _`Dot Graphviz`: http://www.graphviz.org/ .. _Pygame: http://www.pygame.org/ -.. _pyopcode.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/pyopcode.py -.. _eval.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/eval.py -.. _pyframe.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/pyframe.py -.. _function.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/function.py -.. _argument.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/argument.py -.. _baseobjspace.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/baseobjspace.py -.. _module.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/module.py -.. _mixedmodule.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/mixedmodule.py -.. _typedef.py: https://bitbucket.org/pypy/pypy/src/tip/pypy/interpreter/typedef.py .. _Standard object space: objspace.html#the-standard-object-space -.. _objspace.py: ../../../../pypy/objspace/std/objspace.py -.. _thunk: ../../../../pypy/objspace/thunk.py -.. _trace: ../../../../pypy/objspace/trace.py -.. _flow: ../../../../pypy/objspace/flow/ -.. _translator.py: ../../../../pypy/translator/translator.py .. _mailing lists: index.html .. _documentation: docindex.html .. 
_unit tests: coding-guide.html#test-design diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -485,7 +485,7 @@ ---------------------------------------------------- Suppose we want to have a list which stores all operations performed on -it for later analysis. We can use the small `tputil`_ module to help +it for later analysis. We can use the small `lib_pypy/tputil.py`_ module to help with transparently proxying builtin instances:: from tputil import make_proxy @@ -534,10 +534,10 @@ .. _tputil: -tputil help module +tputil helper module ---------------------------- -The `tputil.py`_ module provides: +The `lib_pypy/tputil.py`_ module provides: * ``make_proxy(controller, type, obj)``: function which creates a transparent proxy controlled by the given @@ -595,8 +595,8 @@ to application level code. Transparent proxies are implemented on top of the `standard object -space`_, in `proxy_helpers.py`_, `proxyobject.py`_ and -`transparent.py`_. To use them you will need to pass a +space`_, in `pypy/objspace/std/proxy_helpers.py`_, `pypy/objspace/std/proxyobject.py`_ and +`pypy/objspace/std/transparent.py`_. To use them you will need to pass a `--objspace-std-withtproxy`_ option to ``py.py`` or ``translate.py``. This registers implementations named ``W_TransparentXxx`` - which usually correspond to an @@ -607,10 +607,6 @@ lists, dicts, exceptions, tracebacks and frames. .. _`standard object space`: objspace.html#the-standard-object-space -.. _`proxy_helpers.py`: ../../../../pypy/objspace/std/proxy_helpers.py -.. _`proxyobject.py`: ../../../../pypy/objspace/std/proxyobject.py -.. _`transparent.py`: ../../../../pypy/objspace/std/transparent.py -.. _`tputil.py`: ../../lib_pypy/tputil.py .. 
[D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -19,6 +19,8 @@ Garbage collectors currently written for the GC framework ========================================================= +XXX we need to add something about minimark + (Very rough sketch only for now.) Reminder: to select which GC you want to include in a translated @@ -32,7 +34,7 @@ -------------- Classical Mark and Sweep collector. Also contains a lot of experimental -and half-unmaintained features. See `rpython/memory/gc/marksweep.py`_. +and half-unmaintained features. See `pypy/rpython/memory/gc/marksweep.py`_. Semispace copying collector --------------------------- @@ -40,7 +42,7 @@ Two arenas of equal size, with only one arena in use and getting filled with new objects. When the arena is full, the live objects are copied into the other arena using Cheney's algorithm. The old arena is then -cleared. See `rpython/memory/gc/semispace.py`_. +cleared. See `pypy/rpython/memory/gc/semispace.py`_. On Unix the clearing is done by reading ``/dev/zero`` into the arena, which is extremely memory efficient at least on Linux: it lets the @@ -53,7 +55,7 @@ Generational GC --------------- -This is a two-generations GC. See `rpython/memory/gc/generation.py`_. +This is a two-generations GC. See `pypy/rpython/memory/gc/generation.py`_. It is implemented as a subclass of the Semispace copying collector. It adds a nursery, which is a chunk of the current semispace. Its size is @@ -84,7 +86,7 @@ Each generation is collected much less often than the previous one. 
The division of the generations is slightly more complicated than just nursery / semispace / external; see the diagram at the start of the -source code, in `rpython/memory/gc/hybrid.py`_. +source code, in `pypy/rpython/memory/gc/hybrid.py`_. Mark & Compact GC ----------------- @@ -122,6 +124,6 @@ information in the regular headers. More details are available as comments at the start of the source -in `rpython/memory/gc/markcompact.py`_. +in `pypy/rpython/memory/gc/markcompact.py`_. .. include:: _ref.rst diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -14,7 +14,7 @@ PyPy's bytecode interpreter has a structure reminiscent of CPython's Virtual Machine: It processes code objects parsed and compiled from -Python source code. It is implemented in the `interpreter/`_ directory. +Python source code. It is implemented in the `pypy/interpreter/`_ directory. People familiar with the CPython implementation will easily recognize similar concepts there. The major differences are the overall usage of the `object space`_ indirection to perform operations on objects, and @@ -27,7 +27,7 @@ abstract syntax tree builder, bytecode generator). The latter passes are based on the ``compiler`` package from the standard library of CPython, with various improvements and bug fixes. The bytecode compiler -(living under `interpreter/astcompiler/`_) is now integrated and is +(living under `pypy/interpreter/astcompiler/`_) is now integrated and is translated with the rest of PyPy. Code objects contain diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -90,7 +90,7 @@ (although these steps are not quite as distinct as you might think from this presentation). 
-There is an `interactive interface`_ called `translatorshell.py`_ to the +There is an `interactive interface`_ called `pypy/bin/translatorshell.py`_ to the translation process which allows you to interactively work through these stages. @@ -104,7 +104,6 @@ .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`Flow Object Space`: objspace.html#the-flow-object-space .. _`interactive interface`: getting-started-dev.html#try-out-the-translator -.. _`translatorshell.py`: ../../../../pypy/bin/translatorshell.py .. _`flow model`: .. _`control flow graphs`: @@ -117,7 +116,7 @@ which are the basic data structures of the translation process. -All these types are defined in `pypy.objspace.flow.model`_ (which is a rather +All these types are defined in `pypy/objspace/flow/model/`_ (which is a rather important module in the PyPy source base, to reinforce the point). The flow graph of a function is represented by the class ``FunctionGraph``. @@ -271,7 +270,6 @@ should not attempt to actually mutate such Constants. .. _`document describing object spaces`: objspace.html -.. _`pypy.objspace.flow.model`: ../../../../pypy/objspace/flow/model.py .. _Annotator: @@ -295,7 +293,7 @@ An "annotation" is an instance of a subclass of ``SomeObject``. Each subclass that represents a specific family of objects. -Here is an overview (see ``pypy.annotation.model``): +Here is an overview (see ``pypy/annotation/model/``): * ``SomeObject`` is the base class. An instance of ``SomeObject()`` represents any Python object, and as such usually means that the input diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -147,14 +147,6 @@ ./translate.py --backend=cli targetpypystandalone.py -Or better, try out the experimental `branch/cli-jit`_ described by -Antonio Cuni's `Ph.D. 
thesis`_ and translate with the JIT:: - - ./translate.py -Ojit --backend=cli targetpypystandalone.py - -.. _`branch/cli-jit`: https://bitbucket.org/pypy/pypy/src/tip -.. _`Ph.D. thesis`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf - The executable and all its dependencies will be stored in the ./pypy-cli-data directory. To run pypy.NET, you can run ./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.rst --- a/pypy/doc/_ref.rst +++ b/pypy/doc/_ref.rst @@ -1,106 +1,126 @@ -.. _`demo/`: ../../demo -.. _`demo/pickle_coroutine.py`: ../../demo/pickle_coroutine.py -.. _`lib-python/`: ../../lib-python -.. _`lib-python/2.5.2/dis.py`: ../../lib-python/2.5.2/dis.py -.. _`annotation/`: -.. _`pypy/annotation`: ../../../../pypy/annotation -.. _`pypy/annotation/annrpython.py`: ../../../../pypy/annotation/annrpython.py -.. _`annotation/binaryop.py`: ../../../../pypy/annotation/binaryop.py -.. _`pypy/annotation/builtin.py`: ../../../../pypy/annotation/builtin.py -.. _`pypy/annotation/model.py`: ../../../../pypy/annotation/model.py -.. _`bin/`: ../../../../pypy/bin -.. _`config/`: ../../../../pypy/config -.. _`pypy/config/pypyoption.py`: ../../../../pypy/config/pypyoption.py -.. _`doc/`: ../../../../pypy/doc -.. _`doc/config/`: ../../../../pypy/doc/config -.. _`doc/discussion/`: ../../../../pypy/doc/discussion -.. _`interpreter/`: -.. _`pypy/interpreter`: ../../../../pypy/interpreter -.. _`pypy/interpreter/argument.py`: ../../../../pypy/interpreter/argument.py -.. _`interpreter/astcompiler/`: -.. _`pypy/interpreter/astcompiler`: ../../../../pypy/interpreter/astcompiler -.. _`pypy/interpreter/executioncontext.py`: ../../../../pypy/interpreter/executioncontext.py -.. _`pypy/interpreter/function.py`: ../../../../pypy/interpreter/function.py -.. _`interpreter/gateway.py`: -.. _`pypy/interpreter/gateway.py`: ../../../../pypy/interpreter/gateway.py -.. 
_`pypy/interpreter/generator.py`: ../../../../pypy/interpreter/generator.py -.. _`pypy/interpreter/mixedmodule.py`: ../../../../pypy/interpreter/mixedmodule.py -.. _`pypy/interpreter/module.py`: ../../../../pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: ../../../../pypy/interpreter/nestedscope.py -.. _`pypy/interpreter/pyopcode.py`: ../../../../pypy/interpreter/pyopcode.py -.. _`interpreter/pyparser/`: -.. _`pypy/interpreter/pyparser`: ../../../../pypy/interpreter/pyparser -.. _`pypy/interpreter/pyparser/pytokenizer.py`: ../../../../pypy/interpreter/pyparser/pytokenizer.py -.. _`pypy/interpreter/pyparser/parser.py`: ../../../../pypy/interpreter/pyparser/parser.py -.. _`pypy/interpreter/pyparser/pyparse.py`: ../../../../pypy/interpreter/pyparser/pyparse.py -.. _`pypy/interpreter/pyparser/future.py`: ../../../../pypy/interpreter/pyparser/future.py -.. _`pypy/interpreter/pyparser/metaparser.py`: ../../../../pypy/interpreter/pyparser/metaparser.py -.. _`pypy/interpreter/astcompiler/astbuilder.py`: ../../../../pypy/interpreter/astcompiler/astbuilder.py -.. _`pypy/interpreter/astcompiler/optimize.py`: ../../../../pypy/interpreter/astcompiler/optimize.py -.. _`pypy/interpreter/astcompiler/codegen.py`: ../../../../pypy/interpreter/astcompiler/codegen.py -.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: ../../../../pypy/interpreter/astcompiler/tools/asdl_py.py -.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: ../../../../pypy/interpreter/astcompiler/tools/Python.asdl -.. _`pypy/interpreter/astcompiler/assemble.py`: ../../../../pypy/interpreter/astcompiler/assemble.py -.. _`pypy/interpreter/astcompiler/symtable.py`: ../../../../pypy/interpreter/astcompiler/symtable.py -.. _`pypy/interpreter/astcompiler/asthelpers.py`: ../../../../pypy/interpreter/astcompiler/asthelpers.py -.. _`pypy/interpreter/astcompiler/ast.py`: ../../../../pypy/interpreter/astcompiler/ast.py -.. _`pypy/interpreter/typedef.py`: ../../../../pypy/interpreter/typedef.py -.. 
_`lib/`: -.. _`lib_pypy/`: ../../lib_pypy -.. _`lib/distributed/`: ../../lib_pypy/distributed -.. _`lib_pypy/stackless.py`: ../../lib_pypy/stackless.py -.. _`lib_pypy/pypy_test/`: ../../lib_pypy/pypy_test -.. _`module/`: -.. _`pypy/module`: -.. _`pypy/module/`: ../../../../pypy/module -.. _`pypy/module/__builtin__/__init__.py`: ../../../../pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_clonable.py`: ../../../../pypy/module/_stackless/test/test_clonable.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: ../../../../pypy/module/_stackless/test/test_composable_coroutine.py -.. _`objspace/`: -.. _`pypy/objspace`: ../../../../pypy/objspace -.. _`objspace/dump.py`: ../../../../pypy/objspace/dump.py -.. _`objspace/flow/`: ../../../../pypy/objspace/flow -.. _`objspace/std/`: -.. _`pypy/objspace/std`: ../../../../pypy/objspace/std -.. _`objspace/taint.py`: ../../../../pypy/objspace/taint.py -.. _`objspace/thunk.py`: -.. _`pypy/objspace/thunk.py`: ../../../../pypy/objspace/thunk.py -.. _`objspace/trace.py`: -.. _`pypy/objspace/trace.py`: ../../../../pypy/objspace/trace.py -.. _`pypy/rlib`: -.. _`rlib/`: ../../../../pypy/rlib -.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py -.. _`pypy/rlib/test`: ../../../../pypy/rlib/test -.. _`pypy/rpython`: -.. _`pypy/rpython/`: -.. _`rpython/`: ../../../../pypy/rpython -.. _`rpython/lltypesystem/`: ../../../../pypy/rpython/lltypesystem -.. _`pypy/rpython/lltypesystem/lltype.py`: -.. _`rpython/lltypesystem/lltype.py`: ../../../../pypy/rpython/lltypesystem/lltype.py -.. _`rpython/memory/`: ../../../../pypy/rpython/memory -.. _`rpython/memory/gc/generation.py`: ../../../../pypy/rpython/memory/gc/generation.py -.. _`rpython/memory/gc/hybrid.py`: ../../../../pypy/rpython/memory/gc/hybrid.py -.. _`rpython/memory/gc/markcompact.py`: ../../../../pypy/rpython/memory/gc/markcompact.py -.. _`rpython/memory/gc/marksweep.py`: ../../../../pypy/rpython/memory/gc/marksweep.py -.. 
_`rpython/memory/gc/semispace.py`: ../../../../pypy/rpython/memory/gc/semispace.py -.. _`rpython/ootypesystem/`: ../../../../pypy/rpython/ootypesystem -.. _`rpython/ootypesystem/ootype.py`: ../../../../pypy/rpython/ootypesystem/ootype.py -.. _`rpython/rint.py`: ../../../../pypy/rpython/rint.py -.. _`rpython/rlist.py`: ../../../../pypy/rpython/rlist.py -.. _`rpython/rmodel.py`: ../../../../pypy/rpython/rmodel.py -.. _`pypy/rpython/rtyper.py`: ../../../../pypy/rpython/rtyper.py -.. _`pypy/rpython/test/test_llinterp.py`: ../../../../pypy/rpython/test/test_llinterp.py -.. _`pypy/test_all.py`: ../../../../pypy/test_all.py -.. _`tool/`: ../../../../pypy/tool -.. _`tool/algo/`: ../../../../pypy/tool/algo -.. _`tool/pytest/`: ../../../../pypy/tool/pytest -.. _`pypy/translator`: -.. _`translator/`: ../../../../pypy/translator -.. _`translator/backendopt/`: ../../../../pypy/translator/backendopt -.. _`translator/c/`: ../../../../pypy/translator/c -.. _`translator/cli/`: ../../../../pypy/translator/cli -.. _`translator/goal/`: ../../../../pypy/translator/goal -.. _`pypy/translator/goal/targetnopstandalone.py`: ../../../../pypy/translator/goal/targetnopstandalone.py -.. _`translator/jvm/`: ../../../../pypy/translator/jvm -.. _`translator/stackless/`: ../../../../pypy/translator/stackless -.. _`translator/tool/`: ../../../../pypy/translator/tool +.. _`./LICENSE`: https://bitbucket.org/pypy/pypy/src/default/./LICENSE +.. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ +.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py +.. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ +.. _`lib-python/2.5.2/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.5.2/dis.py +.. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ +.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ +.. 
_`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py +.. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py +.. _`pypy/annotation`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation +.. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ +.. _`pypy/annotation/annrpython.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/annrpython.py +.. _`pypy/annotation/binaryop.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/binaryop.py +.. _`pypy/annotation/builtin.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/builtin.py +.. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ +.. _`pypy/bin/translatorshell.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/translatorshell.py +.. _`pypy/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/ +.. _`pypy/config/pypyoption.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/pypyoption.py +.. _`pypy/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/translationoption.py +.. _`pypy/doc/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/ +.. _`pypy/doc/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/config/ +.. _`pypy/doc/discussion/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/discussion/ +.. _`pypy/interpreter`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter +.. _`pypy/interpreter/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/ +.. _`pypy/interpreter/argument.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/argument.py +.. _`pypy/interpreter/astcompiler`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler +.. _`pypy/interpreter/astcompiler/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ +.. 
_`pypy/interpreter/astcompiler/assemble.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/assemble.py +.. _`pypy/interpreter/astcompiler/ast.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ast.py +.. _`pypy/interpreter/astcompiler/astbuilder.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/astbuilder.py +.. _`pypy/interpreter/astcompiler/asthelpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/asthelpers.py +.. _`pypy/interpreter/astcompiler/codegen.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/codegen.py +.. _`pypy/interpreter/astcompiler/optimize.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/optimize.py +.. _`pypy/interpreter/astcompiler/symtable.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/symtable.py +.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/tools/Python.asdl +.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/tools/asdl_py.py +.. _`pypy/interpreter/baseobjspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/baseobjspace.py +.. _`pypy/interpreter/eval.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/eval.py +.. _`pypy/interpreter/executioncontext.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/executioncontext.py +.. _`pypy/interpreter/function.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/function.py +.. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py +.. _`pypy/interpreter/generator.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/generator.py +.. 
_`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py +.. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py +.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py +.. _`pypy/interpreter/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/objspace.py +.. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py +.. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py +.. _`pypy/interpreter/pyparser`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser +.. _`pypy/interpreter/pyparser/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/ +.. _`pypy/interpreter/pyparser/future.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/future.py +.. _`pypy/interpreter/pyparser/metaparser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/metaparser.py +.. _`pypy/interpreter/pyparser/parser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/parser.py +.. _`pypy/interpreter/pyparser/pyparse.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pyparse.py +.. _`pypy/interpreter/pyparser/pytokenizer.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pytokenizer.py +.. _`pypy/interpreter/typedef.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/typedef.py +.. _`pypy/module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module +.. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ +.. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. 
_`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py +.. _`pypy/objspace`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace +.. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ +.. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py +.. _`pypy/objspace/flow`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow +.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ +.. _`pypy/objspace/flow/model/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/model/ +.. _`pypy/objspace/std`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std +.. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ +.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py +.. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py +.. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py +.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py +.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py +.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py +.. _`pypy/objspace/taint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/taint.py +.. 
_`pypy/objspace/thunk.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/thunk.py +.. _`pypy/objspace/trace`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace +.. _`pypy/objspace/trace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace.py +.. _`pypy/rlib`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib +.. _`pypy/rlib/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/ +.. _`pypy/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/listsort.py +.. _`pypy/rlib/nonconst.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/nonconst.py +.. _`pypy/rlib/objectmodel.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/objectmodel.py +.. _`pypy/rlib/parsing/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/parsing/ +.. _`pypy/rlib/parsing/tree.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/parsing/tree.py +.. _`pypy/rlib/rarithmetic.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rarithmetic.py +.. _`pypy/rlib/rbigint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rbigint.py +.. _`pypy/rlib/rrandom.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rrandom.py +.. _`pypy/rlib/rsocket.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rsocket.py +.. _`pypy/rlib/rstack.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rstack.py +.. _`pypy/rlib/streamio.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/streamio.py +.. _`pypy/rlib/test`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test +.. _`pypy/rlib/unroll.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/unroll.py +.. _`pypy/rpython`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython +.. _`pypy/rpython/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +.. _`pypy/rpython/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/ +.. 
_`pypy/rpython/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/lltype.py +.. _`pypy/rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/ +.. _`pypy/rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/generation.py +.. _`pypy/rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/hybrid.py +.. _`pypy/rpython/memory/gc/markcompact.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/markcompact.py +.. _`pypy/rpython/memory/gc/marksweep.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/marksweep.py +.. _`pypy/rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/semispace.py +.. _`pypy/rpython/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ +.. _`pypy/rpython/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ootype.py +.. _`pypy/rpython/rint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rint.py +.. _`pypy/rpython/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rlist.py +.. _`pypy/rpython/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rmodel.py +.. _`pypy/rpython/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rtyper.py +.. _`pypy/rpython/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/test/test_llinterp.py +.. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ +.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ +.. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ +.. _`pypy/tool/traceconfig.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/traceconfig.py +.. 
_`pypy/translator`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator +.. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ +.. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ +.. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +.. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ +.. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ +.. _`pypy/translator/goal/targetnopstandalone.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/targetnopstandalone.py +.. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ +.. _`pypy/translator/stackless/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/stackless/ +.. _`pypy/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/tool/ diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -57,7 +57,7 @@ `development methodology`_ describes our sprint-driven approach. -`license`_ contains licensing details (basically a straight MIT-license). +`./LICENSE`_ contains licensing details (basically a straight MIT-license). `Glossary`_ of PyPy words to help you align your inner self with the PyPy universe. @@ -149,7 +149,6 @@ .. _`development methodology`: dev_method.html .. _`sprint reports`: sprint-reports.html .. _`papers, talks and related projects`: extradoc.html -.. _`license`: ../../LICENSE .. _`PyPy LOC statistics`: http://codespeak.net/~hpk/pypy-stat/ .. _`PyPy statistics`: http://codespeak.net/pypy/trunk/pypy/doc/statistic .. 
_`object spaces`: objspace.html @@ -178,92 +177,92 @@ Here is a fully referenced alphabetical two-level deep directory overview of PyPy: -============================ =========================================== -Directory explanation/links -============================ =========================================== -`annotation/`_ `type inferencing code`_ for `RPython`_ programs +================================ =========================================== +Directory explanation/links +================================ =========================================== +`pypy/annotation/`_ `type inferencing code`_ for `RPython`_ programs -`bin/`_ command-line scripts, mainly `py.py`_ and `translatorshell.py`_ +`pypy/bin/`_ command-line scripts, mainly `py.py`_ and `translatorshell.py`_ -`config/`_ handles the numerous options for building and running PyPy +`pypy/config/`_ handles the numerous options for building and running PyPy -`doc/`_ text versions of PyPy developer documentation +`pypy/doc/`_ text versions of PyPy developer documentation -`doc/config/`_ documentation for the numerous translation options +`pypy/doc/config/`_ documentation for the numerous translation options -`doc/discussion/`_ drafts of ideas and documentation +`pypy/doc/discussion/`_ drafts of ideas and documentation -``doc/*/`` other specific documentation topics or tools +``doc/*/`` other specific documentation topics or tools -`interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) +`pypy/interpreter/`_ `bytecode interpreter`_ and related objects + (frames, functions, modules,...) 
-`interpreter/pyparser/`_ interpreter-level Python source parser +`pypy/interpreter/pyparser/`_ interpreter-level Python source parser -`interpreter/astcompiler/`_ interpreter-level bytecode compiler, via an AST - representation +`pypy/interpreter/astcompiler/`_ interpreter-level bytecode compiler, via an AST + representation -`module/`_ contains `mixed modules`_ implementing core modules with - both application and interpreter level code. - Not all are finished and working. Use the ``--withmod-xxx`` - or ``--allworkingmodules`` translation options. +`pypy/module/`_ contains `mixed modules`_ implementing core modules with + both application and interpreter level code. + Not all are finished and working. Use the ``--withmod-xxx`` + or ``--allworkingmodules`` translation options. -`objspace/`_ `object space`_ implementations +`pypy/objspace/`_ `object space`_ implementations -`objspace/trace.py`_ the `trace object space`_ monitoring bytecode and space operations +`pypy/objspace/trace.py`_ the `trace object space`_ monitoring bytecode and space operations -`objspace/dump.py`_ the dump object space saves a large, searchable log file - with all operations +`pypy/objspace/dump.py`_ the dump object space saves a large, searchable log file + with all operations -`objspace/taint.py`_ the `taint object space`_, providing object tainting +`pypy/objspace/taint.py`_ the `taint object space`_, providing object tainting -`objspace/thunk.py`_ the `thunk object space`_, providing unique object features +`pypy/objspace/thunk.py`_ the `thunk object space`_, providing unique object features -`objspace/flow/`_ the FlowObjSpace_ implementing `abstract interpretation`_ +`pypy/objspace/flow/`_ the FlowObjSpace_ implementing `abstract interpretation`_ -`objspace/std/`_ the StdObjSpace_ implementing CPython's objects and types +`pypy/objspace/std/`_ the StdObjSpace_ implementing CPython's objects and types -`rlib/`_ a `"standard library"`_ for RPython_ programs +`pypy/rlib/`_ a `"standard 
library"`_ for RPython_ programs -`rpython/`_ the `RPython Typer`_ +`pypy/rpython/`_ the `RPython Typer`_ -`rpython/lltypesystem/`_ the `low-level type system`_ for C-like backends +`pypy/rpython/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/ootypesystem/`_ the `object-oriented type system`_ for OO backends +`pypy/rpython/ootypesystem/`_ the `object-oriented type system`_ for OO backends -`rpython/memory/`_ the `garbage collector`_ construction framework +`pypy/rpython/memory/`_ the `garbage collector`_ construction framework -`tool/`_ various utilities and hacks used from various places +`pypy/tool/`_ various utilities and hacks used from various places -`tool/algo/`_ general-purpose algorithmic and mathematic - tools +`pypy/tool/algo/`_ general-purpose algorithmic and mathematic + tools -`tool/pytest/`_ support code for our `testing methods`_ +`pypy/tool/pytest/`_ support code for our `testing methods`_ -`translator/`_ translation_ backends and support code +`pypy/translator/`_ translation_ backends and support code -`translator/backendopt/`_ general optimizations that run before a backend generates code +`pypy/translator/backendopt/`_ general optimizations that run before a backend generates code -`translator/c/`_ the `GenC backend`_, producing C code from an - RPython program (generally via the rtyper_) +`pypy/translator/c/`_ the `GenC backend`_, producing C code from an + RPython program (generally via the rtyper_) -`translator/cli/`_ the `CLI backend`_ for `.NET`_ (Microsoft CLR or Mono_) +`pypy/translator/cli/`_ the `CLI backend`_ for `.NET`_ (Microsoft CLR or Mono_) -`translator/goal/`_ our `main PyPy-translation scripts`_ live here +`pypy/translator/goal/`_ our `main PyPy-translation scripts`_ live here -`translator/jvm/`_ the Java backend +`pypy/translator/jvm/`_ the Java backend -`translator/stackless/`_ the `Stackless Transform`_ +`pypy/translator/stackless/`_ the `Stackless Transform`_ -`translator/tool/`_ helper tools for 
translation, including the Pygame - `graph viewer`_ +`pypy/translator/tool/`_ helper tools for translation, including the Pygame + `graph viewer`_ -``*/test/`` many directories have a test subdirectory containing test - modules (see `Testing in PyPy`_) +``*/test/`` many directories have a test subdirectory containing test + modules (see `Testing in PyPy`_) -``_cache/`` holds cache files from internally `translating application - level to interpreterlevel`_ code. -============================ =========================================== +``_cache/`` holds cache files from internally `translating application + level to interpreterlevel`_ code. +================================ =========================================== .. _`bytecode interpreter`: interpreter.html .. _`translating application level to interpreterlevel`: geninterp.html diff --git a/pypy/doc/configuration.rst b/pypy/doc/configuration.rst --- a/pypy/doc/configuration.rst +++ b/pypy/doc/configuration.rst @@ -186,9 +186,8 @@ The two large parts of PyPy, the standard interpreter and the translation toolchain, have two separate sets of options. The translation toolchain options can be found on the ``config`` attribute of all ``TranslationContext`` -instances and are described in translationoption.py_. The interpreter options +instances and are described in `pypy/config/translationoption.py`_. The interpreter options are attached to the object space, also under the name ``config`` and are -described in pypyoption.py_. +described in `pypy/config/pypyoption.py`_. -.. _translationoption.py: ../config/translationoption.py -.. _pypyoption.py: ../config/pypyoption.py +.. include:: _ref.rst diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -66,7 +66,7 @@ each operation. In both cases the analysis of an operation depends on the annotations of its input arguments. 
This is reflected in the usage of the same ``__extend__`` syntax in the source files (compare e.g. -`annotation/binaryop.py`_ and `rpython/rint.py`_). +`pypy/annotation/binaryop.py`_ and `pypy/rpython/rint.py`_). The analogy stops here, though: while it runs, the Annotator is in the middle of computing the annotations, so it might need to reflow and generalize until @@ -104,7 +104,7 @@ implementations for the same high-level operations. This is the reason for turning representations into explicit objects. -The base Repr class is defined in `rpython/rmodel.py`_. Most of the +The base Repr class is defined in `pypy/rpython/rmodel.py`_. Most of the ``rpython/r*.py`` files define one or a few subclasses of Repr. The method getrepr() of the RTyper will build and cache a single Repr instance per SomeXxx() instance; moreover, two SomeXxx() instances that are equal get the @@ -131,9 +131,9 @@ The RPython Typer uses a standard low-level model which we believe can correspond rather directly to various target languages such as C. This model is implemented in the first part of -`rpython/lltypesystem/lltype.py`_. +`pypy/rpython/lltypesystem/lltype.py`_. -The second part of `rpython/lltypesystem/lltype.py`_ is a runnable +The second part of `pypy/rpython/lltypesystem/lltype.py`_ is a runnable implementation of these types, for testing purposes. It allows us to write and test plain Python code using a malloc() function to obtain and manipulate structures and arrays. This is useful for example to implement and test @@ -191,7 +191,7 @@ types like list in this elementary world. The ``malloc()`` function is a kind of placeholder, which must eventually be provided by the code generator for the target platform; but as we have just seen its Python implementation in -`rpython/lltypesystem/lltype.py`_ works too, which is primarily useful for +`pypy/rpython/lltypesystem/lltype.py`_ works too, which is primarily useful for testing, interactive exploring, etc. 
The argument to ``malloc()`` is the structure type directly, but it returns a @@ -316,7 +316,7 @@ with care: the bigger structure of which they are part of could be freed while the Ptr to the substructure is still in use. In general, it is a good idea to avoid passing around pointers to inlined substructures of malloc()ed structures. -(The testing implementation of `rpython/lltypesystem/lltype.py`_ checks to some +(The testing implementation of `pypy/rpython/lltypesystem/lltype.py`_ checks to some extent that you are not trying to use a pointer to a structure after its container has been freed, using weak references. But pointers to non-GC structures are not officially meant to be weak references: using them after what @@ -429,7 +429,7 @@ change needed to the Annotator to allow it to perform type inference of our very-low-level snippets of code. -See for example `rpython/rlist.py`_. +See for example `pypy/rpython/rlist.py`_. .. _`oo type`: @@ -441,10 +441,10 @@ targeting low level backends such as C, but it is not good enough for targeting higher level backends such as .NET CLI or Java JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/ootypesystem/ootype.py`_. +implemented in the first part of `pypy/rpython/ootypesystem/ootype.py`_. As for the low-level typesystem, the second part of -`rpython/ootypesystem/ootype.py`_ is a runnable implementation of +`pypy/rpython/ootypesystem/ootype.py`_ is a runnable implementation of these types, for testing purposes. 
diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -5,20 +5,13 @@ pypydir = py.path.local(pypy.__file__).dirpath() distdir = pypydir.dirpath() issue_url = 'http://codespeak.net/issue/pypy-dev/' +bitbucket_url = 'https://bitbucket.org/pypy/pypy/src/default/' import urllib2, posixpath -possible_start_dirs = [ - distdir, - distdir.join('pypy'), - # for now, let the jit links point to the oo-jit branch - 'http://codespeak.net/svn/pypy/branch/oo-jit', - 'http://codespeak.net/svn/pypy/branch/oo-jit/pypy', - ] - def makeref(docdir): - reffile = docdir.join('_ref.txt') + reffile = docdir.join('_ref.rst') linkrex = py.std.re.compile('`(\S+)`_') @@ -31,35 +24,19 @@ name2target.setdefault(linktarget, []).append(linkname) for textfile in docdir.listdir(): # for subdirs, see below - if textfile.ext != '.txt': + if textfile.ext != '.rst': continue - for linkname in linkrex.findall(textfile.read()): - if '/' in linkname: - for startdir in possible_start_dirs: - if isinstance(startdir, str): - assert startdir.startswith('http://') - target = posixpath.join(startdir, linkname) - try: - urllib2.urlopen(target).close() - except urllib2.HTTPError: - continue - else: - cand = startdir.join(linkname) - if not cand.check(): - continue - assert cand.relto(distdir) - dotdots = 0 - p = docdir - while p != distdir: - p = p.dirpath() - dotdots += 1 - target = '../' * dotdots + cand.relto(distdir) - addlink(linkname, target) - break - else: - print "WARNING %s: link %r may be bogus" %(textfile, linkname) + content = textfile.read() + found = False + for linkname in linkrex.findall(content): + if '/' in linkname: + found = True + addlink(linkname, bitbucket_url + linkname) elif linkname.startswith('issue'): + found = True addlink(linkname, issue_url+linkname) + if found: + assert ".. 
include:: _ref.rst" in content, "you need to include _ref.rst in %s" % (textfile, ) items = name2target.items() items.sort() diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -14,13 +14,11 @@ to change at some point. Usually it is useful to look at the tests in `pypy/rlib/test`_ to get an impression of how to use a module. -.. _`pypy/rlib`: ../../../../pypy/rlib -.. _`pypy/rlib/test`: ../../../../pypy/rlib/test ``listsort`` ============ -The listsort_ module contains an implementation of the timsort sorting algorithm +The `pypy/rlib/listsort.py`_ module contains an implementation of the timsort sorting algorithm (the sort method of lists is not RPython). To use it, subclass from the ``listsort.TimSort`` class and override the ``lt`` method to change the comparison behaviour. The constructor of ``TimSort`` takes a list as an @@ -29,19 +27,16 @@ be sorted using the ``listsort`` module in one program, otherwise the annotator will be confused. -.. _listsort: ../../../../pypy/rlib/listsort.py - ``nonconst`` ============ -The nonconst_ module is useful mostly for tests. The `flow object space`_ and +The `pypy/rlib/nonconst.py`_ module is useful mostly for tests. The `flow object space`_ and the `annotator`_ do quite some constant folding, which is sometimes not desired in a test. To prevent constant folding on a certain value, use the ``NonConst`` class. The constructor of ``NonConst`` takes an arbitrary value. The instance of ``NonConst`` will behave during annotation like that value, but no constant folding will happen. -.. _nonconst: ../../../../pypy/rlib/nonconst.py .. _`flow object space`: objspace.html#the-flow-object-space .. _`annotator`: translation.html#the-annotation-pass @@ -49,7 +44,7 @@ ``objectmodel`` =============== -The objectmodel_ module is a mixed bag of various functionality. Some of the +The `pypy/rlib/objectmodel.py`_ module is a mixed bag of various functionality. 
Some of the more useful ones are: ``ComputedIntSymbolic``: @@ -95,24 +90,21 @@ won't be allocated but represented by *tagged pointers**, that is pointers that have the lowest bit set. -.. _objectmodel: ../../../../pypy/rlib/objectmodel.py - ``rarithmetic`` =============== -The rarithmetic_ module contains functionality to handle the small differences +The `pypy/rlib/rarithmetic.py`_ module contains functionality to handle the small differences in the behaviour of arithmetic code in regular Python and RPython code. Most of them are already described in the `coding guide`_ -.. _rarithmetic: ../../../../pypy/rlib/rarithmetic.py .. _`coding guide`: coding-guide.html ``rbigint`` =========== -The rbigint module contains a full RPython implementation of the Python ``long`` +The `pypy/rlib/rbigint.py`_ module contains a full RPython implementation of the Python ``long`` type (which itself is not supported in RPython). The ``rbigint`` class contains that implementation. To construct ``rbigint`` instances use the static methods ``fromint``, ``frombool``, ``fromfloat`` and ``fromdecimalstr``. To convert back @@ -122,36 +114,30 @@ these underscores left out for better readability (so ``a.add(b)`` can be used to add two rbigint instances). -.. _rbigint: ../../../../pypy/rlib/rbigint.py - ``rrandom`` =========== -The rrandom_ module contains an implementation of the mersenne twister random +The `pypy/rlib/rrandom.py`_ module contains an implementation of the mersenne twister random number generator. It contains one class ``Random`` which most importantly has a ``random`` method which returns a pseudo-random floating point number between 0.0 and 1.0. -.. _rrandom: ../../../../pypy/rlib/rrandom.py - ``rsocket`` =========== -The rsocket_ module contains an RPython implementation of the functionality of +The `pypy/rlib/rsocket.py`_ module contains an RPython implementation of the functionality of the socket standard library with a slightly different interface. 
The difficulty with the Python socket API is that addresses are not "well-typed" objects: depending on the address family they are tuples, or strings, and so on, which is not suitable for RPython. Instead, ``rsocket`` contains a hierarchy of Address classes, in a typical static-OO-programming style. -.. _rsocket: ../../../../pypy/rlib/rsocket.py - ``rstack`` ========== -The rstack_ module allows an RPython program to control its own execution stack. +The `pypy/rlib/rstack.py`_ module allows an RPython program to control its own execution stack. This is only useful if the program is translated using stackless. An old description of the exposed functions is below. @@ -210,32 +196,28 @@ f() -.. _rstack: ../../../../pypy/rlib/rstack.py - ``streamio`` ============ -The streamio_ contains an RPython stream I/O implementation (which was started +The `pypy/rlib/streamio.py`_ contains an RPython stream I/O implementation (which was started by Guido van Rossum as `sio.py`_ in the CPython sandbox as a prototype for the upcoming new file implementation in Python 3000). -.. _streamio: ../../../../pypy/rlib/streamio.py .. _`sio.py`: http://svn.python.org/view/sandbox/trunk/sio/sio.py ``unroll`` ========== -The unroll_ module most importantly contains the function ``unrolling_iterable`` +The `pypy/rlib/unroll.py`_ module most importantly contains the function ``unrolling_iterable`` which wraps an iterator. Looping over the iterator in RPython code will not produce a loop in the resulting flow graph but will unroll the loop instead. -.. _unroll: ../../../../pypy/rlib/unroll.py ``parsing`` =========== -The parsing_ module is a still in-development module to generate tokenizers and +The `pypy/rlib/parsing/`_ module is a still in-development module to generate tokenizers and parsers in RPython. It is still highly experimental and only really used by the `Prolog interpreter`_ (although in slightly non-standard ways). 
The easiest way to specify a tokenizer/grammar is to write it down using regular expressions and @@ -348,7 +330,7 @@ The parsing process builds up a tree consisting of instances of ``Symbol`` and ``Nonterminal``, the former corresponding to tokens, the latter to nonterminal -symbols. Both classes live in the `pypy.rlib.parsing.tree`_ module. You can use +symbols. Both classes live in the `pypy/rlib/parsing/tree.py`_ module. You can use the ``view()`` method ``Nonterminal`` instances to get a pygame view of the parse tree. @@ -359,13 +341,11 @@ of the nonterminal and ``children`` which is a list of the children attributes. -.. _`pypy.rlib.parsing.tree`: ../../../../pypy/rlib/parsing/tree.py - Visitors ++++++++ To write tree visitors for the parse trees that are RPython, there is a special -baseclass ``RPythonVisitor`` in ``pypy.rlib.parsing.tree``_ to use. If your +baseclass ``RPythonVisitor`` in `pypy/rlib/parsing/tree.py`_ to use. If your class uses this, it will grow a ``dispatch(node)`` method, that calls an appropriate ``visit_`` method, depending on the ``node`` argument. Here the is replaced by the ``symbol`` attribute of the visited node. @@ -531,5 +511,6 @@ .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ -.. _parsing: ../../../../pypy/rlib/parsing/ .. _`json format`: http://www.json.org + +.. include:: _ref.rst diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -352,9 +352,6 @@ silent wrap-around. Whenever we need more control, we use the following helpers (which live the `pypy/rlib/rarithmetic.py`_): -.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py - - **ovfcheck()** This special function should only be used with a single arithmetic operation @@ -887,6 +884,8 @@ for the next milestone, both from an E-Mail and from a web interface. +.. 
_`development tracker`: https://codespeak.net/issue/pypy-dev/ + use your codespeak login or register ------------------------------------ diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -56,7 +56,7 @@ The present document gives a description of the above object spaces. The sources of PyPy contain the various object spaces in the directory -`objspace/`_. +`pypy/objspace/`_. To choose which object space to use, use the :config:`objspace.name` option. @@ -297,7 +297,7 @@ Introduction ------------ -The Standard Object Space (StdObjSpace_) is the direct equivalent of CPython's +The Standard Object Space (`pypy/objspace/std/`_) is the direct equivalent of CPython's object library (the "Objects/" subdirectory in the distribution). It is an implementation of the common Python types in a lower-level language. @@ -341,13 +341,11 @@ using plain integers instead is the complex path, not the other way around. -.. _StdObjSpace: ../../../../pypy/objspace/std/ - Object types ------------ -The larger part of the `StdObjSpace`_ package defines and implements the +The larger part of the `pypy/objspace/std/`_ package defines and implements the library of Python's standard built-in object types. Each type (int, float, list, tuple, str, type, etc.) is typically implemented by two modules: @@ -356,17 +354,17 @@ * the *implementation* module, called ``xxxobject.py``. The ``xxxtype.py`` module basically defines the type object itself. For -example, `listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `listtype.py`_ enumerates the methods +example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when +you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods specific to lists, like ``append()``. 
A particular method implemented by all types is the ``__new__()`` special method, which in Python's new-style-classes world is responsible for creating an instance of the type. In PyPy, ``__new__()`` locates and imports the module implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `tupletype.py`_ +arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ defines ``__new__()`` to import the class ``W_TupleObject`` from -`tupleobject.py`_ and instantiate it. The `tupleobject.py`_ then contains a +`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a "real" implementation of tuples: the way the data is stored in the ``W_TupleObject`` class, how the operations work, etc. @@ -387,18 +385,13 @@ same Python type. PyPy knows that (e.g.) the application-level type of its interpreter-level ``W_StringObject`` instances is str because there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `stringtype.py`_; all +points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all other implementations of strings use the same ``typedef`` from -`stringtype.py`_. +`pypy/objspace/std/stringtype.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. -.. _`listtype.py`: ../../../../pypy/objspace/std/listtype.py -.. _`stringtype.py`: ../../../../pypy/objspace/std/stringtype.py -.. _`tupletype.py`: ../../../../pypy/objspace/std/tupletype.py -.. _`tupleobject.py`: ../../../../pypy/objspace/std/tupleobject.py - .. _`Standard Interpreter Optimizations`: interpreter-optimizations.html @@ -408,12 +401,10 @@ The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. 
For a description of the multimethod variant that we implemented and which features it supports, -see the comment at the start of the source__. However, multimethods +see the comment at the start of `pypy/objspace/std/multimethod.py`_. However, multimethods alone are not enough for the Standard Object Space: the complete picture spans several levels in order to emulate the exact Python semantics. -.. __: ../../../../pypy/objspace/std/multimethod.py - Consider the example of the ``space.getitem(w_a, w_b)`` operation, corresponding to the application-level syntax ``a[b]``. The Standard Object Space contains a corresponding ``getitem`` multimethod and a @@ -552,13 +543,11 @@ operations are usually shown. A quick introduction on how to use the trace object space can be `found here`_. -A number of options for configuration is here in `traceconfig.py`_. +A number of options for configuration is here in `pypy/tool/traceconfig.py`_. .. _`found here` : getting-started-dev.html#tracing-bytecode-and-operations-on-objects .. _`Abstract Interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`traceconfig.py`: ../tool/traceconfig.py - .. _`Flow Object Space`: @@ -568,7 +557,7 @@ Introduction ------------ -The task of the FlowObjSpace_ is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. @@ -600,8 +589,6 @@ v3 = add(v2, Constant(2)) -.. 
_FlowObjSpace: ../../../../pypy/objspace/flow/ - The Flow model -------------- From commits-noreply at bitbucket.org Mon Apr 25 17:22:08 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 17:22:08 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge Message-ID: <20110425152208.A6C98282BEA@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43603:f959a19fc75c Date: 2011-04-25 17:21 +0200 http://bitbucket.org/pypy/pypy/changeset/f959a19fc75c/ Log: merge diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -592,7 +592,7 @@ .. _`Stackless Python`: http://www.stackless.com -.. _`documentation of the greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt +.. _`documentation of the greenlets`: http://packages.python.org/greenlet/ .. _`Stackless Transform`: translation.html#the-stackless-transform .. include:: _ref.rst From commits-noreply at bitbucket.org Mon Apr 25 17:34:31 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Mon, 25 Apr 2011 17:34:31 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac) fix links of directories Message-ID: <20110425153431.B7FC936C206@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43604:5513311abf13 Date: 2011-04-25 17:34 +0200 http://bitbucket.org/pypy/pypy/changeset/5513311abf13/ Log: (cfbolz, lac) fix links of directories diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.rst --- a/pypy/doc/_ref.rst +++ b/pypy/doc/_ref.rst @@ -7,7 +7,6 @@ .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py -.. _`pypy/annotation`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation .. 
_`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ .. _`pypy/annotation/annrpython.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/annrpython.py .. _`pypy/annotation/binaryop.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/binaryop.py @@ -20,10 +19,8 @@ .. _`pypy/doc/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/ .. _`pypy/doc/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/config/ .. _`pypy/doc/discussion/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/discussion/ -.. _`pypy/interpreter`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter .. _`pypy/interpreter/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/ .. _`pypy/interpreter/argument.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/argument.py -.. _`pypy/interpreter/astcompiler`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler .. _`pypy/interpreter/astcompiler/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ .. _`pypy/interpreter/astcompiler/assemble.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/assemble.py .. _`pypy/interpreter/astcompiler/ast.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ast.py @@ -46,7 +43,6 @@ .. _`pypy/interpreter/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/objspace.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py -.. _`pypy/interpreter/pyparser`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser .. _`pypy/interpreter/pyparser/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/ .. 
_`pypy/interpreter/pyparser/future.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/future.py .. _`pypy/interpreter/pyparser/metaparser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/metaparser.py @@ -54,17 +50,13 @@ .. _`pypy/interpreter/pyparser/pyparse.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pyparse.py .. _`pypy/interpreter/pyparser/pytokenizer.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pytokenizer.py .. _`pypy/interpreter/typedef.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/typedef.py -.. _`pypy/module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py .. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py -.. _`pypy/objspace`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ .. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py -.. _`pypy/objspace/flow`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow .. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/flow/model/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/model/ -.. _`pypy/objspace/std`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ .. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py .. 
_`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py @@ -78,7 +70,6 @@ .. _`pypy/objspace/thunk.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/thunk.py .. _`pypy/objspace/trace`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace .. _`pypy/objspace/trace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace.py -.. _`pypy/rlib`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib .. _`pypy/rlib/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/ .. _`pypy/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/listsort.py .. _`pypy/rlib/nonconst.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/nonconst.py @@ -91,9 +82,8 @@ .. _`pypy/rlib/rsocket.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rsocket.py .. _`pypy/rlib/rstack.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rstack.py .. _`pypy/rlib/streamio.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/streamio.py -.. _`pypy/rlib/test`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test +.. _`pypy/rlib/test/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test/ .. _`pypy/rlib/unroll.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/unroll.py -.. _`pypy/rpython`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython .. _`pypy/rpython/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ .. _`pypy/rpython/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/ .. _`pypy/rpython/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/lltype.py @@ -114,7 +104,6 @@ .. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`pypy/tool/traceconfig.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/traceconfig.py -.. 
_`pypy/translator`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator .. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ .. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ .. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -31,6 +31,9 @@ for linkname in linkrex.findall(content): if '/' in linkname: found = True + if not linkname.endswith("/") and distdir.join(linkname).check(dir=1): + print linkname + linkname += "/" addlink(linkname, bitbucket_url + linkname) elif linkname.startswith('issue'): found = True From commits-noreply at bitbucket.org Mon Apr 25 19:13:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Mon, 25 Apr 2011 19:13:35 +0200 (CEST) Subject: [pypy-svn] pypy default: (rguillebert, arigo) Message-ID: <20110425171335.21578282BEA@codespeak.net> Author: Armin Rigo Branch: Changeset: r43605:c8a693a66bc5 Date: 2011-04-25 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/c8a693a66bc5/ Log: (rguillebert, arigo) Optimized the other path through _get_relative_name() too, after finding out that it's often executed too. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -121,11 +121,13 @@ return space.finditem_str(space.sys.get('modules'), modulename) @jit.purefunction -def _get_dot_position(ctxt_package, level): - result = len(ctxt_package) - while level > 1 and result >= 0: - level -= 1 - result = ctxt_package.rfind('.', 0, result) +def _get_dot_position(str, n): + # return the index in str of the '.' 
such that there are n '.'-separated + # strings after it + result = len(str) + while n > 0 and result >= 0: + n -= 1 + result = str.rfind('.', 0, result) return result def _get_relative_name(space, modulename, level, w_globals): @@ -149,7 +151,7 @@ if ctxt_package == '' and level < 0: return None, 0 - dot_position = _get_dot_position(ctxt_package, level) + dot_position = _get_dot_position(ctxt_package, level - 1) if dot_position < 0: if len(ctxt_package) == 0: msg = "Attempted relative import in non-package" @@ -182,6 +184,7 @@ ctxt_w_name = space.finditem_str(w_globals, '__name__') ctxt_w_path = space.finditem_str(w_globals, '__path__') + ctxt_w_name = jit.hint(ctxt_w_name, promote=True) ctxt_name = None if ctxt_w_name is not None: try: @@ -193,19 +196,19 @@ if not ctxt_name: return None, 0 - ctxt_name_prefix_parts = ctxt_name.split('.') - if level > 0: - n = len(ctxt_name_prefix_parts)-level+1 - assert n>=0 - ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n] - if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module - ctxt_name_prefix_parts.pop() - - if level > 0 and not ctxt_name_prefix_parts: - msg = "Attempted relative import in non-package" - raise OperationError(space.w_ValueError, w(msg)) - - rel_modulename = '.'.join(ctxt_name_prefix_parts) + m = max(level - 1, 0) + if ctxt_w_path is None: # plain module + m += 1 + dot_position = _get_dot_position(ctxt_name, m) + if dot_position < 0: + if level > 0: + msg = "Attempted relative import in non-package" + raise OperationError(space.w_ValueError, w(msg)) + rel_modulename = '' + rel_level = 0 + else: + rel_modulename = ctxt_name[:dot_position] + rel_level = rel_modulename.count('.') + 1 if ctxt_w_path is not None: # __path__ is set, so __name__ is already the package name @@ -223,8 +226,6 @@ else: rel_modulename = modulename - rel_level = len(ctxt_name_prefix_parts) - return rel_modulename, rel_level From commits-noreply at bitbucket.org Mon Apr 25 19:13:36 2011 From: commits-noreply at bitbucket.org 
(arigo) Date: Mon, 25 Apr 2011 19:13:36 +0200 (CEST) Subject: [pypy-svn] pypy default: (rguillebert, arigo) Message-ID: <20110425171336.EC08A282BF2@codespeak.net> Author: Armin Rigo Branch: Changeset: r43606:6089258b9ec0 Date: 2011-04-25 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/6089258b9ec0/ Log: (rguillebert, arigo) Good, the JIT output for any "import x.y.z" seems to be exactly 3 guards now. diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -685,17 +685,12 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - p14 = call(ConstClass(ll_split_chr), p8, 46, -1, descr=) - guard_no_exception(descr=) - guard_nonnull(p14, descr=) - i15 = getfield_gc(p14, descr=) - i16 = int_is_true(i15) - guard_true(i16, descr=) - p18 = call(ConstClass(ll_pop_default), p14, descr=) - guard_no_exception(descr=) - i19 = getfield_gc(p14, descr=) - i20 = int_is_true(i19) - guard_false(i20, descr=) + p11 = getfield_gc(ConstPtr(ptr10), descr=) + guard_value(p11, ConstPtr(ptr12), descr=) + p14 = getfield_gc(ConstPtr(ptr13), descr=) + p16 = getfield_gc(ConstPtr(ptr15), descr=) + guard_value(p14, ConstPtr(ptr17), descr=) + guard_isnull(p16, descr=) """) def test_arraycopy_disappears(self): From commits-noreply at bitbucket.org Mon Apr 25 19:43:52 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 19:43:52 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: hg merge post-release-1.5 Message-ID: <20110425174352.F015136C209@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43607:4b434eb43229 Date: 2011-04-25 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/4b434eb43229/ Log: hg merge post-release-1.5 diff --git a/pypy/translator/c/node.py b/pypy/translator/c/node.py --- 
a/pypy/translator/c/node.py +++ b/pypy/translator/c/node.py @@ -11,7 +11,7 @@ from pypy.translator.c.support import c_char_array_constant, barebonearray from pypy.translator.c.primitive import PrimitiveType, name_signed from pypy.rlib import exports -from pypy.rlib.rfloat import isinf, isnan +from pypy.rlib.rfloat import isfinite from pypy.rlib.rstackovf import _StackOverflow from pypy.translator.c import extfunc from pypy.translator.tool.cbuild import ExternalCompilationInfo @@ -793,7 +793,7 @@ node = db.getcontainernode(value._obj) expr = 'NULL /*%s*/' % node.name node.where_to_copy_me.append('&%s' % access_expr) - elif typeOf(value) == Float and (isinf(value) or isnan(value)): + elif typeOf(value) == Float and not isfinite(value): db.late_initializations.append(('%s' % access_expr, db.get(value))) expr = '0.0 /* patched later by %sinfinity */' % ( '-+'[value > 0]) diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -158,12 +158,12 @@ return _formatd(x, code, precision, flags) def double_to_string(value, tp, precision, flags): - if isnan(value): - special = DIST_NAN + if isfinite(value): + special = DIST_FINITE elif isinf(value): special = DIST_INFINITY - else: - special = DIST_FINITE + else: #isnan(value): + special = DIST_NAN result = formatd(value, tp, precision, flags) return result, special @@ -344,7 +344,7 @@ def asinh(x): "NOT_RPYTHON" absx = abs(x) - if isnan(x) or isinf(x): + if not isfinite(x): return x if absx < _2_to_m28: return x @@ -405,3 +405,6 @@ r = math.floor(absx) return copysign(r, x) +def isfinite(x): + "NOT_RPYTHON" + return not isinf(x) and not isnan(x) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2223,6 +2223,45 @@ return sa assert self.meta_interp(f, [10]) == f(10) + def test_bug688_multiple_immutable_fields(self): + myjitdriver = 
JitDriver(greens=[], reds=['counter','context']) + + class Tag: + pass + class InnerContext(): + _immutable_fields_ = ['variables','local_names'] + def __init__(self, variables): + self.variables = variables + self.local_names = [0] + + def store(self): + self.local_names[0] = 1 + + def retrieve(self): + variables = hint(self.variables, promote=True) + result = self.local_names[0] + if result == 0: + return -1 + else: + return -1 + def build(): + context = InnerContext(Tag()) + + context.store() + + counter = 0 + while True: + myjitdriver.jit_merge_point(context=context, counter = counter) + context.retrieve() + context.retrieve() + + counter += 1 + if counter > 10: + return 7 + assert self.meta_interp(build, []) == 7 + self.check_loops(getfield_gc_pure=0) + self.check_loops(getfield_gc_pure=2, everywhere=True) + class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): diff --git a/pypy/rpython/extfuncregistry.py b/pypy/rpython/extfuncregistry.py --- a/pypy/rpython/extfuncregistry.py +++ b/pypy/rpython/extfuncregistry.py @@ -36,6 +36,9 @@ register_external(rfloat.isnan, [float], bool, export_name="ll_math.ll_math_isnan", sandboxsafe=True, llimpl=ll_math.ll_math_isnan) +register_external(rfloat.isfinite, [float], bool, + export_name="ll_math.ll_math_isfinite", sandboxsafe=True, + llimpl=ll_math.ll_math_isfinite) register_external(rfloat.copysign, [float, float], float, export_name="ll_math.ll_math_copysign", sandboxsafe=True, llimpl=ll_math.ll_math_copysign) diff --git a/pypy/translator/c/test/test_genc.py b/pypy/translator/c/test/test_genc.py --- a/pypy/translator/c/test/test_genc.py +++ b/pypy/translator/c/test/test_genc.py @@ -273,7 +273,7 @@ assert res == 1.5 def test_nan_and_special_values(): - from pypy.rlib.rfloat import isnan, isinf, copysign + from pypy.rlib.rfloat import isnan, isinf, isfinite, copysign inf = 1e300 * 1e300 assert isinf(inf) nan = inf/inf @@ -283,6 +283,7 @@ (inf, lambda x: isinf(x) and x > 0.0), (-inf, lambda x: isinf(x) and x < 
0.0), (nan, isnan), + (42.0, isfinite), (0.0, lambda x: not x and copysign(1., x) == 1.), (-0.0, lambda x: not x and copysign(1., x) == -1.), ]: diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -1,7 +1,7 @@ from pypy.rlib.rarithmetic import LONG_BIT, intmask, r_uint, r_ulonglong from pypy.rlib.rarithmetic import ovfcheck, r_longlong, widen from pypy.rlib.rarithmetic import most_neg_value_of_same_type -from pypy.rlib.rfloat import isinf, isnan +from pypy.rlib.rfloat import isfinite from pypy.rlib.debug import make_sure_not_resized, check_regular_int from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib import jit @@ -173,9 +173,15 @@ def fromfloat(dval): """ Create a new bigint object from a float """ # This function is not marked as pure because it can raise + if isfinite(dval): + return rbigint._fromfloat_finite(dval) + else: + raise OverflowError + + @staticmethod + @jit.purefunction + def _fromfloat_finite(dval): sign = 1 - if isinf(dval) or isnan(dval): - raise OverflowError if dval < 0.0: sign = -1 dval = -dval diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -72,7 +72,8 @@ def find_rewritable_bool(self, op, args): try: oldopnum = opboolinvers[op.getopnum()] - targs = [args[0], args[1], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]], + None)) if self.try_boolinvers(op, targs): return True except KeyError: @@ -80,7 +81,8 @@ try: oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL - targs = [args[1], args[0], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) oldop = self.optimizer.pure_operations.get(targs, None) if oldop is not None and oldop.getdescr() is op.getdescr(): 
self.make_equal_to(op.result, self.getvalue(oldop.result)) @@ -90,7 +92,8 @@ try: oldopnum = opboolinvers[opboolreflex[op.getopnum()]] - targs = [args[1], args[0], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) if self.try_boolinvers(op, targs): return True except KeyError: diff --git a/pypy/jit/metainterp/optimizeutil.py b/pypy/jit/metainterp/optimizeutil.py --- a/pypy/jit/metainterp/optimizeutil.py +++ b/pypy/jit/metainterp/optimizeutil.py @@ -99,7 +99,9 @@ make_sure_not_resized(args) res = 0x345678 for arg in args: - if isinstance(arg, history.Const): + if arg is None: + y = 17 + elif isinstance(arg, history.Const): y = arg._get_hash_() else: y = compute_identity_hash(arg) diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -157,9 +157,9 @@ self.interpret(fn, [1.0, 2.0, 3.0]) def test_copysign(self): - import math + from pypy.rlib import rfloat def fn(x, y): - return math.copysign(x, y) + return rfloat.copysign(x, y) assert self.interpret(fn, [42, -1]) == -42 assert self.interpret(fn, [42, -0.0]) == -42 assert self.interpret(fn, [42, 0.0]) == 42 @@ -172,21 +172,42 @@ assert self.interpret(fn, [0]) == 42.3 def test_isnan(self): - import math - def fn(x): - inf = x * x - nan = inf / inf - return math.isnan(nan) - assert self.interpret(fn, [1e200]) + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isnan(n1 / n2) + assert self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [1e200, 1.0]) # +inf + assert not self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [42.5, 2.3]) # +finite + assert not self.interpret(fn, [42.5, -2.3]) # -finite def test_isinf(self): - import math - def fn(x): - inf = x * x - return math.isinf(inf) - assert self.interpret(fn, [1e200]) + from pypy.rlib import rfloat + def fn(x, y): + n1 = x 
* x + n2 = y * y * y + return rfloat.isinf(n1 / n2) + assert self.interpret(fn, [1e200, 1.0]) # +inf + assert self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [42.5, 2.3]) # +finite + assert not self.interpret(fn, [42.5, -2.3]) # -finite - + def test_isfinite(self): + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isfinite(n1 / n2) + assert self.interpret(fn, [42.5, 2.3]) # +finite + assert self.interpret(fn, [42.5, -2.3]) # -finite + assert not self.interpret(fn, [1e200, 1.0]) # +inf + assert not self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [1e200, 1e200]) # nan + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -269,7 +269,7 @@ self.optimizer.pure_operations[key] = op def has_pure_result(self, opnum, args, descr): - op = ResOperation(opnum, args, None) + op = ResOperation(opnum, args, None, descr) key = self.optimizer.make_args_key(op) op = self.optimizer.pure_operations.get(key, None) if op is None: @@ -571,7 +571,7 @@ def make_args_key(self, op): n = op.numargs() - args = [None] * (n + 1) + args = [None] * (n + 2) for i in range(n): arg = op.getarg(i) try: @@ -582,6 +582,7 @@ arg = value.get_key_box() args[i] = arg args[n] = ConstInt(op.getopnum()) + args[n+1] = op.getdescr() return args def optimize_default(self, op): diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -219,6 +219,7 @@ same before and after translation, except for RPython instances on the lltypesystem. 
""" + assert x is not None result = object.__hash__(x) try: x.__dict__['__precomputed_identity_hash'] = result @@ -267,14 +268,15 @@ In RPython, floats cannot be used with ints in dicts, anyway. """ from pypy.rlib.rarithmetic import intmask - from pypy.rlib.rfloat import isinf, isnan - if isinf(f): - if f < 0.0: - return -271828 - else: - return 314159 - elif isnan(f): - return 0 + from pypy.rlib.rfloat import isfinite, isinf + if not isfinite(f): + if isinf(f): + if f < 0.0: + return -271828 + else: + return 314159 + else: #isnan(f): + return 0 v, expo = math.frexp(f) v *= TAKE_NEXT hipart = int(v) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from pypy.rlib.rfloat import ( - isinf, isnan, INFINITY, NAN, copysign, formatd, + isinf, isnan, isfinite, INFINITY, NAN, copysign, formatd, DTSF_ADD_DOT_0, DTSF_STR_PRECISION) from pypy.rlib.rbigint import rbigint from pypy.rlib.objectmodel import we_are_translated @@ -102,7 +102,7 @@ def float_hex__Float(space, w_float): value = w_float.floatval - if isinf(value) or isnan(value): + if not isfinite(value): return str__Float(space, w_float) if value == 0.0: if copysign(1., value) == -1.: @@ -136,15 +136,15 @@ def float2string(space, w_float, code, precision): x = w_float.floatval # we special-case explicitly inf and nan here - if isinf(x): + if isfinite(x): + s = formatd(x, code, precision, DTSF_ADD_DOT_0) + elif isinf(x): if x > 0.0: s = "inf" else: s = "-inf" - elif isnan(x): + else: # isnan(x): s = "nan" - else: - s = formatd(x, code, precision, DTSF_ADD_DOT_0) return space.wrap(s) def repr__Float(space, w_float): @@ -179,7 +179,7 @@ if opname == 'eq' or opname == 'ne': def do_compare_bigint(f1, b2): """f1 is a float. 
b2 is a bigint.""" - if isinf(f1) or isnan(f1) or math.floor(f1) != f1: + if not isfinite(f1) or math.floor(f1) != f1: return opname == 'ne' b1 = rbigint.fromfloat(f1) res = b1.eq(b2) @@ -189,7 +189,7 @@ else: def do_compare_bigint(f1, b2): """f1 is a float. b2 is a bigint.""" - if isinf(f1) or isnan(f1): + if not isfinite(f1): return op(f1, 0.0) if opname == 'gt' or opname == 'le': # 'float > long' <==> 'ceil(float) > long' @@ -457,8 +457,6 @@ if x == 0.0: if y < 0.0: - if isinf(y): - return space.wrap(INFINITY) raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 cannot be raised to " "a negative power")) diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -22,11 +22,60 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_isfinite(self): + inf = 1e200 * 1e200 + nan = inf / inf + assert ll_math.ll_math_isfinite(0.0) + assert ll_math.ll_math_isfinite(-42.0) + assert not ll_math.ll_math_isfinite(nan) + assert not ll_math.ll_math_isnan(inf) + assert not ll_math.ll_math_isnan(-inf) + + def test_compiled_isnan(self): + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isnan(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(1e200, 1e200) # nan + assert not f(1e200, 1.0) # +inf + assert not f(1e200, -1.0) # -inf + assert not f(42.5, 2.3) # +finite + assert not f(42.5, -2.3) # -finite + def test_compiled_isinf(self): - def f(x): - return ll_math.ll_math_isinf(1. 
/ x) - f = compile(f, [float], backendopt=False) - assert f(5.5e-309) + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isinf(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(1e200, 1.0) # +inf + assert f(1e200, -1.0) # -inf + assert not f(1e200, 1e200) # nan + assert not f(42.5, 2.3) # +finite + assert not f(42.5, -2.3) # -finite + + def test_compiled_isfinite(self): + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isfinite(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(42.5, 2.3) # +finite + assert f(42.5, -2.3) # -finite + assert not f(1e200, 1.0) # +inf + assert not f(1e200, -1.0) # -inf + assert not f(1e200, 1e200) # nan + + +from pypy.rpython.lltypesystem import lltype +_A = lltype.GcArray(lltype.Float) +def normalize(x): + # workaround: force the C compiler to cast to a double + a = lltype.malloc(_A, 1) + a[0] = x + import time; time.time() + return a[0] def make_test_case((fnname, args, expected), dict): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -8,7 +8,7 @@ from pypy.tool.autopath import pypydir from pypy.rlib import jit, rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo -from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN +from pypy.rlib.rfloat import isinf, isnan, isfinite, INFINITY, NAN if sys.platform == "win32": eci = ExternalCompilationInfo() @@ -91,14 +91,20 @@ # Custom implementations def ll_math_isnan(y): - # By not calling into the extenal function the JIT can inline this. Floats - # are awesome. + # By not calling into the external function the JIT can inline this. + # Floats are awesome. return y != y def ll_math_isinf(y): # Use a bitwise OR so the JIT doesn't produce 2 different guards. 
return (y == INFINITY) | (y == -INFINITY) +def ll_math_isfinite(y): + # Use a custom hack that is reasonably well-suited to the JIT. + # Floats are awesome (bis). + z = 0.0 * y + return z == z # i.e.: z is not a NaN + ll_math_floor = math_floor diff --git a/pypy/rlib/rstruct/ieee.py b/pypy/rlib/rstruct/ieee.py --- a/pypy/rlib/rstruct/ieee.py +++ b/pypy/rlib/rstruct/ieee.py @@ -87,12 +87,13 @@ raise ValueError("invalid size value") sign = rfloat.copysign(1.0, x) < 0.0 - if rfloat.isinf(x): - mant = r_ulonglong(0) - exp = MAX_EXP - MIN_EXP + 2 - elif rfloat.isnan(x): - mant = r_ulonglong(1) << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 + if not rfloat.isfinite(x): + if rfloat.isinf(x): + mant = r_ulonglong(0) + exp = MAX_EXP - MIN_EXP + 2 + else: # rfloat.isnan(x): + mant = r_ulonglong(1) << (MANT_DIG-2) # other values possible + exp = MAX_EXP - MIN_EXP + 2 elif x == 0.0: mant = r_ulonglong(0) exp = 0 From commits-noreply at bitbucket.org Mon Apr 25 19:43:55 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 19:43:55 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: failing tests for unhandled cases Message-ID: <20110425174355.13747282BEB@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43608:eab706f69ad3 Date: 2011-04-25 18:10 +0200 http://bitbucket.org/pypy/pypy/changeset/eab706f69ad3/ Log: failing tests for unhandled cases diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -1290,6 +1290,27 @@ """ self.optimize_loop(ops, expected) + def test_virtual_field_forced_by_later_jumpargs(self): + ops = """ + [i0, p1, p3] + i28 = int_add(i0, 1) + p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) + setfield_gc(p3, p30, descr=valuedescr) + p45 = getfield_gc(p3, descr=valuedescr) + i29 = 
int_add(i28, 1) + jump(i29, p45, p3) + """ + preamble = """ + [i, p0, i1] + jump(i, p0, i1) + """ + expected = """ + [i, p0, i1] + jump(i, p0, i1) + """ + self.optimize_loop(ops, expected, preamble) + def test_nonvirtual_1(self): ops = """ [i] diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2262,6 +2262,64 @@ self.check_loops(getfield_gc_pure=0) self.check_loops(getfield_gc_pure=2, everywhere=True) + def test_args_becomming_equal(self): + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) + def f(n, a, b): + sa = i = 0 + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, b=b) + sa += a + sa *= b + if i > n/2: + a = b + i += 1 + return sa + assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2) + + def test_args_becomming_equal_boxed1(self): + class A(object): + def __init__(self, a, b): + self.a = a + self.b = b + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b', 'node']) + + def f(n, a, b): + sa = i = 0 + node = A(a,b) + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, b=b, node=node) + sa += node.a + sa *= node.b + if i > n/2: + node = A(b, b) + else: + node = A(a, b) + i += 1 + return sa + assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2) + + def test_args_becomming_equal_boxed2(self): + class A(object): + def __init__(self, a, b): + self.a = a + self.b = b + myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node']) + + def f(n, a, b): + sa = i = 0 + node = A(a, b) + while i < n: + myjitdriver.jit_merge_point(n=n, i=i, sa=sa, node=node) + sa += node.a + sa *= node.b + if i > n/2: + node = A(node.b, node.b) + else: + node = A(node.b, node.a) + i += 1 + return sa + assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2) + class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): From commits-noreply at bitbucket.org Mon Apr 25 19:43:57 2011 
From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 19:43:57 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: force_at_end_of_preamble before first get_args_for_fail in case it forces anything more than itself Message-ID: <20110425174357.6DFBE282BEB@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43609:83fd21d03642 Date: 2011-04-25 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/83fd21d03642/ Log: force_at_end_of_preamble before first get_args_for_fail in case it forces anything more than itself diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -1291,6 +1291,38 @@ self.optimize_loop(ops, expected) def test_virtual_field_forced_by_later_jumpargs(self): + # FIXME: Can that occure? How to test?!? + ops = """ + [i0, p1, p3, p3next] + i28 = int_add(i0, 1) + p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) + i29 = int_add(i28, 1) + p3v = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p3v, p30, descr=valuedescr) + p45 = getfield_gc(p3next, descr=valuedescr) + jump(i29, p45, p3next, p3v) + """ + preamble = """ + [i0, p1, p3, p3next] + i28 = int_add(i0, 1) + i29 = int_add(i28, 1) + p45 = getfield_gc(p3next, descr=valuedescr) + jump(i29, p45, p3next, i28) + """ + expected = """ + [i0, p1, p3, i1] + i28 = int_add(i0, 1) + i29 = int_add(i28, 1) + p3v = new_with_vtable(ConstClass(node_vtable)) + p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i1, descr=nextdescr) + setfield_gc(p3v, p30, descr=valuedescr) + jump(i29, i28, p3v) + """ + self.optimize_loop(ops, expected, preamble) + + def test_virtual_field_forced_by_lazy_setfield(self): ops = """ [i0, p1, p3] i28 = int_add(i0, 1) diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- 
a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -270,13 +270,12 @@ def get_virtual_state(self, jump_args): already_forced = {} - for box in jump_args: - value = self.getvalue(box) - value = value.force_at_end_of_preamble(already_forced) + values = [self.getvalue(box).force_at_end_of_preamble(already_forced) + for box in jump_args] + for value in values: value.get_args_for_fail(self) return VirtualState([self.state(box) for box in jump_args]) - def make_not_virtual(self, value): return NotVirtualStateInfo(value) From commits-noreply at bitbucket.org Mon Apr 25 19:44:00 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Mon, 25 Apr 2011 19:44:00 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: force lazy setfields before producing virtual state Message-ID: <20110425174400.9D33B282BEB@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43610:cdbc89562f76 Date: 2011-04-25 19:43 +0200 http://bitbucket.org/pypy/pypy/changeset/cdbc89562f76/ Log: force lazy setfields before producing virtual state diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py --- a/pypy/jit/metainterp/test/test_optimizeopt.py +++ b/pypy/jit/metainterp/test/test_optimizeopt.py @@ -1334,13 +1334,15 @@ jump(i29, p45, p3) """ preamble = """ - [i, p0, i1] - jump(i, p0, i1) - """ - expected = """ - [i, p0, i1] - jump(i, p0, i1) - """ + [i0, p1, p3] + i28 = int_add(i0, 1) + i29 = int_add(i28, 1) + p30 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p30, i28, descr=nextdescr) + setfield_gc(p3, p30, descr=valuedescr) + jump(i29, p30, p3) + """ + expected = preamble self.optimize_loop(ops, expected, preamble) def test_nonvirtual_1(self): diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py --- a/pypy/jit/metainterp/optimizeopt/heap.py +++ b/pypy/jit/metainterp/optimizeopt/heap.py @@ -136,15 +136,13 @@ 
self.cached_arrayitems = {} self.original_producer = {} + def force_at_end_of_preamble(self): + self.force_all_lazy_setfields() + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes, optimizer, valuemap): new = OptHeap() - if True: - self.force_all_lazy_setfields() - else: - assert 0 # was: new.lazy_setfields = self.lazy_setfields - for descr, d in self.cached_fields.items(): new.cached_fields[descr] = d.get_cloned(optimizer, valuemap, short_boxes) diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -282,9 +282,12 @@ def turned_constant(self, value): pass + def force_at_end_of_preamble(self): + pass + + # It is too late to force stuff here, it must be done in force_at_end_of_preamble def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes=None, optimizer=None, valuemap=None): - #return self.__class__() raise NotImplementedError def produce_potential_short_preamble_ops(self, potential_ops): @@ -330,6 +333,10 @@ self.optimizations = optimizations + def force_at_end_of_preamble(self): + for o in self.optimizations: + o.force_at_end_of_preamble() + def reconstruct_for_next_iteration(self, short_boxes, surviving_boxes=None, optimizer=None, valuemap=None): assert optimizer is None diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -269,9 +269,11 @@ return info def get_virtual_state(self, jump_args): + self.optimizer.force_at_end_of_preamble() already_forced = {} values = [self.getvalue(box).force_at_end_of_preamble(already_forced) for box in jump_args] + for value in values: value.get_args_for_fail(self) return VirtualState([self.state(box) for box in jump_args]) From commits-noreply at bitbucket.org Mon Apr 25 
21:16:50 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 21:16:50 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Fixed/removed stale external links (and changed some urls to avoid unhelpful redirects) Message-ID: <20110425191650.7568936C203@codespeak.net> Author: Dario Bertini Branch: documentation-cleanup Changeset: r43611:5d78885052fd Date: 2011-04-25 21:11 +0200 http://bitbucket.org/pypy/pypy/changeset/5d78885052fd/ Log: Fixed/removed stale external links (and changed some urls to avoid unhelpful redirects) diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -99,7 +99,7 @@ 9 To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK 2.0`_, while Linux and Mac users +users need the `.NET Framework SDK`_, while Linux and Mac users can use Mono_. To translate and run for the JVM you must have a JDK installed (at least version 5) and ``java``/``javac`` on your path. @@ -354,7 +354,7 @@ install it if they want to run low-level tests. See the `download page of ctypes`_. -.. _`download page of ctypes`: http://sourceforge.net/project/showfiles.php?group_id=71702 +.. _`download page of ctypes`: https://sourceforge.net/projects/ctypes/files/ .. _`ctypes`: http://starship.python.net/crew/theller/ctypes/ .. _`py.test`: @@ -390,7 +390,7 @@ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ -.. _`.NET Framework SDK 2.0`: http://msdn.microsoft.com/netframework/downloads/updates/default.aspx +.. _`.NET Framework SDK`: http://msdn.microsoft.com/netframework/ .. _Mono: http://www.mono-project.com/Main_Page .. _`CLI backend`: cli-backend.html .. 
_clr: clr-module.html diff --git a/pypy/doc/release-0.6.rst b/pypy/doc/release-0.6.rst --- a/pypy/doc/release-0.6.rst +++ b/pypy/doc/release-0.6.rst @@ -9,11 +9,11 @@ What it is and where to start ----------------------------- -Getting started: http://codespeak.net/pypy/index.cgi?doc/getting-started.html +Getting started: getting-started.html -PyPy Documentation: http://codespeak.net/pypy/index.cgi?doc +PyPy Documentation: index.html -PyPy Homepage: http://codespeak.net/pypy/ +PyPy Homepage: http://pypy.org PyPy is a MIT-licensed reimplementation of Python written in Python itself. The long term goals are an implementation that @@ -89,9 +89,9 @@ from numerous people. Please feel free to give feedback and raise questions. - contact points: http://codespeak.net/pypy/index.cgi?contact + contact points: http://pypy.org/contact.html - contributor list: http://codespeak.net/pypy/index.cgi?doc/contributor.html + contributor list: contributor.html have fun, diff --git a/pypy/doc/video-index.rst b/pypy/doc/video-index.rst --- a/pypy/doc/video-index.rst +++ b/pypy/doc/video-index.rst @@ -20,14 +20,14 @@ such as `mplayer`_, `xine`_, `vlc`_ or the windows media player. .. _`mplayer`: http://www.mplayerhq.hu/design7/dload.html -.. _`xine`: http://xinehq.de/index.php/releases +.. _`xine`: http://www.xine-project.org .. _`vlc`: http://www.videolan.org/vlc/ You can find the necessary codecs in the ffdshow-library: -http://ffdshow.sourceforge.net/tikiwiki/tiki-index.php +http://sourceforge.net/projects/ffdshow/ or use the original divx codec (for Windows): -http://www.divx.com/divx/windows/download/index.php +http://www.divx.com/software/divx-plus Copyrights and Licensing diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -254,7 +254,7 @@ .. _`RPython`: coding-guide.html#rpython -.. _Python: http://docs.python.org/ref +.. _Python: http://docs.python.org/reference/ .. 
_Psyco: http://psyco.sourceforge.net .. _stackless: stackless.html .. _`generate Just-In-Time Compilers`: jit/index.html diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -13,7 +13,7 @@ differences to CPython and some missing extensions, for details see `CPython differences`_. -.. _Django: http://djangoproject.org +.. _Django: http://djangoproject.com .. _Twisted: http://twistedmatrix.com .. _`CPython differences`: cpython_differences.html diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -65,7 +65,7 @@ .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html -.. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://www.iam.unibe.ch/~verwaest/pygirl.pdf +.. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf .. _`Representation-Based Just-in-Time Specialization and the Psyco Prototype for Python`: http://psyco.sourceforge.net/psyco-pepm-a.ps.gz .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf @@ -342,8 +342,8 @@ .. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html .. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ -.. _IronPython: http://www.codeplex.com/Wiki/View.aspx?ProjectName=IronPython -.. 
_`Dynamic Native Optimization of Native Interpreters`: http://www.ai.mit.edu/~gregs/dynamorio.html -.. _JikesRVM: http://jikesrvm.sf.net +.. _IronPython: http://ironpython.codeplex.com/ +.. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html +.. _JikesRVM: http://jikesrvm.org/ .. _Tunes: http://tunes.org .. _`old Tunes Wiki`: http://codespeak.net/cliki.tunes.org/ diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -18,7 +18,7 @@ language implementation itself. `more...`_ -.. _Python: http://docs.python.org/ref +.. _Python: http://docs.python.org/reference/ .. _`more...`: architecture.html Just the facts diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -2,7 +2,7 @@ PyPy - a Python_ implementation written in Python ================================================= -.. _Python: http://www.python.org/doc/2.5.2/ +.. _Python: http://docs.python.org/release/2.5.2/ .. contents:: :depth: 1 diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -271,7 +271,7 @@ returns a object with a ``recognize(input)`` method that returns True or False depending on whether ``input`` matches the string or not. -.. _`re`: http://docs.python.org/lib/module-re.html +.. _`re`: http://docs.python.org/library/re.html EBNF ---- diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -99,9 +99,6 @@ ------- We tried pyglet checking it out from its repository at revision 1984. 
-For convenience a tarball of the checkout can also be found at: - -http://codespeak.net/~pedronis/pyglet-r1984.tgz From pyglet, the following examples are known to work: diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -54,7 +54,7 @@ backends. For more details, read the last `sprint status`_ page and enjoy the pictures_. -.. _`sprint status`: http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/tokyo-planning.html +.. _`sprint status`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/tokyo/tokyo-planning.txt .. _`pictures`: http://www.flickr.com/photos/19046555 at N00/sets/72057594116388174/ PyPy at Python UK/ACCU Conference (United Kingdom) @@ -63,12 +63,12 @@ *April 19th - April 22nd 2006.* Several talks about PyPy were hold at this year's Python UK/ACCU conference. Read more at the `ACCU site`_. -.. _`ACCU site`: http://www.accu.org/ +.. _`ACCU site`: http://accu.org/ PyPy at XPDay France 2006 in Paris March 23rd - March 24th 2006 ================================================================== -Logilab presented PyPy at the first `french XP Day`_ that it was +Logilab presented PyPy at the first french XP Day that it was sponsoring and which was held in Paris. There was over a hundred attendants. Interesting talks included Python as an agile language and Tools for continuous integration. @@ -99,7 +99,7 @@ Talks at PyCon 2006 (Dallas, Texas, USA) =================================================================== -*Feb 24th - Feb 26th 2006.* PyPy developers spoke at `PyCon 2006`_. +*Feb 24th - Feb 26th 2006.* PyPy developers spoke at PyCon 2006. .. _`PyCon 2006`: http://us.pycon.org/TX2006/HomePage @@ -247,7 +247,7 @@ .. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg .. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html .. 
_`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ -.. _`Trillke-Gut`: http://www.trillke.net/images/HomePagePictureSmall.jpg +.. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished ====================================================== @@ -310,6 +310,6 @@ Read more in `EuroPython sprint announcement`_, see who is planning to attend on `the people page`_. There is also a page_ in the python wiki. -.. _`EuroPython sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/europython-2006/announce.html -.. _`the people page`: http://codespeak.net/pypy/extradoc/sprintinfo/europython-2006/people.html +.. _`EuroPython sprint announcement`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/post-ep2006/announce.txt +.. _`the people page`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/post-ep2006/people.txt .. _page: http://wiki.python.org/moin/EuroPython2006 diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -146,7 +146,7 @@ implementation must behave in a static way often referenced as "RPythonic". -.. _Starkiller: http://www.python.org/pycon/dc2004/papers/1/paper.pdf +.. _Starkiller: http://people.csail.mit.edu/jrb/Projects/starkiller.pdf .. _ShedSkin: http://shed-skin.blogspot.com/ However, when the PyPy interpreter is started as a Python program, it @@ -486,9 +486,9 @@ logilab-common_ and astng_ that you will need to install too before you can use the tool. -.. _Pylint: http://www.logilab.org/projects/pylint -.. _logilab-common: http://www.logilab.org/projects/common -.. _astng: http://www.logilab.org/projects/astng +.. _Pylint: http://www.logilab.org/project/pylint +.. _logilab-common: http://www.logilab.org/project/logilab-common +.. _astng: http://www.logilab.org/project/logilab-astng @@ -903,7 +903,7 @@ .. _`register with the tracker`: https://codespeak.net/issue/pypy-dev/user?@template=register -.. 
_`roundup`: http://roundup.sf.net +.. _`roundup`: http://roundup.sourceforge.net/ .. _`testing in PyPy`: @@ -931,7 +931,7 @@ .. _`standard object space`: objspace.html#standard-object-space .. _`objectspace`: objspace.html -.. _`py.test`: http://codespeak.net/py/current/doc/test.html +.. _`py.test`: http://pytest.org/ Interpreter level tests ----------------------- @@ -1030,7 +1030,7 @@ files. Here is a `ReST quickstart`_ but you can also just look at the existing documentation and see how things work. -.. _`ReST quickstart`: http://docutils.sourceforge.net/docs/rst/quickref.html +.. _`ReST quickstart`: http://docutils.sourceforge.net/docs/user/rst/quickref.html Note that the web site of http://pypy.org/ is maintained separately. For now it is in the repository https://bitbucket.org/pypy/extradoc diff --git a/pypy/doc/discussion/distribution-implementation.rst b/pypy/doc/discussion/distribution-implementation.rst --- a/pypy/doc/discussion/distribution-implementation.rst +++ b/pypy/doc/discussion/distribution-implementation.rst @@ -88,4 +88,4 @@ probably using some of the Armin's hacks to rewrite it using greenlets instead of threads. -.. _`py.execnet`: http://codespeak.net/py/current/doc/execnet.html +.. _`py.execnet`: http://codespeak.net/execnet/ diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -96,8 +96,8 @@ -.. _`py-lib`: http://codespeak.net/py/current/doc/ -.. _`py.test`: http://codespeak.net/py/current/doc/test.html +.. _`py-lib`: http://pylib.org/ +.. _`py.test`: http://pytest.org/ .. _codespeak: http://codespeak.net/ .. 
_`pypy-dev`: http://codespeak.net/mailman/listinfo/pypy-dev diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -197,13 +197,11 @@ It seems that a lot of strange, unexplainable problems can be magically solved by removing all the \*.pyc files from the PyPy source tree -(the script `py.cleanup`_ from py/bin will do that for you). +(the script py.cleanup from pypy/tool will do that for you). Another thing you can do is removing the directory pypy/_cache completely. If the error is persistent and still annoys you after this treatment please send us a bug report (or even better, a fix :-) -.. _`py.cleanup`: http://codespeak.net/py/current/doc/bin.html - ------------------------------------------------------------- OSError: ... cannot restore segment prot after reloc... Help? ------------------------------------------------------------- @@ -258,7 +256,7 @@ .. [BRETT] Brett Cannon, Localized Type Inference of Atomic Types in Python, - http://www.ocf.berkeley.edu/~bac/thesis.pdf + http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.90.3231 .. [D05.1] Compiling Dynamic Language Implementations, Report from the PyPy project to the E.U., diff --git a/pypy/doc/release-0.9.0.rst b/pypy/doc/release-0.9.0.rst --- a/pypy/doc/release-0.9.0.rst +++ b/pypy/doc/release-0.9.0.rst @@ -59,7 +59,7 @@ **testing refinements** py.test, our testing tool, now has preliminary support for doctests. We now run all our tests every night, and you can see the summary at: - http://snake.cs.uni-duesseldorf.de/pypytest/summary.html + http://buildbot.pypy.org/summary What is PyPy (about)? 
------------------------------------------------ From commits-noreply at bitbucket.org Mon Apr 25 21:16:56 2011 From: commits-noreply at bitbucket.org (berdario) Date: Mon, 25 Apr 2011 21:16:56 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110425191656.39AEC282B9D@codespeak.net> Author: Dario Bertini Branch: documentation-cleanup Changeset: r43612:b3a8d1672250 Date: 2011-04-25 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b3a8d1672250/ Log: merge heads diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -99,7 +99,7 @@ 9 To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK 2.0`_, while Linux and Mac users +users need the `.NET Framework SDK`_, while Linux and Mac users can use Mono_. To translate and run for the JVM you must have a JDK installed (at least version 5) and ``java``/``javac`` on your path. @@ -354,7 +354,7 @@ install it if they want to run low-level tests. See the `download page of ctypes`_. -.. _`download page of ctypes`: http://sourceforge.net/project/showfiles.php?group_id=71702 +.. _`download page of ctypes`: https://sourceforge.net/projects/ctypes/files/ .. _`ctypes`: http://starship.python.net/crew/theller/ctypes/ .. _`py.test`: @@ -390,7 +390,7 @@ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ -.. _`.NET Framework SDK 2.0`: http://msdn.microsoft.com/netframework/downloads/updates/default.aspx +.. _`.NET Framework SDK`: http://msdn.microsoft.com/netframework/ .. _Mono: http://www.mono-project.com/Main_Page .. _`CLI backend`: cli-backend.html .. 
_clr: clr-module.html diff --git a/pypy/doc/release-0.6.rst b/pypy/doc/release-0.6.rst --- a/pypy/doc/release-0.6.rst +++ b/pypy/doc/release-0.6.rst @@ -9,11 +9,11 @@ What it is and where to start ----------------------------- -Getting started: http://codespeak.net/pypy/index.cgi?doc/getting-started.html +Getting started: getting-started.html -PyPy Documentation: http://codespeak.net/pypy/index.cgi?doc +PyPy Documentation: index.html -PyPy Homepage: http://codespeak.net/pypy/ +PyPy Homepage: http://pypy.org PyPy is a MIT-licensed reimplementation of Python written in Python itself. The long term goals are an implementation that @@ -89,9 +89,9 @@ from numerous people. Please feel free to give feedback and raise questions. - contact points: http://codespeak.net/pypy/index.cgi?contact + contact points: http://pypy.org/contact.html - contributor list: http://codespeak.net/pypy/index.cgi?doc/contributor.html + contributor list: contributor.html have fun, diff --git a/pypy/doc/video-index.rst b/pypy/doc/video-index.rst --- a/pypy/doc/video-index.rst +++ b/pypy/doc/video-index.rst @@ -20,14 +20,14 @@ such as `mplayer`_, `xine`_, `vlc`_ or the windows media player. .. _`mplayer`: http://www.mplayerhq.hu/design7/dload.html -.. _`xine`: http://xinehq.de/index.php/releases +.. _`xine`: http://www.xine-project.org .. _`vlc`: http://www.videolan.org/vlc/ You can find the necessary codecs in the ffdshow-library: -http://ffdshow.sourceforge.net/tikiwiki/tiki-index.php +http://sourceforge.net/projects/ffdshow/ or use the original divx codec (for Windows): -http://www.divx.com/divx/windows/download/index.php +http://www.divx.com/software/divx-plus Copyrights and Licensing diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -254,7 +254,7 @@ .. _`RPython`: coding-guide.html#rpython -.. _Python: http://docs.python.org/ref +.. _Python: http://docs.python.org/reference/ .. 
_Psyco: http://psyco.sourceforge.net .. _stackless: stackless.html .. _`generate Just-In-Time Compilers`: jit/index.html diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -13,7 +13,7 @@ differences to CPython and some missing extensions, for details see `CPython differences`_. -.. _Django: http://djangoproject.org +.. _Django: http://djangoproject.com .. _Twisted: http://twistedmatrix.com .. _`CPython differences`: cpython_differences.html diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -65,7 +65,7 @@ .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html -.. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://www.iam.unibe.ch/~verwaest/pygirl.pdf +.. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf .. _`Representation-Based Just-in-Time Specialization and the Psyco Prototype for Python`: http://psyco.sourceforge.net/psyco-pepm-a.ps.gz .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf @@ -342,8 +342,8 @@ .. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html .. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ -.. _IronPython: http://www.codeplex.com/Wiki/View.aspx?ProjectName=IronPython -.. 
_`Dynamic Native Optimization of Native Interpreters`: http://www.ai.mit.edu/~gregs/dynamorio.html -.. _JikesRVM: http://jikesrvm.sf.net +.. _IronPython: http://ironpython.codeplex.com/ +.. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html +.. _JikesRVM: http://jikesrvm.org/ .. _Tunes: http://tunes.org .. _`old Tunes Wiki`: http://codespeak.net/cliki.tunes.org/ diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -18,7 +18,7 @@ language implementation itself. `more...`_ -.. _Python: http://docs.python.org/ref +.. _Python: http://docs.python.org/reference/ .. _`more...`: architecture.html Just the facts diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -2,7 +2,7 @@ PyPy - a Python_ implementation written in Python ================================================= -.. _Python: http://www.python.org/doc/2.5.2/ +.. _Python: http://docs.python.org/release/2.5.2/ .. contents:: :depth: 1 diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -271,7 +271,7 @@ returns a object with a ``recognize(input)`` method that returns True or False depending on whether ``input`` matches the string or not. -.. _`re`: http://docs.python.org/lib/module-re.html +.. _`re`: http://docs.python.org/library/re.html EBNF ---- diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -99,9 +99,6 @@ ------- We tried pyglet checking it out from its repository at revision 1984. 
-For convenience a tarball of the checkout can also be found at: - -http://codespeak.net/~pedronis/pyglet-r1984.tgz From pyglet, the following examples are known to work: diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -54,7 +54,7 @@ backends. For more details, read the last `sprint status`_ page and enjoy the pictures_. -.. _`sprint status`: http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/tokyo-planning.html +.. _`sprint status`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/tokyo/tokyo-planning.txt .. _`pictures`: http://www.flickr.com/photos/19046555 at N00/sets/72057594116388174/ PyPy at Python UK/ACCU Conference (United Kingdom) @@ -63,12 +63,12 @@ *April 19th - April 22nd 2006.* Several talks about PyPy were hold at this year's Python UK/ACCU conference. Read more at the `ACCU site`_. -.. _`ACCU site`: http://www.accu.org/ +.. _`ACCU site`: http://accu.org/ PyPy at XPDay France 2006 in Paris March 23rd - March 24th 2006 ================================================================== -Logilab presented PyPy at the first `french XP Day`_ that it was +Logilab presented PyPy at the first french XP Day that it was sponsoring and which was held in Paris. There was over a hundred attendants. Interesting talks included Python as an agile language and Tools for continuous integration. @@ -99,7 +99,7 @@ Talks at PyCon 2006 (Dallas, Texas, USA) =================================================================== -*Feb 24th - Feb 26th 2006.* PyPy developers spoke at `PyCon 2006`_. +*Feb 24th - Feb 26th 2006.* PyPy developers spoke at PyCon 2006. .. _`PyCon 2006`: http://us.pycon.org/TX2006/HomePage @@ -247,7 +247,7 @@ .. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg .. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html .. 
_`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ -.. _`Trillke-Gut`: http://www.trillke.net/images/HomePagePictureSmall.jpg +.. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished ====================================================== @@ -310,6 +310,6 @@ Read more in `EuroPython sprint announcement`_, see who is planning to attend on `the people page`_. There is also a page_ in the python wiki. -.. _`EuroPython sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/europython-2006/announce.html -.. _`the people page`: http://codespeak.net/pypy/extradoc/sprintinfo/europython-2006/people.html +.. _`EuroPython sprint announcement`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/post-ep2006/announce.txt +.. _`the people page`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/post-ep2006/people.txt .. _page: http://wiki.python.org/moin/EuroPython2006 diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -146,7 +146,7 @@ implementation must behave in a static way often referenced as "RPythonic". -.. _Starkiller: http://www.python.org/pycon/dc2004/papers/1/paper.pdf +.. _Starkiller: http://people.csail.mit.edu/jrb/Projects/starkiller.pdf .. _ShedSkin: http://shed-skin.blogspot.com/ However, when the PyPy interpreter is started as a Python program, it @@ -486,9 +486,9 @@ logilab-common_ and astng_ that you will need to install too before you can use the tool. -.. _Pylint: http://www.logilab.org/projects/pylint -.. _logilab-common: http://www.logilab.org/projects/common -.. _astng: http://www.logilab.org/projects/astng +.. _Pylint: http://www.logilab.org/project/pylint +.. _logilab-common: http://www.logilab.org/project/logilab-common +.. _astng: http://www.logilab.org/project/logilab-astng @@ -903,7 +903,7 @@ .. _`register with the tracker`: https://codespeak.net/issue/pypy-dev/user?@template=register -.. 
_`roundup`: http://roundup.sf.net +.. _`roundup`: http://roundup.sourceforge.net/ .. _`testing in PyPy`: @@ -931,7 +931,7 @@ .. _`standard object space`: objspace.html#standard-object-space .. _`objectspace`: objspace.html -.. _`py.test`: http://codespeak.net/py/current/doc/test.html +.. _`py.test`: http://pytest.org/ Interpreter level tests ----------------------- @@ -1030,7 +1030,7 @@ files. Here is a `ReST quickstart`_ but you can also just look at the existing documentation and see how things work. -.. _`ReST quickstart`: http://docutils.sourceforge.net/docs/rst/quickref.html +.. _`ReST quickstart`: http://docutils.sourceforge.net/docs/user/rst/quickref.html Note that the web site of http://pypy.org/ is maintained separately. For now it is in the repository https://bitbucket.org/pypy/extradoc diff --git a/pypy/doc/discussion/distribution-implementation.rst b/pypy/doc/discussion/distribution-implementation.rst --- a/pypy/doc/discussion/distribution-implementation.rst +++ b/pypy/doc/discussion/distribution-implementation.rst @@ -88,4 +88,4 @@ probably using some of the Armin's hacks to rewrite it using greenlets instead of threads. -.. _`py.execnet`: http://codespeak.net/py/current/doc/execnet.html +.. _`py.execnet`: http://codespeak.net/execnet/ diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -96,8 +96,8 @@ -.. _`py-lib`: http://codespeak.net/py/current/doc/ -.. _`py.test`: http://codespeak.net/py/current/doc/test.html +.. _`py-lib`: http://pylib.org/ +.. _`py.test`: http://pytest.org/ .. _codespeak: http://codespeak.net/ .. 
_`pypy-dev`: http://codespeak.net/mailman/listinfo/pypy-dev diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -197,13 +197,11 @@ It seems that a lot of strange, unexplainable problems can be magically solved by removing all the \*.pyc files from the PyPy source tree -(the script `py.cleanup`_ from py/bin will do that for you). +(the script py.cleanup from pypy/tool will do that for you). Another thing you can do is removing the directory pypy/_cache completely. If the error is persistent and still annoys you after this treatment please send us a bug report (or even better, a fix :-) -.. _`py.cleanup`: http://codespeak.net/py/current/doc/bin.html - ------------------------------------------------------------- OSError: ... cannot restore segment prot after reloc... Help? ------------------------------------------------------------- @@ -258,7 +256,7 @@ .. [BRETT] Brett Cannon, Localized Type Inference of Atomic Types in Python, - http://www.ocf.berkeley.edu/~bac/thesis.pdf + http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.90.3231 .. [D05.1] Compiling Dynamic Language Implementations, Report from the PyPy project to the E.U., diff --git a/pypy/doc/release-0.9.0.rst b/pypy/doc/release-0.9.0.rst --- a/pypy/doc/release-0.9.0.rst +++ b/pypy/doc/release-0.9.0.rst @@ -59,7 +59,7 @@ **testing refinements** py.test, our testing tool, now has preliminary support for doctests. We now run all our tests every night, and you can see the summary at: - http://snake.cs.uni-duesseldorf.de/pypytest/summary.html + http://buildbot.pypy.org/summary What is PyPy (about)? ------------------------------------------------ From commits-noreply at bitbucket.org Mon Apr 25 22:34:05 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 25 Apr 2011 22:34:05 +0200 (CEST) Subject: [pypy-svn] pypy default: os.path.samefile does not exist on windows. 
Message-ID: <20110425203405.F18B2282B9D@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43613:30610b96e715 Date: 2011-04-25 22:30 +0200 http://bitbucket.org/pypy/pypy/changeset/30610b96e715/ Log: os.path.samefile does not exist on windows. Use the similar function from py.path.local() diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -289,9 +289,11 @@ # Double check to ensure we are not overwriting the current interpreter try: - exe_name = str(drv.compute_exe_name()) - assert not os.path.samefile(exe_name, sys.executable), ( - 'Output file %r is the currently running ' + this_exe = py.path.local(sys.executable).new(ext='') + exe_name = drv.compute_exe_name() + samefile = this_exe.samefile(exe_name) + assert not samefile, ( + 'Output file %s is the currently running ' 'interpreter (use --output=...)'% exe_name) except OSError: pass From commits-noreply at bitbucket.org Mon Apr 25 23:42:53 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Mon, 25 Apr 2011 23:42:53 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix translation on Linux: os.path.samefile() raises subclasses Message-ID: <20110425214253.19D83282BEA@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43614:f7345e7d367d Date: 2011-04-25 23:42 +0200 http://bitbucket.org/pypy/pypy/changeset/f7345e7d367d/ Log: Fix translation on Linux: os.path.samefile() raises subclasses of EnvironmentError diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -295,7 +295,7 @@ assert not samefile, ( 'Output file %s is the currently running ' 'interpreter (use --output=...)'% exe_name) - except OSError: + except EnvironmentError: pass goals = translateconfig.goals From commits-noreply at bitbucket.org Tue Apr 26 00:20:53 2011 From: commits-noreply at bitbucket.org 
(amauryfa) Date: Tue, 26 Apr 2011 00:20:53 +0200 (CEST) Subject: [pypy-svn] pypy default: With the mingw32 compiler, ".section .rdata" are mixed with function code. Message-ID: <20110425222053.39F10282BEB@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43615:11ec65b8347f Date: 2011-04-26 00:20 +0200 http://bitbucket.org/pypy/pypy/changeset/11ec65b8347f/ Log: With the mingw32 compiler, ".section .rdata" are mixed with function code. OTOH, the end of the file is stuffed with "_loc.XXX" labels diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py --- a/pypy/translator/c/gcc/trackgcroot.py +++ b/pypy/translator/c/gcc/trackgcroot.py @@ -1443,6 +1443,7 @@ class Mingw32AssemblerParser(DarwinAssemblerParser): format = "mingw32" + r_sectionstart = re.compile(r"^_loc()") FunctionGcRootTracker = Mingw32FunctionGcRootTracker class MsvcAssemblerParser(AssemblerParser): From commits-noreply at bitbucket.org Tue Apr 26 08:59:03 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 08:59:03 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: add a paragraph about coroutine death Message-ID: <20110426065903.EC265282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43616:6f5332621d03 Date: 2011-04-26 08:58 +0200 http://bitbucket.org/pypy/pypy/changeset/6f5332621d03/ Log: add a paragraph about coroutine death diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -138,6 +138,11 @@ will come from any call to ``coro.switch()`` and can be caught. If the exception isn't caught, it will be propagated to the parent coroutine. +When a coroutine is garbage-collected, it gets the ``.kill()`` method sent to +it. This happens at the point the next ``.switch`` method is called, so the +target coroutine of this call will be executed only after the ``.kill`` has +finished. 
+ Example ~~~~~~~ From commits-noreply at bitbucket.org Tue Apr 26 10:56:28 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 10:56:28 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix tests. Message-ID: <20110426085628.B4118282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43617:52480720380e Date: 2011-04-26 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/52480720380e/ Log: Fix tests. diff --git a/pypy/translator/c/src/asm.h b/pypy/translator/c/src/asm.h --- a/pypy/translator/c/src/asm.h +++ b/pypy/translator/c/src/asm.h @@ -1,3 +1,5 @@ +#ifndef _PYPY_ASM_H +#define _PYPY_ASM_H /* optional assembler bits */ #if defined(__GNUC__) && defined(__i386__) @@ -11,3 +13,5 @@ #if defined(__GNUC__) && defined(__ppc__) # include "src/asm_ppc.h" #endif + +#endif /* _PYPY_ASM_H */ From commits-noreply at bitbucket.org Tue Apr 26 11:41:30 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 26 Apr 2011 11:41:30 +0200 (CEST) Subject: [pypy-svn] pypy jit-continue_tracing: (cfbolz, hakanardo): reenabled continue tracing after an invalid loop Message-ID: <20110426094130.08390282BEC@codespeak.net> Author: Hakan Ardo Branch: jit-continue_tracing Changeset: r43618:db13dae07387 Date: 2011-04-26 11:40 +0200 http://bitbucket.org/pypy/pypy/changeset/db13dae07387/ Log: (cfbolz, hakanardo): reenabled continue tracing after an invalid loop diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py --- a/pypy/jit/metainterp/compile.py +++ b/pypy/jit/metainterp/compile.py @@ -92,7 +92,7 @@ history = metainterp.history loop = create_empty_loop(metainterp) - loop.inputargs = history.inputargs + loop.inputargs = history.inputargs[:] for box in loop.inputargs: assert isinstance(box, Box) # make a copy, because optimize_loop can mutate the ops and descrs @@ -583,7 +583,7 @@ # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
new_loop = create_empty_loop(metainterp) - new_loop.inputargs = metainterp.history.inputargs + new_loop.inputargs = metainterp.history.inputargs[:] # clone ops, as optimize_bridge can mutate the ops new_loop.operations = [op.clone() for op in metainterp.history.operations] metainterp_sd = metainterp.staticdata diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py --- a/pypy/jit/metainterp/history.py +++ b/pypy/jit/metainterp/history.py @@ -785,7 +785,6 @@ def repr_of_descr(self): return '' % self.number - class TreeLoop(object): inputargs = None operations = None diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1813,9 +1813,9 @@ else: self.compile(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! - #self.staticdata.log('cancelled, tracing more...') - self.staticdata.log('cancelled, stopping tracing') - raise SwitchToBlackhole(ABORT_BAD_LOOP) + self.staticdata.log('cancelled, tracing more...') + #self.staticdata.log('cancelled, stopping tracing') + #raise SwitchToBlackhole(ABORT_BAD_LOOP) # Otherwise, no loop found so far, so continue tracing. 
start = len(self.history.operations) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -1864,7 +1864,7 @@ return a1.val + b1.val res = self.meta_interp(g, [3, 23]) assert res == 7068153 - self.check_loop_count(6) + self.check_loop_count(7) self.check_loops(guard_true=4, guard_class=0, int_add=2, int_mul=2, guard_false=2) diff --git a/pypy/jit/codewriter/jitcode.py b/pypy/jit/codewriter/jitcode.py --- a/pypy/jit/codewriter/jitcode.py +++ b/pypy/jit/codewriter/jitcode.py @@ -100,6 +100,9 @@ def __repr__(self): return '' % self.name + def _clone_if_mutable(self): + raise NotImplementedError + class MissingLiveness(Exception): pass @@ -111,6 +114,9 @@ dict = getattr(self, 'dict', '?') return '' % (dict,) + def _clone_if_mutable(self): + raise NotImplementedError + class LiveVarsInfo(object): def __init__(self, live_i, live_r, live_f): diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py --- a/pypy/jit/metainterp/test/test_send.py +++ b/pypy/jit/metainterp/test/test_send.py @@ -204,7 +204,6 @@ # InvalidLoop condition, and was then unrolled, giving two copies # of the body in a single bigger loop with no failing guard except # the final one. 
- py.test.skip('dissabled "try to trace some more when compile fails"') self.check_loop_count(1) self.check_loops(guard_class=0, int_add=2, int_sub=2) @@ -231,6 +230,7 @@ return self.y w1 = W1(10) w2 = W2(20) + def f(x, y): if x & 1: w = w1 @@ -246,7 +246,6 @@ assert res == f(3, 28) res = self.meta_interp(f, [4, 28]) assert res == f(4, 28) - py.test.skip('dissabled "try to trace some more when compile fails"') self.check_loop_count(1) self.check_loops(guard_class=0, int_add=2, int_sub=2) From commits-noreply at bitbucket.org Tue Apr 26 11:44:28 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 11:44:28 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: The License trick with ./ did not work. Since it is only used one place, just put it in by hand. Message-ID: <20110426094428.1EF49282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43619:b70554364f17 Date: 2011-04-26 11:44 +0200 http://bitbucket.org/pypy/pypy/changeset/b70554364f17/ Log: The License trick with ./ did not work. Since it is only used one place, just put it in by hand. diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -57,7 +57,7 @@ `development methodology`_ describes our sprint-driven approach. -`./LICENSE`_ contains licensing details (basically a straight MIT-license). +`LICENSE`_ contains licensing details (basically a straight MIT-license). `Glossary`_ of PyPy words to help you align your inner self with the PyPy universe. @@ -170,6 +170,7 @@ .. _`directory reference`: .. _`rlib`: rlib.html .. _`Sandboxing Python code`: sandbox.html +.. 
_`LICENSE`: https://bitbucket.org/pypy/pypy/src/default/LICENSE PyPy directory cross-reference ------------------------------ From commits-noreply at bitbucket.org Tue Apr 26 11:47:01 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 11:47:01 +0200 (CEST) Subject: [pypy-svn] pypy default: (rguillebert, arigo) Message-ID: <20110426094701.9827B282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43620:4ff59c1c4f93 Date: 2011-04-26 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/4ff59c1c4f93/ Log: (rguillebert, arigo) Test and fix. As usual, of the kind "let's whack a bit more at imports, hopefully not too randomly". diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -53,10 +53,13 @@ relative_f = "from .imp import get_magic", relative_g = "import imp; from .imp import get_magic", ) - setuppkg("pkg.pkg1", + setuppkg("pkg.pkg1", + __init__ = 'from . import a', a = '', relative_d = "from __future__ import absolute_import\nfrom ..string import inpackage", relative_e = "from __future__ import absolute_import\nfrom .. import string", + relative_g = "from .. 
import pkg1\nfrom ..pkg1 import b", + b = "insubpackage = 1", ) setuppkg("pkg.pkg2", a='', b='') setuppkg("pkg_r", inpkg = "import x.y") @@ -402,6 +405,12 @@ from pkg.pkg1 import relative_e assert relative_e.string.inpackage == 1 + def test_future_relative_import_level_3(self): + from pkg.pkg1 import relative_g + assert relative_g.b.insubpackage == 1 + import pkg.pkg1 + assert pkg.pkg1.__package__ == 'pkg.pkg1' + def test_future_relative_import_error_when_in_non_package(self): exec """def imp(): from .string import inpackage diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -215,10 +215,12 @@ space.setitem(w_globals, w("__package__"), ctxt_w_name) else: # Normal module, so work out the package name if any - if '.' not in ctxt_name: + last_dot_position = ctxt_name.rfind('.') + if last_dot_position < 0: space.setitem(w_globals, w("__package__"), space.w_None) - elif rel_modulename: - space.setitem(w_globals, w("__package__"), w(rel_modulename)) + else: + space.setitem(w_globals, w("__package__"), + w(ctxt_name[:last_dot_position])) if modulename: if rel_modulename: From commits-noreply at bitbucket.org Tue Apr 26 11:51:09 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 11:51:09 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix the test: cannot use .inlinetime here, because it does not Message-ID: <20110426095109.DEE14282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43621:418448352bd6 Date: 2011-04-26 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/418448352bd6/ Log: Fix the test: cannot use .inlinetime here, because it does not include the time spent doing the call to time.time(). Anyway the purpose of the test is to check that the result is correctly scaled, for which checking just .totaltime is fine. 
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -110,10 +110,12 @@ efoo = entries[foo.func_code] ebar = entries[bar.func_code] assert 0.9 < efoo.totaltime < 2.9 - assert 0.9 < efoo.inlinetime < 2.9 + # --- cannot test .inlinetime, because it does not include + # --- the time spent doing the call to time.time() + #assert 0.9 < efoo.inlinetime < 2.9 for subentry in ebar.calls: assert 0.9 < subentry.totaltime < 2.9 - assert 0.9 < subentry.inlinetime < 2.9 + #assert 0.9 < subentry.inlinetime < 2.9 def test_cprofile(self): import sys, os From commits-noreply at bitbucket.org Tue Apr 26 12:34:27 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 12:34:27 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix wrong link. Message-ID: <20110426103427.3159E36C201@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43622:a9527c48fea3 Date: 2011-04-26 12:33 +0200 http://bitbucket.org/pypy/pypy/changeset/a9527c48fea3/ Log: fix wrong link. diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -170,7 +170,7 @@ resulting binary interactively much more pleasant. * `pypy/objspace/std`_ contains the `Standard object space`_. The main file - is `pypy/interpreter/objspace.py`_. For each type, the files ``xxxtype.py`` and + is `pypy/objspace/std/objspace.py`_. For each type, the files ``xxxtype.py`` and ``xxxobject.py`` contain respectively the definition of the type and its (default) implementation. 
From commits-noreply at bitbucket.org Tue Apr 26 12:38:05 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 12:38:05 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: make makeref complain about missing files Message-ID: <20110426103805.4D65036C203@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43623:3f2c8a1d982a Date: 2011-04-26 12:37 +0200 http://bitbucket.org/pypy/pypy/changeset/3f2c8a1d982a/ Log: make makeref complain about missing files diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -175,7 +175,7 @@ (default) implementation. * `pypy/objspace`_ contains a few other object spaces: the `pypy/objspace/thunk.py`_, - `pypy/objspace/trace`_ and `pypy/objspace/flow`_ object spaces. The latter is a relatively short piece + `pypy/objspace/trace.py`_ and `pypy/objspace/flow`_ object spaces. The latter is a relatively short piece of code that builds the control flow graphs when the bytecode interpreter runs in it. diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -116,7 +116,7 @@ which are the basic data structures of the translation process. -All these types are defined in `pypy/objspace/flow/model/`_ (which is a rather +All these types are defined in `pypy/objspace/flow/model.py`_ (which is a rather important module in the PyPy source base, to reinforce the point). The flow graph of a function is represented by the class ``FunctionGraph``. 
diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -31,8 +31,8 @@ for linkname in linkrex.findall(content): if '/' in linkname: found = True + assert distdir.join(linkname).check(), "link %s in %s is dead" % (linkname, textfile) if not linkname.endswith("/") and distdir.join(linkname).check(dir=1): - print linkname linkname += "/" addlink(linkname, bitbucket_url + linkname) elif linkname.startswith('issue'): From commits-noreply at bitbucket.org Tue Apr 26 14:13:45 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 14:13:45 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix stale link. Message-ID: <20110426121345.CB4E9282B8B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43624:970763a76520 Date: 2011-04-26 14:12 +0200 http://bitbucket.org/pypy/pypy/changeset/970763a76520/ Log: fix stale link. diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -37,7 +37,7 @@ calling its ``frame.eval()`` method. This main entry point initialize appropriate namespaces and then interprets each bytecode instruction. 
Python's standard library contains -the `lib-python/2.5.2/dis.py`_ module which allows to view +the `lib-python/2.7.0/dis.py`_ module which allows to view the Virtual's machine bytecode instructions:: >>> import dis From commits-noreply at bitbucket.org Tue Apr 26 14:20:19 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Tue, 26 Apr 2011 14:20:19 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix translation tests on Windows Message-ID: <20110426122019.93540282B8B@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43625:1512f617de07 Date: 2011-04-26 14:12 +0200 http://bitbucket.org/pypy/pypy/changeset/1512f617de07/ Log: Fix translation tests on Windows diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c --- a/pypy/translator/c/src/debug_print.c +++ b/pypy/translator/c/src/debug_print.c @@ -4,7 +4,12 @@ #include #include +#ifndef _WIN32 #include +#else +#define WIN32_LEAN_AND_MEAN +#include +#endif #include "src/profiling.h" #include "src/debug_print.h" @@ -21,7 +26,7 @@ { char *filename = getenv("PYPYLOG"); if (filename) -#ifndef MS_WINDOWS +#ifndef _WIN32 unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ #else putenv("PYPYLOG="); /* don't pass it to subprocesses */ From commits-noreply at bitbucket.org Tue Apr 26 14:24:53 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 14:24:53 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: replace stale link with working one Message-ID: <20110426122453.0E127282B8B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43626:1792f6d7752e Date: 2011-04-26 14:24 +0200 http://bitbucket.org/pypy/pypy/changeset/1792f6d7752e/ Log: replace stale link with working one diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -90,7 +90,8 @@ You can enable this feature with the 
:config:`objspace.std.withrope` option. -.. _`"Ropes: An alternative to Strings."`: http://www.cs.ubc.ca/local/reading/proceedings/spe91-95/spe/vol25/issue12/spe986.pdf +.. _`"Ropes: An alternative to Strings."`: http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.14.9450&rep=rep1&type=pdf + Integer Optimizations --------------------- From commits-noreply at bitbucket.org Tue Apr 26 16:02:51 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 26 Apr 2011 16:02:51 +0200 (CEST) Subject: [pypy-svn] pypy jit-continue_tracing: (cfbolz, hakanardo): restore inputargs or clear it Message-ID: <20110426140251.1AC16282B8B@codespeak.net> Author: Hakan Ardo Branch: jit-continue_tracing Changeset: r43627:e70e82655721 Date: 2011-04-26 16:02 +0200 http://bitbucket.org/pypy/pypy/changeset/e70e82655721/ Log: (cfbolz, hakanardo): restore inputargs or clear it diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py --- a/pypy/jit/metainterp/pyjitpl.py +++ b/pypy/jit/metainterp/pyjitpl.py @@ -1877,6 +1877,7 @@ def compile(self, original_boxes, live_arg_boxes, start, start_resumedescr): num_green_args = self.jitdriver_sd.num_green_args + original_inputargs = self.history.inputargs self.history.inputargs = original_boxes[num_green_args:] greenkey = original_boxes[:num_green_args] old_loop_tokens = self.get_compiled_merge_points(greenkey) @@ -1885,7 +1886,11 @@ greenkey, start, start_resumedescr) if loop_token is not None: # raise if it *worked* correctly self.set_compiled_merge_points(greenkey, old_loop_tokens) + self.history.inputargs = None + self.history.operations = None raise GenerateMergePoint(live_arg_boxes, loop_token) + + self.history.inputargs = original_inputargs self.history.operations.pop() # remove the JUMP # FIXME: Why is self.history.inputargs not restored? 
@@ -1902,10 +1907,12 @@ target_loop_token = compile.compile_new_bridge(self, old_loop_tokens, self.resumekey) - if target_loop_token is not None: # raise if it *worked* correctly - raise GenerateMergePoint(live_arg_boxes, target_loop_token) finally: self.history.operations.pop() # remove the JUMP + if target_loop_token is not None: # raise if it *worked* correctly + self.history.inputargs = None + self.history.operations = None + raise GenerateMergePoint(live_arg_boxes, target_loop_token) def compile_bridge_and_loop(self, original_boxes, live_arg_boxes, start, bridge_arg_boxes, start_resumedescr): @@ -1940,7 +1947,8 @@ assert False assert target_loop_token is not None - self.history.operations = original_operations + self.history.inputargs = None + self.history.operations = None raise GenerateMergePoint(live_arg_boxes, old_loop_tokens[0]) def compile_done_with_this_frame(self, exitbox): From commits-noreply at bitbucket.org Tue Apr 26 16:46:56 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 16:46:56 +0200 (CEST) Subject: [pypy-svn] pypy default: (antocuni, rguillebert, arigo) Message-ID: <20110426144656.088DB282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43628:2e5dc4da5631 Date: 2011-04-26 16:46 +0200 http://bitbucket.org/pypy/pypy/changeset/2e5dc4da5631/ Log: (antocuni, rguillebert, arigo) Reintroduce the performance hack, maybe. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -259,12 +259,20 @@ # fall back to absolute import at the end of the # function. 
if level == -1: - tentative = True + # This check is a fast path to avoid redoing the + # following absolute_import() in the common case + w_mod = check_sys_modules_w(space, rel_modulename) + if w_mod is not None and space.is_w(w_mod, space.w_None): + # if we already find space.w_None, it means that we + # already tried and failed and falled back to the + # end of this function. + w_mod = None + else: + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=True) else: - tentative = False - - w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=tentative) + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=False) if w_mod is not None: space.timer.stop_name("importhook", modulename) return w_mod @@ -660,7 +668,7 @@ parent_name = '.'.join(namepath[:-1]) parent = None if parent_name: - w_parent = check_sys_modules(space, space.wrap(parent_name)) + w_parent = check_sys_modules_w(space, parent_name) if w_parent is None: raise operationerrfmt( space.w_ImportError, From commits-noreply at bitbucket.org Tue Apr 26 16:46:58 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 16:46:58 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110426144658.69087282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43629:6e2d6a60898c Date: 2011-04-26 16:46 +0200 http://bitbucket.org/pypy/pypy/changeset/6e2d6a60898c/ Log: merge heads diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c --- a/pypy/translator/c/src/debug_print.c +++ b/pypy/translator/c/src/debug_print.c @@ -4,7 +4,12 @@ #include #include +#ifndef _WIN32 #include +#else +#define WIN32_LEAN_AND_MEAN +#include +#endif #include "src/profiling.h" #include "src/debug_print.h" @@ -21,7 +26,7 @@ { char *filename = getenv("PYPYLOG"); if (filename) -#ifndef MS_WINDOWS +#ifndef _WIN32 unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ #else 
putenv("PYPYLOG="); /* don't pass it to subprocesses */ From commits-noreply at bitbucket.org Tue Apr 26 17:07:18 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 17:07:18 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: jit/_ref should not be in the toctree. Message-ID: <20110426150718.5D14F282B8B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43630:49e8f1cbac76 Date: 2011-04-26 16:40 +0200 http://bitbucket.org/pypy/pypy/changeset/49e8f1cbac76/ Log: jit/_ref should not be in the toctree. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -108,7 +108,6 @@ rlib.rst rtyper.rst translation.rst - jit/_ref.rst jit/index.rst jit/overview.rst jit/pyjitpl5.rst diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -37,7 +37,7 @@ calling its ``frame.eval()`` method. This main entry point initialize appropriate namespaces and then interprets each bytecode instruction. Python's standard library contains -the `lib-python/2.7.0/dis.py`_ module which allows to view +the `lib-python/2.7.0/dis.py`_ module which allows to view the Virtual's machine bytecode instructions:: >>> import dis From commits-noreply at bitbucket.org Tue Apr 26 17:07:19 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 17:07:19 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: We don't have static files, and therefore do not need the _static directory. We also do not need a module index. Message-ID: <20110426150719.D4FB3282B8B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43631:7d2b72d4daae Date: 2011-04-26 17:06 +0200 http://bitbucket.org/pypy/pypy/changeset/7d2b72d4daae/ Log: We don't have static files, and therefore do not need the _static directory. We also do not need a module index.
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -120,7 +120,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +# html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. @@ -138,7 +138,7 @@ #html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +html_use_modindex = False # If false, no index is generated. #html_use_index = True @@ -191,7 +191,7 @@ #latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +latex_use_modindex = False # Example configuration for intersphinx: refer to the Python standard library. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -32,13 +32,13 @@ * `Development mailing list`_: development and conceptual discussions. -* `Subversion commit mailing list`_: updates to code and +* `Mercurial commit mailing list`_: updates to code and documentation. +* `Sprint mailing list`_: mailing list for organizing upcoming sprints. + * `Development bug/feature tracker`_: filing bugs and feature requests. -* `Sprint mailing list`_: mailing list for organizing upcoming sprints. - * **IRC channel #pypy on freenode**: Many of the core developers are hanging out at #pypy on irc.freenode.net. You are welcome to join and ask questions (if they are not already developed in the FAQ_). @@ -60,7 +60,7 @@ .. _`development bug/feature tracker`: https://codespeak.net/issue/pypy-dev/ .. _here: http://tismerysoft.de/pypy/irc-logs/pypy .. _`sprint mailing list`: http://codespeak.net/mailman/listinfo/pypy-sprint -.. _`commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn +.. 
_`Mercurial commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn .. _`development mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html .. _`Documentation`: docindex.html @@ -134,7 +134,6 @@ ================== * :ref:`genindex` -* :ref:`modindex` * :ref:`search` * :ref:`glossary` From commits-noreply at bitbucket.org Tue Apr 26 17:26:11 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 26 Apr 2011 17:26:11 +0200 (CEST) Subject: [pypy-svn] pypy default: (antocuni, arigo) in-progress, write a test for 2e5dc4da5631 Message-ID: <20110426152611.9A756282B8B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43632:70609ab75433 Date: 2011-04-26 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/70609ab75433/ Log: (antocuni, arigo) in-progress, write a test for 2e5dc4da5631 diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -135,7 +135,7 @@ def format_ops(self, id=None, **kwds): if id is None: - ops = self.allops() + ops = self.allops(**kwds) else: ops = self.ops_by_id(id, **kwds) return '\n'.join(map(str, ops)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -693,6 +693,25 @@ guard_isnull(p16, descr=) """) + def test_import_fast_path(self, tmpdir): + py.test.skip('in-progress') + pkg = tmpdir.join('mypkg').ensure(dir=True) + pkg.join('__init__.py').write("") + pkg.join('mod.py').write(str(py.code.Source(""" + def do_the_import(): + import sys + """))) + def main(path, n): + import sys + sys.path.append(path) + from mypkg.mod import do_the_import + for i in range(n): + do_the_import() + # + log = self.run(main, [str(tmpdir), 300], threshold=200) + loop, = 
log.loops_by_filename(self.filepath) + # XXX: check the loop + def test_arraycopy_disappears(self): def main(n): i = 0 From commits-noreply at bitbucket.org Tue Apr 26 17:26:12 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Tue, 26 Apr 2011 17:26:12 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110426152612.EF759282B8B@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43633:a618881a5776 Date: 2011-04-26 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/a618881a5776/ Log: merge heads diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -259,12 +259,20 @@ # fall back to absolute import at the end of the # function. if level == -1: - tentative = True + # This check is a fast path to avoid redoing the + # following absolute_import() in the common case + w_mod = check_sys_modules_w(space, rel_modulename) + if w_mod is not None and space.is_w(w_mod, space.w_None): + # if we already find space.w_None, it means that we + # already tried and failed and falled back to the + # end of this function. 
+ w_mod = None + else: + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=True) else: - tentative = False - - w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=tentative) + w_mod = absolute_import(space, rel_modulename, rel_level, + fromlist_w, tentative=False) if w_mod is not None: space.timer.stop_name("importhook", modulename) return w_mod @@ -660,7 +668,7 @@ parent_name = '.'.join(namepath[:-1]) parent = None if parent_name: - w_parent = check_sys_modules(space, space.wrap(parent_name)) + w_parent = check_sys_modules_w(space, parent_name) if w_parent is None: raise operationerrfmt( space.w_ImportError, From commits-noreply at bitbucket.org Tue Apr 26 17:30:58 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 26 Apr 2011 17:30:58 +0200 (CEST) Subject: [pypy-svn] pypy jit-continue_tracing: (cfbolz, hakanardo): test for e70e82655721 Message-ID: <20110426153058.77575282B8B@codespeak.net> Author: Hakan Ardo Branch: jit-continue_tracing Changeset: r43634:95c9a6c0fb06 Date: 2011-04-26 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/95c9a6c0fb06/ Log: (cfbolz, hakanardo): test for e70e82655721 diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2101,6 +2101,79 @@ assert self.meta_interp(f, [5, 100]) == 0 self.check_loops(int_rshift=1, everywhere=True) + def test_inputarg_reset_bug(self): + ## j = 0 + ## while j < 100: + ## j += 1 + + ## c = 0 + ## j = 0 + ## while j < 2: + ## j += 1 + ## if c == 0: + ## c = 1 + ## else: + ## c = 0 + + ## j = 0 + ## while j < 100: + ## j += 1 + + def get_printable_location(i): + return str(i) + + myjitdriver = JitDriver(greens = ['i'], reds = ['j', 'c', 'a'], + get_printable_location=get_printable_location) + bytecode = "0j10jc20a3" + def f(): + myjitdriver.set_param('threshold', 7) + 
myjitdriver.set_param('trace_eagerness', 1) + i = j = c = a = 1 + while True: + myjitdriver.jit_merge_point(i=i, j=j, c=c, a=a) + if i >= len(bytecode): + break + op = bytecode[i] + if op == 'j': + j += 1 + elif op == 'c': + c = hint(c, promote=True) + c = 1 - c + elif op == '2': + if j < 3: + i -= 3 + myjitdriver.can_enter_jit(i=i, j=j, c=c, a=a) + elif op == '1': + k = j*a + if j < 100: + i -= 2 + a += k + myjitdriver.can_enter_jit(i=i, j=j, c=c, a=a) + else: + a += k*2 + elif op == '0': + j = c = a = 0 + elif op == 'a': + j += 1 + a += 1 + elif op == '3': + if a < 100: + i -= 2 + myjitdriver.can_enter_jit(i=i, j=j, c=c, a=a) + + else: + return ord(op) + i += 1 + return 42 + assert f() == 42 + def g(): + res = 1 + for i in range(10): + res = f() + return res + res = self.meta_interp(g, []) + assert res == 42 + def test_read_timestamp(self): import time from pypy.rlib.rtimer import read_timestamp From commits-noreply at bitbucket.org Tue Apr 26 17:38:19 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 17:38:19 +0200 (CEST) Subject: [pypy-svn] pypy default: (antocuni, arigo, rguillebert a bit) Message-ID: <20110426153819.EFA06282B8B@codespeak.net> Author: Armin Rigo Branch: Changeset: r43635:c4d6beb47daa Date: 2011-04-26 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/c4d6beb47daa/ Log: (antocuni, arigo, rguillebert a bit) Un-(in-progress)-ify the test. 
diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -128,10 +128,13 @@ if op.name != 'debug_merge_point' or include_debug_merge_points: yield op - def allops(self, include_debug_merge_points=False): + def allops(self, include_debug_merge_points=False, opcode=None): + opcode_name = opcode for chunk in self.flatten_chunks(): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): - yield op + opcode = chunk.getopcode() + if opcode_name is None or opcode.__class__.__name__ == opcode_name: + for op in self._ops_for_chunk(chunk, include_debug_merge_points): + yield op def format_ops(self, id=None, **kwds): if id is None: diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -694,7 +694,6 @@ """) def test_import_fast_path(self, tmpdir): - py.test.skip('in-progress') pkg = tmpdir.join('mypkg').ensure(dir=True) pkg.join('__init__.py').write("") pkg.join('mod.py').write(str(py.code.Source(""" @@ -710,7 +709,10 @@ # log = self.run(main, [str(tmpdir), 300], threshold=200) loop, = log.loops_by_filename(self.filepath) - # XXX: check the loop + # this is a check for a slow-down that introduced a + # call_may_force(absolute_import_with_lock). 
+ for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")): + assert 'call' not in opname # no call-like opcode def test_arraycopy_disappears(self): def main(n): From commits-noreply at bitbucket.org Tue Apr 26 17:43:26 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 17:43:26 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: A precise check: when we attach a bridge to a guard, check Message-ID: <20110426154326.763B2282B8B@codespeak.net> Author: Armin Rigo Branch: post-release-1.5 Changeset: r43636:75fa8b0f2ba5 Date: 2011-04-26 17:43 +0200 http://bitbucket.org/pypy/pypy/changeset/75fa8b0f2ba5/ Log: A precise check: when we attach a bridge to a guard, check that the provided arguments match the number and types of the fail_args- without-the-Nones. diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -407,6 +407,13 @@ guard_op = old_loop.operations[old_index] assert guard_op.is_guard() guard_op.jump_target = new_loop + # check that the bridge's inputargs are of the correct number and + # kind for the guard + if guard_op.fail_args is not None: + argkinds = [v.concretetype for v in guard_op.fail_args if v] + else: + argkinds = [] + assert argkinds == [v.concretetype for v in new_loop.inputargs] # ------------------------------ From commits-noreply at bitbucket.org Tue Apr 26 18:15:19 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 18:15:19 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: enable the graphviz extension and use that instead of our disabled graphviz directive Message-ID: <20110426161519.25C9A282B8B@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43637:3523202cd9f2 Date: 2011-04-26 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/3523202cd9f2/ Log: enable the graphviz extension and use that instead of our disabled graphviz 
directive diff --git a/pypy/doc/image/parsing_example9.dot b/pypy/doc/image/parsing_example9.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example9.dot +++ /dev/null @@ -1,13 +0,0 @@ -digraph G{ -"-1219430228" [label="list"]; -"-1219430228" -> "-1213608980"; -"-1213608980" [shape=box,label="DECIMAL\n'1'"]; -"-1219430228" -> "-1213623380"; -"-1213623380" [shape=box,label="DECIMAL\n'2'"]; -"-1219430228" -> "-1213441652"; -"-1213441652" [shape=box,label="DECIMAL\n'3'"]; -"-1219430228" -> "-1213441620"; -"-1213441620" [shape=box,label="DECIMAL\n'4'"]; -"-1219430228" -> "-1213442100"; -"-1213442100" [shape=box,label="DECIMAL\n'5'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example8.dot b/pypy/doc/image/parsing_example8.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example8.dot +++ /dev/null @@ -1,21 +0,0 @@ -digraph G{ -"-1213611892" [label="list"]; -"-1213611892" -> "-1213608980"; -"-1213608980" [shape=box,label="DECIMAL\n'1'"]; -"-1213611892" -> "-1213623476"; -"-1213623476" [label="list"]; -"-1213623476" -> "-1213623380"; -"-1213623380" [shape=box,label="DECIMAL\n'2'"]; -"-1213623476" -> "-1213442868"; -"-1213442868" [label="list"]; -"-1213442868" -> "-1213441652"; -"-1213441652" [shape=box,label="DECIMAL\n'3'"]; -"-1213442868" -> "-1213441332"; -"-1213441332" [label="list"]; -"-1213441332" -> "-1213441620"; -"-1213441620" [shape=box,label="DECIMAL\n'4'"]; -"-1213441332" -> "-1213443060"; -"-1213443060" [label="list"]; -"-1213443060" -> "-1213442100"; -"-1213442100" [shape=box,label="DECIMAL\n'5'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example5.dot b/pypy/doc/image/parsing_example5.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example5.dot +++ /dev/null @@ -1,21 +0,0 @@ -digraph G{ -"-1219949908" [label="n"]; -"-1219949908" -> "-1214026452"; -"-1214026452" [shape=box,label="__0_a\n'a'"]; -"-1219949908" -> "-1214028276"; -"-1214028276" [shape=box,label="__1_b\n'b'"]; 
-"-1219949908" -> "-1214027316"; -"-1214027316" [shape=box,label="__2_c\n'c'"]; -"-1219949908" -> "-1219949876"; -"-1219949876" [label="n"]; -"-1219949876" -> "-1214141364"; -"-1214141364" [shape=box,label="__0_a\n'a'"]; -"-1219949876" -> "-1214141748"; -"-1214141748" [shape=box,label="__1_b\n'b'"]; -"-1219949876" -> "-1214140756"; -"-1214140756" [shape=box,label="__2_c\n'c'"]; -"-1219949876" -> "-1219949748"; -"-1219949748" [label="m"]; -"-1219949748" -> "-1214414868"; -"-1214414868" [shape=box,label="__5_d\n'd'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example10.dot b/pypy/doc/image/parsing_example10.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example10.dot +++ /dev/null @@ -1,37 +0,0 @@ -digraph G{ -"-1220061652" [label="object"]; -"-1220061652" -> "-1220127636"; -"-1220127636" [label="entry"]; -"-1220127636" -> "-1213915636"; -"-1213915636" [shape=box,label="STRING\n'a'"]; -"-1220127636" -> "-1214251156"; -"-1214251156" [shape=box,label="STRING\n'5'"]; -"-1220061652" -> "-1220063188"; -"-1220063188" [label="entry"]; -"-1220063188" -> "-1214253076"; -"-1214253076" [shape=box,label="STRING\n'b'"]; -"-1220063188" -> "-1220059444"; -"-1220059444" [label="array"]; -"-1220059444" -> "-1214253364"; -"-1214253364" [shape=box,label="NUMBER\n'1'"]; -"-1220059444" -> "-1214254292"; -"-1214254292" [shape=box,label="__0_null\n'null'"]; -"-1220059444" -> "-1214253268"; -"-1214253268" [shape=box,label="NUMBER\n'3'"]; -"-1220059444" -> "-1214252596"; -"-1214252596" [shape=box,label="__1_true\n'true'"]; -"-1220059444" -> "-1220062260"; -"-1220062260" [label="object"]; -"-1220062260" -> "-1220060116"; -"-1220060116" [label="entry"]; -"-1220060116" -> "-1214211860"; -"-1214211860" [shape=box,label="STRING\n'f'"]; -"-1220060116" -> "-1214210132"; -"-1214210132" [shape=box,label="STRING\n'g'"]; -"-1220062260" -> "-1220062868"; -"-1220062868" [label="entry"]; -"-1220062868" -> "-1214211956"; -"-1214211956" [shape=box,label="STRING\n'h'"]; 
-"-1220062868" -> "-1214212308"; -"-1214212308" [shape=box,label="NUMBER\n'6'"]; -} diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -323,7 +323,35 @@ produces a syntax tree that follows the precedence of the operators. For example the expression ``12 + 4 * 5`` is parsed into the following tree: -.. graphviz:: image/parsing_example1.dot +.. graphviz:: + + digraph G{ + "-1213931828" [label="additive"]; + "-1213931828" -> "-1213951956"; + "-1213951956" [label="multitive"]; + "-1213951956" -> "-1213949172"; + "-1213949172" [label="primary"]; + "-1213949172" -> "-1213949812"; + "-1213949812" [shape=box,label="DECIMAL\l'12'"]; + "-1213931828" -> "-1213935220"; + "-1213935220" [shape=box,label="__0_+\l'+'"]; + "-1213931828" -> "-1213951316"; + "-1213951316" [label="additive"]; + "-1213951316" -> "-1213948180"; + "-1213948180" [label="multitive"]; + "-1213948180" -> "-1213951380"; + "-1213951380" [label="primary"]; + "-1213951380" -> "-1213951508"; + "-1213951508" [shape=box,label="DECIMAL\l'4'"]; + "-1213948180" -> "-1213948788"; + "-1213948788" [shape=box,label="__1_*\l'*'"]; + "-1213948180" -> "-1213951060"; + "-1213951060" [label="multitive"]; + "-1213951060" -> "-1213948980"; + "-1213948980" [label="primary"]; + "-1213948980" -> "-1213950420"; + "-1213950420" [shape=box,label="DECIMAL\l'5'"]; + } Parse Trees ----------- @@ -380,11 +408,43 @@ Parsing the string "A, A, A" gives the tree: -.. graphviz:: image/parsing_example2.dot +.. 
graphviz:: + + digraph G{ + "-1213678004" [label="n"]; + "-1213678004" -> "-1213681108"; + "-1213681108" [shape=box,label="__0_A\n'A'"]; + "-1213678004" -> "-1213681332"; + "-1213681332" [shape=box,label="__1_,\n','"]; + "-1213678004" -> "-1213837780"; + "-1213837780" [label="n"]; + "-1213837780" -> "-1213837716"; + "-1213837716" [shape=box,label="__0_A\n'A'"]; + "-1213837780" -> "-1213839476"; + "-1213839476" [shape=box,label="__1_,\n','"]; + "-1213837780" -> "-1213839956"; + "-1213839956" [label="n"]; + "-1213839956" -> "-1213840948"; + "-1213840948" [shape=box,label="__0_A\n'A'"]; + } After transformation the tree has the "," nodes removed: -.. graphviz:: image/parsing_example3.dot +.. graphviz:: + + digraph G{ + "-1219325716" [label="n"]; + "-1219325716" -> "-1219325844"; + "-1219325844" [shape=box,label="__0_A\n'A'"]; + "-1219325716" -> "-1219324372"; + "-1219324372" [label="n"]; + "-1219324372" -> "-1219325524"; + "-1219325524" [shape=box,label="__0_A\n'A'"]; + "-1219324372" -> "-1219324308"; + "-1219324308" [label="n"]; + "-1219324308" -> "-1219325492"; + "-1219325492" [shape=box,label="__0_A\n'A'"]; + } ++++++++ @@ -401,12 +461,61 @@ Parsing the string "a b c (a b c d)" gives the tree: -.. graphviz:: image/parsing_example4.dot +.. 
graphviz:: + + digraph G{ + "-1214029460" [label="n"]; + "-1214029460" -> "-1214026452"; + "-1214026452" [shape=box,label="__0_a\n'a'"]; + "-1214029460" -> "-1214028276"; + "-1214028276" [shape=box,label="__1_b\n'b'"]; + "-1214029460" -> "-1214027316"; + "-1214027316" [shape=box,label="__2_c\n'c'"]; + "-1214029460" -> "-1214026868"; + "-1214026868" [label="m"]; + "-1214026868" -> "-1214140436"; + "-1214140436" [shape=box,label="__3_(\n'('"]; + "-1214026868" -> "-1214143508"; + "-1214143508" [label="n"]; + "-1214143508" -> "-1214141364"; + "-1214141364" [shape=box,label="__0_a\n'a'"]; + "-1214143508" -> "-1214141748"; + "-1214141748" [shape=box,label="__1_b\n'b'"]; + "-1214143508" -> "-1214140756"; + "-1214140756" [shape=box,label="__2_c\n'c'"]; + "-1214143508" -> "-1214144468"; + "-1214144468" [label="m"]; + "-1214144468" -> "-1214414868"; + "-1214414868" [shape=box,label="__5_d\n'd'"]; + "-1214026868" -> "-1214141492"; + "-1214141492" [shape=box,label="__4_)\n')'"]; + } After transformation the tree looks like this: -.. graphviz:: image/parsing_example5.dot +.. graphviz:: + digraph G{ + "-1219949908" [label="n"]; + "-1219949908" -> "-1214026452"; + "-1214026452" [shape=box,label="__0_a\n'a'"]; + "-1219949908" -> "-1214028276"; + "-1214028276" [shape=box,label="__1_b\n'b'"]; + "-1219949908" -> "-1214027316"; + "-1214027316" [shape=box,label="__2_c\n'c'"]; + "-1219949908" -> "-1219949876"; + "-1219949876" [label="n"]; + "-1219949876" -> "-1214141364"; + "-1214141364" [shape=box,label="__0_a\n'a'"]; + "-1219949876" -> "-1214141748"; + "-1214141748" [shape=box,label="__1_b\n'b'"]; + "-1219949876" -> "-1214140756"; + "-1214140756" [shape=box,label="__2_c\n'c'"]; + "-1219949876" -> "-1219949748"; + "-1219949748" [label="m"]; + "-1219949748" -> "-1214414868"; + "-1214414868" [shape=box,label="__5_d\n'd'"]; + } >nonterminal_1 nonterminal_2 ... nonterminal_n< +++++++++++++++++++++++++++++++++++++++++++++++ @@ -421,23 +530,76 @@ Parsing the string "1 2" gives the tree: -.. 
graphviz:: image/parsing_example6.dot - +.. graphviz:: + + digraph G{ + "-1213518708" [label="list"]; + "-1213518708" -> "-1213518196"; + "-1213518196" [shape=box,label="DECIMAL\n'1'"]; + "-1213518708" -> "-1213518260"; + "-1213518260" [label="list"]; + "-1213518260" -> "-1213520308"; + "-1213520308" [shape=box,label="DECIMAL\n'2'"]; + } + after the transformation the tree looks like: -.. graphviz:: image/parsing_example7.dot +.. graphviz:: + + digraph G{ + "-1219505652" [label="list"]; + "-1219505652" -> "-1213518196"; + "-1213518196" [shape=box,label="DECIMAL\n'1'"]; + "-1219505652" -> "-1213520308"; + "-1213520308" [shape=box,label="DECIMAL\n'2'"]; + } Note that the transformation works recursively. That means that the following also works: if the string "1 2 3 4 5" is parsed the tree at first looks like this: -.. graphviz:: image/parsing_example8.dot +.. graphviz:: + + digraph G{ + "-1213611892" [label="list"]; + "-1213611892" -> "-1213608980"; + "-1213608980" [shape=box,label="DECIMAL\n'1'"]; + "-1213611892" -> "-1213623476"; + "-1213623476" [label="list"]; + "-1213623476" -> "-1213623380"; + "-1213623380" [shape=box,label="DECIMAL\n'2'"]; + "-1213623476" -> "-1213442868"; + "-1213442868" [label="list"]; + "-1213442868" -> "-1213441652"; + "-1213441652" [shape=box,label="DECIMAL\n'3'"]; + "-1213442868" -> "-1213441332"; + "-1213441332" [label="list"]; + "-1213441332" -> "-1213441620"; + "-1213441620" [shape=box,label="DECIMAL\n'4'"]; + "-1213441332" -> "-1213443060"; + "-1213443060" [label="list"]; + "-1213443060" -> "-1213442100"; + "-1213442100" [shape=box,label="DECIMAL\n'5'"]; + } But after transformation the whole thing collapses to one node with a lot of children: -.. graphviz:: image/parsing_example9.dot +.. 
graphviz:: + digraph G{ + "-1219430228" [label="list"]; + "-1219430228" -> "-1213608980"; + "-1213608980" [shape=box,label="DECIMAL\n'1'"]; + "-1219430228" -> "-1213623380"; + "-1213623380" [shape=box,label="DECIMAL\n'2'"]; + "-1219430228" -> "-1213441652"; + "-1213441652" [shape=box,label="DECIMAL\n'3'"]; + "-1219430228" -> "-1213441620"; + "-1213441620" [shape=box,label="DECIMAL\n'4'"]; + "-1219430228" -> "-1213442100"; + "-1213442100" [shape=box,label="DECIMAL\n'5'"]; + } Extensions to the EBNF grammar format ------------------------------------- @@ -506,8 +668,45 @@ looks like this: -.. graphviz:: image/parsing_example10.dot +.. graphviz:: + digraph G{ + "-1220061652" [label="object"]; + "-1220061652" -> "-1220127636"; + "-1220127636" [label="entry"]; + "-1220127636" -> "-1213915636"; + "-1213915636" [shape=box,label="STRING\n'a'"]; + "-1220127636" -> "-1214251156"; + "-1214251156" [shape=box,label="STRING\n'5'"]; + "-1220061652" -> "-1220063188"; + "-1220063188" [label="entry"]; + "-1220063188" -> "-1214253076"; + "-1214253076" [shape=box,label="STRING\n'b'"]; + "-1220063188" -> "-1220059444"; + "-1220059444" [label="array"]; + "-1220059444" -> "-1214253364"; + "-1214253364" [shape=box,label="NUMBER\n'1'"]; + "-1220059444" -> "-1214254292"; + "-1214254292" [shape=box,label="__0_null\n'null'"]; + "-1220059444" -> "-1214253268"; + "-1214253268" [shape=box,label="NUMBER\n'3'"]; + "-1220059444" -> "-1214252596"; + "-1214252596" [shape=box,label="__1_true\n'true'"]; + "-1220059444" -> "-1220062260"; + "-1220062260" [label="object"]; + "-1220062260" -> "-1220060116"; + "-1220060116" [label="entry"]; + "-1220060116" -> "-1214211860"; + "-1214211860" [shape=box,label="STRING\n'f'"]; + "-1220060116" -> "-1214210132"; + "-1214210132" [shape=box,label="STRING\n'g'"]; + "-1220062260" -> "-1220062868"; + "-1220062868" [label="entry"]; + "-1220062868" -> "-1214211956"; + "-1214211956" [shape=box,label="STRING\n'h'"]; + "-1220062868" -> "-1214212308"; + "-1214212308" 
[shape=box,label="NUMBER\n'6'"]; + } .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ diff --git a/pypy/doc/image/parsing_example6.dot b/pypy/doc/image/parsing_example6.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example6.dot +++ /dev/null @@ -1,9 +0,0 @@ -digraph G{ -"-1213518708" [label="list"]; -"-1213518708" -> "-1213518196"; -"-1213518196" [shape=box,label="DECIMAL\n'1'"]; -"-1213518708" -> "-1213518260"; -"-1213518260" [label="list"]; -"-1213518260" -> "-1213520308"; -"-1213520308" [shape=box,label="DECIMAL\n'2'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example4.dot b/pypy/doc/image/parsing_example4.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example4.dot +++ /dev/null @@ -1,27 +0,0 @@ -digraph G{ -"-1214029460" [label="n"]; -"-1214029460" -> "-1214026452"; -"-1214026452" [shape=box,label="__0_a\n'a'"]; -"-1214029460" -> "-1214028276"; -"-1214028276" [shape=box,label="__1_b\n'b'"]; -"-1214029460" -> "-1214027316"; -"-1214027316" [shape=box,label="__2_c\n'c'"]; -"-1214029460" -> "-1214026868"; -"-1214026868" [label="m"]; -"-1214026868" -> "-1214140436"; -"-1214140436" [shape=box,label="__3_(\n'('"]; -"-1214026868" -> "-1214143508"; -"-1214143508" [label="n"]; -"-1214143508" -> "-1214141364"; -"-1214141364" [shape=box,label="__0_a\n'a'"]; -"-1214143508" -> "-1214141748"; -"-1214141748" [shape=box,label="__1_b\n'b'"]; -"-1214143508" -> "-1214140756"; -"-1214140756" [shape=box,label="__2_c\n'c'"]; -"-1214143508" -> "-1214144468"; -"-1214144468" [label="m"]; -"-1214144468" -> "-1214414868"; -"-1214414868" [shape=box,label="__5_d\n'd'"]; -"-1214026868" -> "-1214141492"; -"-1214141492" [shape=box,label="__4_)\n')'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example7.dot b/pypy/doc/image/parsing_example7.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example7.dot +++ /dev/null @@ -1,7 +0,0 @@ -digraph G{ -"-1219505652" [label="list"]; -"-1219505652" -> 
"-1213518196"; -"-1213518196" [shape=box,label="DECIMAL\n'1'"]; -"-1219505652" -> "-1213520308"; -"-1213520308" [shape=box,label="DECIMAL\n'2'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example2.dot b/pypy/doc/image/parsing_example2.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example2.dot +++ /dev/null @@ -1,17 +0,0 @@ -digraph G{ -"-1213678004" [label="n"]; -"-1213678004" -> "-1213681108"; -"-1213681108" [shape=box,label="__0_A\n'A'"]; -"-1213678004" -> "-1213681332"; -"-1213681332" [shape=box,label="__1_,\n','"]; -"-1213678004" -> "-1213837780"; -"-1213837780" [label="n"]; -"-1213837780" -> "-1213837716"; -"-1213837716" [shape=box,label="__0_A\n'A'"]; -"-1213837780" -> "-1213839476"; -"-1213839476" [shape=box,label="__1_,\n','"]; -"-1213837780" -> "-1213839956"; -"-1213839956" [label="n"]; -"-1213839956" -> "-1213840948"; -"-1213840948" [shape=box,label="__0_A\n'A'"]; -} diff --git a/pypy/doc/image/parsing_example3.dot b/pypy/doc/image/parsing_example3.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example3.dot +++ /dev/null @@ -1,13 +0,0 @@ -digraph G{ -"-1219325716" [label="n"]; -"-1219325716" -> "-1219325844"; -"-1219325844" [shape=box,label="__0_A\n'A'"]; -"-1219325716" -> "-1219324372"; -"-1219324372" [label="n"]; -"-1219324372" -> "-1219325524"; -"-1219325524" [shape=box,label="__0_A\n'A'"]; -"-1219324372" -> "-1219324308"; -"-1219324308" [label="n"]; -"-1219324308" -> "-1219325492"; -"-1219325492" [shape=box,label="__0_A\n'A'"]; -} diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -22,7 +22,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/pypy/doc/image/parsing_example1.dot b/pypy/doc/image/parsing_example1.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example1.dot +++ /dev/null @@ -1,27 +0,0 @@ -digraph G{ -"-1213931828" [label="additive"]; -"-1213931828" -> "-1213951956"; -"-1213951956" [label="multitive"]; -"-1213951956" -> "-1213949172"; -"-1213949172" [label="primary"]; -"-1213949172" -> "-1213949812"; -"-1213949812" [shape=box,label="DECIMAL\l'12'"]; -"-1213931828" -> "-1213935220"; -"-1213935220" [shape=box,label="__0_+\l'+'"]; -"-1213931828" -> "-1213951316"; -"-1213951316" [label="additive"]; -"-1213951316" -> "-1213948180"; -"-1213948180" [label="multitive"]; -"-1213948180" -> "-1213951380"; -"-1213951380" [label="primary"]; -"-1213951380" -> "-1213951508"; -"-1213951508" [shape=box,label="DECIMAL\l'4'"]; -"-1213948180" -> "-1213948788"; -"-1213948788" [shape=box,label="__1_*\l'*'"]; -"-1213948180" -> "-1213951060"; -"-1213951060" [label="multitive"]; -"-1213951060" -> "-1213948980"; -"-1213948980" [label="primary"]; -"-1213948980" -> "-1213950420"; -"-1213950420" [shape=box,label="DECIMAL\l'5'"]; -} From commits-noreply at bitbucket.org Tue Apr 26 18:15:21 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 18:15:21 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac): kill the graphviz and latex formula support, since sphinx provides it for us anyway. 
Message-ID: <20110426161521.E10F8282C1A@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43638:44a3480dd2bc Date: 2011-04-26 14:40 +0200 http://bitbucket.org/pypy/pypy/changeset/44a3480dd2bc/ Log: (cfbolz, lac): kill the graphviz and latex formula support, since sphinx provides it for us anyway. diff --git a/pypy/tool/rest/convert.py b/pypy/tool/rest/convert.py deleted file mode 100644 --- a/pypy/tool/rest/convert.py +++ /dev/null @@ -1,163 +0,0 @@ -import py - -ExecutionFailed = py.process.cmdexec.Error -# utility functions to convert between various formats - -format_to_dotargument = {"png": "png", - "eps": "ps", - "ps": "ps", - "pdf": "ps", - } - -def ps2eps(ps): - # XXX write a pure python version - if not py.path.local.sysfind("ps2epsi") and \ - not py.path.local.sysfind("ps2eps"): - raise SystemExit("neither ps2eps nor ps2epsi found") - try: - eps = ps.new(ext=".eps") - py.process.cmdexec('ps2epsi "%s" "%s"' % (ps, eps)) - except ExecutionFailed: - py.process.cmdexec('ps2eps -l -f "%s"' % ps) - -def ps2pdf(ps, compat_level="1.2"): - if not py.path.local.sysfind("gs"): - raise SystemExit("ERROR: gs not found") - pdf = ps.new(ext=".pdf") - options = dict(OPTIONS="-dSAFER -dCompatibilityLevel=%s" % compat_level, - infile=ps, outfile=pdf) - cmd = ('gs %(OPTIONS)s -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite ' - '"-sOutputFile=%(outfile)s" %(OPTIONS)s -c .setpdfwrite ' - '-f "%(infile)s"') % options - py.process.cmdexec(cmd) - return pdf - -def eps2pdf(eps): - # XXX write a pure python version - if not py.path.local.sysfind("epstopdf"): - raise SystemExit("ERROR: epstopdf not found") - py.process.cmdexec('epstopdf "%s"' % eps) - -def dvi2eps(dvi, dest=None): - if dest is None: - dest = eps.new(ext=".eps") - command = 'dvips -q -E -n 1 -D 600 -p 1 -o "%s" "%s"' % (dest, dvi) - if not py.path.local.sysfind("dvips"): - raise SystemExit("ERROR: dvips not found") - py.process.cmdexec(command) - -def convert_dot(fn, new_extension): - if not 
py.path.local.sysfind("dot"): - raise SystemExit("ERROR: dot not found") - result = fn.new(ext=new_extension) - print result - arg = "-T%s" % (format_to_dotargument[new_extension], ) - py.std.os.system('dot "%s" "%s" > "%s"' % (arg, fn, result)) - if new_extension == "eps": - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - ps.remove() - elif new_extension == "pdf": - # convert to eps file first, to get the bounding box right - eps = result.new(ext="eps") - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - eps2pdf(eps) - ps.remove() - eps.remove() - return result - - -class latexformula2png(object): - def __init__(self, formula, dest, temp=None): - self.formula = formula - try: - import Image - self.Image = Image - self.scale = 2 # create a larger image - self.upscale = 5 # create the image upscale times larger, then scale it down - except ImportError: - self.scale = 2 - self.upscale = 1 - self.Image = None - self.output_format = ('pngmono', 'pnggray', 'pngalpha')[2] - if temp is None: - temp = py.test.ensuretemp("latexformula") - self.temp = temp - self.latex = self.temp.join('formula.tex') - self.dvi = self.temp.join('formula.dvi') - self.eps = self.temp.join('formula.eps') - self.png = self.temp.join('formula.png') - self.saveas(dest) - - def saveas(self, dest): - self.gen_latex() - self.gen_dvi() - dvi2eps(self.dvi, self.eps) - self.gen_png() - self.scale_image() - self.png.copy(dest) - - def gen_latex(self): - self.latex.write (""" - \\documentclass{article} - \\pagestyle{empty} - \\begin{document} - - %s - \\pagebreak - - \\end{document} - """ % (self.formula)) - - def gen_dvi(self): - origdir = py.path.local() - self.temp.chdir() - py.process.cmdexec('latex "%s"' % (self.latex)) - origdir.chdir() - - def gen_png(self): - tempdir = py.path.local.mkdtemp() - - re_bbox = py.std.re.compile('%%BoundingBox:\s*(\d+) (\d+) (\d+) (\d+)') - eps = self.eps.read() - x1, y1, x2, y2 = [int(i) for i in re_bbox.search(eps).groups()] - X = x2 - x1 + 2 - Y 
= y2 - y1 + 2 - mx = -x1 - my = -y1 - ps = self.temp.join('temp.ps') - source = self.eps - ps.write(""" - 1 1 1 setrgbcolor - newpath - -1 -1 moveto - %(X)d -1 lineto - %(X)d %(Y)d lineto - -1 %(Y)d lineto - closepath - fill - %(mx)d %(my)d translate - 0 0 0 setrgbcolor - (%(source)s) run - - """ % locals()) - - sx = int((x2 - x1) * self.scale * self.upscale) - sy = int((y2 - y1) * self.scale * self.upscale) - res = 72 * self.scale * self.upscale - command = ('gs -q -g%dx%d -r%dx%d -sDEVICE=%s -sOutputFile="%s" ' - '-dNOPAUSE -dBATCH "%s"') % ( - sx, sy, res, res, self.output_format, self.png, ps) - py.process.cmdexec(command) - - def scale_image(self): - if self.Image is None: - return - image = self.Image.open(str(self.png)) - image.resize((image.size[0] / self.upscale, - image.size[1] / self.upscale), - self.Image.ANTIALIAS).save(str(self.png)) - diff --git a/pypy/tool/rest/directive.py b/pypy/tool/rest/directive.py --- a/pypy/tool/rest/directive.py +++ b/pypy/tool/rest/directive.py @@ -1,108 +1,9 @@ -# XXX this file is messy since it tries to deal with several docutils versions import py -from pypy.tool.rest.convert import convert_dot, latexformula2png - import sys import docutils from docutils import nodes -from docutils.parsers.rst import directives, states, roles -from docutils.parsers.rst.directives import images - -if hasattr(images, "image"): - directives_are_functions = True -else: - directives_are_functions = False - -try: - from docutils.utils import unescape # docutils version > 0.3.5 -except ImportError: - from docutils.parsers.rst.states import unescape # docutils 0.3.5 - -if not directives_are_functions: - ImageClass = images.Image - -else: - class ImageClass(object): - option_spec = images.image.options - def run(self): - return images.image(u'image', - self.arguments, - self.options, - self.content, - self.lineno, - self.content_offset, - self.block_text, - self.state, - self.state_machine) - - -backend_to_image_format = {"html": "png", "latex": 
"pdf"} - -class GraphvizDirective(ImageClass): - def convert(self, fn, path): - path = py.path.local(path).dirpath() - dot = path.join(fn) - result = convert_dot(dot, backend_to_image_format[_backend]) - return result.relto(path) - - def run(self): - newname = self.convert(self.arguments[0], - self.state.document.settings._source) - text = self.block_text.replace("graphviz", "image", 1) - self.block_text = text.replace(self.arguments[0], newname, 1) - self.name = u'image' - self.arguments = [newname] - return ImageClass.run(self) - - def old_interface(self): - def f(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - for arg in "name arguments options content lineno " \ - "content_offset block_text state state_machine".split(): - setattr(self, arg, locals()[arg]) - return self.run() - f.arguments = (1, 0, 1) - f.options = self.option_spec - return f - - -_backend = None -def set_backend_and_register_directives(backend): - #XXX this is only used to work around the inflexibility of docutils: - # a directive does not know the target format - global _backend - _backend = backend - if not directives_are_functions: - directives.register_directive("graphviz", GraphvizDirective) - else: - directives.register_directive("graphviz", - GraphvizDirective().old_interface()) - roles.register_canonical_role("latexformula", latexformula_role) - -def latexformula_role(name, rawtext, text, lineno, inliner, - options={}, content=[]): - if _backend == 'latex': - options['format'] = 'latex' - return roles.raw_role(name, rawtext, text, lineno, inliner, - options, content) - else: - # XXX: make the place of the image directory configurable - sourcedir = py.path.local(inliner.document.settings._source).dirpath() - imagedir = sourcedir.join("img") - if not imagedir.check(): - imagedir.mkdir() - # create halfway senseful imagename: - # use hash of formula + alphanumeric characters of it - # could - imagename = "%s_%s.png" % ( - hash(text), 
"".join([c for c in text if c.isalnum()])) - image = imagedir.join(imagename) - latexformula2png(unescape(text, True), image) - imagenode = nodes.image(image.relto(sourcedir), uri=image.relto(sourcedir)) - return [imagenode], [] -latexformula_role.content = True -latexformula_role.options = {} +from docutils.parsers.rst import roles def register_linkrole(role_name, callback): def source_role(name, rawtext, text, lineno, inliner, options={}, From commits-noreply at bitbucket.org Tue Apr 26 18:15:23 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 18:15:23 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix this Message-ID: <20110426161523.53F72282C19@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43639:52193f17900c Date: 2011-04-26 17:34 +0200 http://bitbucket.org/pypy/pypy/changeset/52193f17900c/ Log: fix this diff --git a/pypy/doc/config/makemodules.py b/pypy/doc/config/makemodules.py --- a/pypy/doc/config/makemodules.py +++ b/pypy/doc/config/makemodules.py @@ -7,12 +7,12 @@ if __name__ == '__main__': c = config.Config(pypyoption.pypy_optiondescription).usemodules prefix = "objspace.usemodules" - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." + p + ".rst" f = thisdir.join(basename) - if f.check() and f.size(): - continue + #if f.check() and f.size(): + # continue print "making docs for", p text = ["Use the '%s' module. 
" % (p, )] if p in pypyoption.essential_modules: From commits-noreply at bitbucket.org Tue Apr 26 18:15:26 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 18:15:26 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: make :config: roles work again Message-ID: <20110426161526.D70E9282BF2@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43640:ae024f6b7cc6 Date: 2011-04-26 18:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ae024f6b7cc6/ Log: make :config: roles work again diff --git a/pypy/doc/pypyconfig.py b/pypy/doc/pypyconfig.py new file mode 100644 --- /dev/null +++ b/pypy/doc/pypyconfig.py @@ -0,0 +1,9 @@ + + +def setup(app): + import sys, os + sys.path.append(os.path.abspath("../../")) + from pypy.config import makerestdoc + import py + role = makerestdoc.register_config_role(py.path.local()) + app.add_role("config", role) diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -221,7 +221,7 @@ from docutils import nodes from pypy.config.pypyoption import get_pypy_config from pypy.config.makerestdoc import get_cmdline - txt = docdir.join("config", text + ".txt") + txt = docdir.join("config", text + ".rst") html = docdir.join("config", text + ".html") assert txt.check() assert name == "config" @@ -247,9 +247,8 @@ shortest_long_option = cmd text = shortest_long_option target = prefix + relative - print text, target reference_node = nodes.reference(rawtext, text, name=text, refuri=target) return [reference_node], [] config_role.content = True config_role.options = {} - roles.register_canonical_role("config", config_role) + return config_role diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -16,13 +16,13 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) +sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -196,3 +196,4 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} + From commits-noreply at bitbucket.org Tue Apr 26 18:21:41 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 18:21:41 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Give these orphan files a sphinx toctree Message-ID: <20110426162141.400ED282B8B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43641:010ca37c72ff Date: 2011-04-26 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/010ca37c72ff/ Log: Give these orphan files a sphinx toctree diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -92,11 +92,14 @@ garbage_collection.rst interpreter.rst objspace.rst + __pypy__-module.rst + objspace-proxies.rst dev_method.rst extending.rst extradoc.rst + video-index.rst glossary.rst @@ -107,6 +110,8 @@ parser.rst rlib.rst rtyper.rst + rffi.rst + translation.rst jit/index.rst jit/overview.rst @@ -121,6 +126,7 @@ index-report.rst stackless.rst + sandbox.rst discussions.rst @@ 
-129,6 +135,7 @@ sprint-reports.rst eventhistory.rst + statistic/index.rst Indices and tables ================== From commits-noreply at bitbucket.org Tue Apr 26 18:21:48 2011 From: commits-noreply at bitbucket.org (lac) Date: Tue, 26 Apr 2011 18:21:48 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110426162148.0CC7B282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43642:ebc27a998c59 Date: 2011-04-26 18:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ebc27a998c59/ Log: merge heads diff --git a/pypy/doc/pypyconfig.py b/pypy/doc/pypyconfig.py new file mode 100644 --- /dev/null +++ b/pypy/doc/pypyconfig.py @@ -0,0 +1,9 @@ + + +def setup(app): + import sys, os + sys.path.append(os.path.abspath("../../")) + from pypy.config import makerestdoc + import py + role = makerestdoc.register_config_role(py.path.local()) + app.add_role("config", role) diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -221,7 +221,7 @@ from docutils import nodes from pypy.config.pypyoption import get_pypy_config from pypy.config.makerestdoc import get_cmdline - txt = docdir.join("config", text + ".txt") + txt = docdir.join("config", text + ".rst") html = docdir.join("config", text + ".html") assert txt.check() assert name == "config" @@ -247,9 +247,8 @@ shortest_long_option = cmd text = shortest_long_option target = prefix + relative - print text, target reference_node = nodes.reference(rawtext, text, name=text, refuri=target) return [reference_node], [] config_role.content = True config_role.options = {} - roles.register_canonical_role("config", config_role) + return config_role diff --git a/pypy/tool/rest/directive.py b/pypy/tool/rest/directive.py --- a/pypy/tool/rest/directive.py +++ b/pypy/tool/rest/directive.py @@ -1,108 +1,9 @@ -# XXX this file is messy since it tries to deal with several docutils versions import py -from 
pypy.tool.rest.convert import convert_dot, latexformula2png - import sys import docutils from docutils import nodes -from docutils.parsers.rst import directives, states, roles -from docutils.parsers.rst.directives import images - -if hasattr(images, "image"): - directives_are_functions = True -else: - directives_are_functions = False - -try: - from docutils.utils import unescape # docutils version > 0.3.5 -except ImportError: - from docutils.parsers.rst.states import unescape # docutils 0.3.5 - -if not directives_are_functions: - ImageClass = images.Image - -else: - class ImageClass(object): - option_spec = images.image.options - def run(self): - return images.image(u'image', - self.arguments, - self.options, - self.content, - self.lineno, - self.content_offset, - self.block_text, - self.state, - self.state_machine) - - -backend_to_image_format = {"html": "png", "latex": "pdf"} - -class GraphvizDirective(ImageClass): - def convert(self, fn, path): - path = py.path.local(path).dirpath() - dot = path.join(fn) - result = convert_dot(dot, backend_to_image_format[_backend]) - return result.relto(path) - - def run(self): - newname = self.convert(self.arguments[0], - self.state.document.settings._source) - text = self.block_text.replace("graphviz", "image", 1) - self.block_text = text.replace(self.arguments[0], newname, 1) - self.name = u'image' - self.arguments = [newname] - return ImageClass.run(self) - - def old_interface(self): - def f(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - for arg in "name arguments options content lineno " \ - "content_offset block_text state state_machine".split(): - setattr(self, arg, locals()[arg]) - return self.run() - f.arguments = (1, 0, 1) - f.options = self.option_spec - return f - - -_backend = None -def set_backend_and_register_directives(backend): - #XXX this is only used to work around the inflexibility of docutils: - # a directive does not know the target format - global 
_backend - _backend = backend - if not directives_are_functions: - directives.register_directive("graphviz", GraphvizDirective) - else: - directives.register_directive("graphviz", - GraphvizDirective().old_interface()) - roles.register_canonical_role("latexformula", latexformula_role) - -def latexformula_role(name, rawtext, text, lineno, inliner, - options={}, content=[]): - if _backend == 'latex': - options['format'] = 'latex' - return roles.raw_role(name, rawtext, text, lineno, inliner, - options, content) - else: - # XXX: make the place of the image directory configurable - sourcedir = py.path.local(inliner.document.settings._source).dirpath() - imagedir = sourcedir.join("img") - if not imagedir.check(): - imagedir.mkdir() - # create halfway senseful imagename: - # use hash of formula + alphanumeric characters of it - # could - imagename = "%s_%s.png" % ( - hash(text), "".join([c for c in text if c.isalnum()])) - image = imagedir.join(imagename) - latexformula2png(unescape(text, True), image) - imagenode = nodes.image(image.relto(sourcedir), uri=image.relto(sourcedir)) - return [imagenode], [] -latexformula_role.content = True -latexformula_role.options = {} +from docutils.parsers.rst import roles def register_linkrole(role_name, callback): def source_role(name, rawtext, text, lineno, inliner, options={}, diff --git a/pypy/doc/image/parsing_example8.dot b/pypy/doc/image/parsing_example8.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example8.dot +++ /dev/null @@ -1,21 +0,0 @@ -digraph G{ -"-1213611892" [label="list"]; -"-1213611892" -> "-1213608980"; -"-1213608980" [shape=box,label="DECIMAL\n'1'"]; -"-1213611892" -> "-1213623476"; -"-1213623476" [label="list"]; -"-1213623476" -> "-1213623380"; -"-1213623380" [shape=box,label="DECIMAL\n'2'"]; -"-1213623476" -> "-1213442868"; -"-1213442868" [label="list"]; -"-1213442868" -> "-1213441652"; -"-1213441652" [shape=box,label="DECIMAL\n'3'"]; -"-1213442868" -> "-1213441332"; -"-1213441332" [label="list"]; 
-"-1213441332" -> "-1213441620"; -"-1213441620" [shape=box,label="DECIMAL\n'4'"]; -"-1213441332" -> "-1213443060"; -"-1213443060" [label="list"]; -"-1213443060" -> "-1213442100"; -"-1213442100" [shape=box,label="DECIMAL\n'5'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example5.dot b/pypy/doc/image/parsing_example5.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example5.dot +++ /dev/null @@ -1,21 +0,0 @@ -digraph G{ -"-1219949908" [label="n"]; -"-1219949908" -> "-1214026452"; -"-1214026452" [shape=box,label="__0_a\n'a'"]; -"-1219949908" -> "-1214028276"; -"-1214028276" [shape=box,label="__1_b\n'b'"]; -"-1219949908" -> "-1214027316"; -"-1214027316" [shape=box,label="__2_c\n'c'"]; -"-1219949908" -> "-1219949876"; -"-1219949876" [label="n"]; -"-1219949876" -> "-1214141364"; -"-1214141364" [shape=box,label="__0_a\n'a'"]; -"-1219949876" -> "-1214141748"; -"-1214141748" [shape=box,label="__1_b\n'b'"]; -"-1219949876" -> "-1214140756"; -"-1214140756" [shape=box,label="__2_c\n'c'"]; -"-1219949876" -> "-1219949748"; -"-1219949748" [label="m"]; -"-1219949748" -> "-1214414868"; -"-1214414868" [shape=box,label="__5_d\n'd'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example10.dot b/pypy/doc/image/parsing_example10.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example10.dot +++ /dev/null @@ -1,37 +0,0 @@ -digraph G{ -"-1220061652" [label="object"]; -"-1220061652" -> "-1220127636"; -"-1220127636" [label="entry"]; -"-1220127636" -> "-1213915636"; -"-1213915636" [shape=box,label="STRING\n'a'"]; -"-1220127636" -> "-1214251156"; -"-1214251156" [shape=box,label="STRING\n'5'"]; -"-1220061652" -> "-1220063188"; -"-1220063188" [label="entry"]; -"-1220063188" -> "-1214253076"; -"-1214253076" [shape=box,label="STRING\n'b'"]; -"-1220063188" -> "-1220059444"; -"-1220059444" [label="array"]; -"-1220059444" -> "-1214253364"; -"-1214253364" [shape=box,label="NUMBER\n'1'"]; -"-1220059444" -> "-1214254292"; -"-1214254292" 
[shape=box,label="__0_null\n'null'"]; -"-1220059444" -> "-1214253268"; -"-1214253268" [shape=box,label="NUMBER\n'3'"]; -"-1220059444" -> "-1214252596"; -"-1214252596" [shape=box,label="__1_true\n'true'"]; -"-1220059444" -> "-1220062260"; -"-1220062260" [label="object"]; -"-1220062260" -> "-1220060116"; -"-1220060116" [label="entry"]; -"-1220060116" -> "-1214211860"; -"-1214211860" [shape=box,label="STRING\n'f'"]; -"-1220060116" -> "-1214210132"; -"-1214210132" [shape=box,label="STRING\n'g'"]; -"-1220062260" -> "-1220062868"; -"-1220062868" [label="entry"]; -"-1220062868" -> "-1214211956"; -"-1214211956" [shape=box,label="STRING\n'h'"]; -"-1220062868" -> "-1214212308"; -"-1214212308" [shape=box,label="NUMBER\n'6'"]; -} diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -323,7 +323,35 @@ produces a syntax tree that follows the precedence of the operators. For example the expression ``12 + 4 * 5`` is parsed into the following tree: -.. graphviz:: image/parsing_example1.dot +.. 
graphviz:: + + digraph G{ + "-1213931828" [label="additive"]; + "-1213931828" -> "-1213951956"; + "-1213951956" [label="multitive"]; + "-1213951956" -> "-1213949172"; + "-1213949172" [label="primary"]; + "-1213949172" -> "-1213949812"; + "-1213949812" [shape=box,label="DECIMAL\l'12'"]; + "-1213931828" -> "-1213935220"; + "-1213935220" [shape=box,label="__0_+\l'+'"]; + "-1213931828" -> "-1213951316"; + "-1213951316" [label="additive"]; + "-1213951316" -> "-1213948180"; + "-1213948180" [label="multitive"]; + "-1213948180" -> "-1213951380"; + "-1213951380" [label="primary"]; + "-1213951380" -> "-1213951508"; + "-1213951508" [shape=box,label="DECIMAL\l'4'"]; + "-1213948180" -> "-1213948788"; + "-1213948788" [shape=box,label="__1_*\l'*'"]; + "-1213948180" -> "-1213951060"; + "-1213951060" [label="multitive"]; + "-1213951060" -> "-1213948980"; + "-1213948980" [label="primary"]; + "-1213948980" -> "-1213950420"; + "-1213950420" [shape=box,label="DECIMAL\l'5'"]; + } Parse Trees ----------- @@ -380,11 +408,43 @@ Parsing the string "A, A, A" gives the tree: -.. graphviz:: image/parsing_example2.dot +.. graphviz:: + + digraph G{ + "-1213678004" [label="n"]; + "-1213678004" -> "-1213681108"; + "-1213681108" [shape=box,label="__0_A\n'A'"]; + "-1213678004" -> "-1213681332"; + "-1213681332" [shape=box,label="__1_,\n','"]; + "-1213678004" -> "-1213837780"; + "-1213837780" [label="n"]; + "-1213837780" -> "-1213837716"; + "-1213837716" [shape=box,label="__0_A\n'A'"]; + "-1213837780" -> "-1213839476"; + "-1213839476" [shape=box,label="__1_,\n','"]; + "-1213837780" -> "-1213839956"; + "-1213839956" [label="n"]; + "-1213839956" -> "-1213840948"; + "-1213840948" [shape=box,label="__0_A\n'A'"]; + } After transformation the tree has the "," nodes removed: -.. graphviz:: image/parsing_example3.dot +.. 
graphviz:: + + digraph G{ + "-1219325716" [label="n"]; + "-1219325716" -> "-1219325844"; + "-1219325844" [shape=box,label="__0_A\n'A'"]; + "-1219325716" -> "-1219324372"; + "-1219324372" [label="n"]; + "-1219324372" -> "-1219325524"; + "-1219325524" [shape=box,label="__0_A\n'A'"]; + "-1219324372" -> "-1219324308"; + "-1219324308" [label="n"]; + "-1219324308" -> "-1219325492"; + "-1219325492" [shape=box,label="__0_A\n'A'"]; + } ++++++++ @@ -401,12 +461,61 @@ Parsing the string "a b c (a b c d)" gives the tree: -.. graphviz:: image/parsing_example4.dot +.. graphviz:: + + digraph G{ + "-1214029460" [label="n"]; + "-1214029460" -> "-1214026452"; + "-1214026452" [shape=box,label="__0_a\n'a'"]; + "-1214029460" -> "-1214028276"; + "-1214028276" [shape=box,label="__1_b\n'b'"]; + "-1214029460" -> "-1214027316"; + "-1214027316" [shape=box,label="__2_c\n'c'"]; + "-1214029460" -> "-1214026868"; + "-1214026868" [label="m"]; + "-1214026868" -> "-1214140436"; + "-1214140436" [shape=box,label="__3_(\n'('"]; + "-1214026868" -> "-1214143508"; + "-1214143508" [label="n"]; + "-1214143508" -> "-1214141364"; + "-1214141364" [shape=box,label="__0_a\n'a'"]; + "-1214143508" -> "-1214141748"; + "-1214141748" [shape=box,label="__1_b\n'b'"]; + "-1214143508" -> "-1214140756"; + "-1214140756" [shape=box,label="__2_c\n'c'"]; + "-1214143508" -> "-1214144468"; + "-1214144468" [label="m"]; + "-1214144468" -> "-1214414868"; + "-1214414868" [shape=box,label="__5_d\n'd'"]; + "-1214026868" -> "-1214141492"; + "-1214141492" [shape=box,label="__4_)\n')'"]; + } After transformation the tree looks like this: -.. graphviz:: image/parsing_example5.dot +.. 
graphviz:: + digraph G{ + "-1219949908" [label="n"]; + "-1219949908" -> "-1214026452"; + "-1214026452" [shape=box,label="__0_a\n'a'"]; + "-1219949908" -> "-1214028276"; + "-1214028276" [shape=box,label="__1_b\n'b'"]; + "-1219949908" -> "-1214027316"; + "-1214027316" [shape=box,label="__2_c\n'c'"]; + "-1219949908" -> "-1219949876"; + "-1219949876" [label="n"]; + "-1219949876" -> "-1214141364"; + "-1214141364" [shape=box,label="__0_a\n'a'"]; + "-1219949876" -> "-1214141748"; + "-1214141748" [shape=box,label="__1_b\n'b'"]; + "-1219949876" -> "-1214140756"; + "-1214140756" [shape=box,label="__2_c\n'c'"]; + "-1219949876" -> "-1219949748"; + "-1219949748" [label="m"]; + "-1219949748" -> "-1214414868"; + "-1214414868" [shape=box,label="__5_d\n'd'"]; + } >nonterminal_1 nonterminal_2 ... nonterminal_n< +++++++++++++++++++++++++++++++++++++++++++++++ @@ -421,23 +530,76 @@ Parsing the string "1 2" gives the tree: -.. graphviz:: image/parsing_example6.dot - +.. graphviz:: + + digraph G{ + "-1213518708" [label="list"]; + "-1213518708" -> "-1213518196"; + "-1213518196" [shape=box,label="DECIMAL\n'1'"]; + "-1213518708" -> "-1213518260"; + "-1213518260" [label="list"]; + "-1213518260" -> "-1213520308"; + "-1213520308" [shape=box,label="DECIMAL\n'2'"]; + } + after the transformation the tree looks like: -.. graphviz:: image/parsing_example7.dot +.. graphviz:: + + digraph G{ + "-1219505652" [label="list"]; + "-1219505652" -> "-1213518196"; + "-1213518196" [shape=box,label="DECIMAL\n'1'"]; + "-1219505652" -> "-1213520308"; + "-1213520308" [shape=box,label="DECIMAL\n'2'"]; + } Note that the transformation works recursively. That means that the following also works: if the string "1 2 3 4 5" is parsed the tree at first looks like this: -.. graphviz:: image/parsing_example8.dot +.. 
graphviz:: + + digraph G{ + "-1213611892" [label="list"]; + "-1213611892" -> "-1213608980"; + "-1213608980" [shape=box,label="DECIMAL\n'1'"]; + "-1213611892" -> "-1213623476"; + "-1213623476" [label="list"]; + "-1213623476" -> "-1213623380"; + "-1213623380" [shape=box,label="DECIMAL\n'2'"]; + "-1213623476" -> "-1213442868"; + "-1213442868" [label="list"]; + "-1213442868" -> "-1213441652"; + "-1213441652" [shape=box,label="DECIMAL\n'3'"]; + "-1213442868" -> "-1213441332"; + "-1213441332" [label="list"]; + "-1213441332" -> "-1213441620"; + "-1213441620" [shape=box,label="DECIMAL\n'4'"]; + "-1213441332" -> "-1213443060"; + "-1213443060" [label="list"]; + "-1213443060" -> "-1213442100"; + "-1213442100" [shape=box,label="DECIMAL\n'5'"]; + } But after transformation the whole thing collapses to one node with a lot of children: -.. graphviz:: image/parsing_example9.dot +.. graphviz:: + digraph G{ + "-1219430228" [label="list"]; + "-1219430228" -> "-1213608980"; + "-1213608980" [shape=box,label="DECIMAL\n'1'"]; + "-1219430228" -> "-1213623380"; + "-1213623380" [shape=box,label="DECIMAL\n'2'"]; + "-1219430228" -> "-1213441652"; + "-1213441652" [shape=box,label="DECIMAL\n'3'"]; + "-1219430228" -> "-1213441620"; + "-1213441620" [shape=box,label="DECIMAL\n'4'"]; + "-1219430228" -> "-1213442100"; + "-1213442100" [shape=box,label="DECIMAL\n'5'"]; + } Extensions to the EBNF grammar format ------------------------------------- @@ -506,8 +668,45 @@ looks like this: -.. graphviz:: image/parsing_example10.dot +.. 
graphviz:: + digraph G{ + "-1220061652" [label="object"]; + "-1220061652" -> "-1220127636"; + "-1220127636" [label="entry"]; + "-1220127636" -> "-1213915636"; + "-1213915636" [shape=box,label="STRING\n'a'"]; + "-1220127636" -> "-1214251156"; + "-1214251156" [shape=box,label="STRING\n'5'"]; + "-1220061652" -> "-1220063188"; + "-1220063188" [label="entry"]; + "-1220063188" -> "-1214253076"; + "-1214253076" [shape=box,label="STRING\n'b'"]; + "-1220063188" -> "-1220059444"; + "-1220059444" [label="array"]; + "-1220059444" -> "-1214253364"; + "-1214253364" [shape=box,label="NUMBER\n'1'"]; + "-1220059444" -> "-1214254292"; + "-1214254292" [shape=box,label="__0_null\n'null'"]; + "-1220059444" -> "-1214253268"; + "-1214253268" [shape=box,label="NUMBER\n'3'"]; + "-1220059444" -> "-1214252596"; + "-1214252596" [shape=box,label="__1_true\n'true'"]; + "-1220059444" -> "-1220062260"; + "-1220062260" [label="object"]; + "-1220062260" -> "-1220060116"; + "-1220060116" [label="entry"]; + "-1220060116" -> "-1214211860"; + "-1214211860" [shape=box,label="STRING\n'f'"]; + "-1220060116" -> "-1214210132"; + "-1214210132" [shape=box,label="STRING\n'g'"]; + "-1220062260" -> "-1220062868"; + "-1220062868" [label="entry"]; + "-1220062868" -> "-1214211956"; + "-1214211956" [shape=box,label="STRING\n'h'"]; + "-1220062868" -> "-1214212308"; + "-1214212308" [shape=box,label="NUMBER\n'6'"]; + } .. 
_`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ diff --git a/pypy/doc/image/parsing_example6.dot b/pypy/doc/image/parsing_example6.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example6.dot +++ /dev/null @@ -1,9 +0,0 @@ -digraph G{ -"-1213518708" [label="list"]; -"-1213518708" -> "-1213518196"; -"-1213518196" [shape=box,label="DECIMAL\n'1'"]; -"-1213518708" -> "-1213518260"; -"-1213518260" [label="list"]; -"-1213518260" -> "-1213520308"; -"-1213520308" [shape=box,label="DECIMAL\n'2'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example4.dot b/pypy/doc/image/parsing_example4.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example4.dot +++ /dev/null @@ -1,27 +0,0 @@ -digraph G{ -"-1214029460" [label="n"]; -"-1214029460" -> "-1214026452"; -"-1214026452" [shape=box,label="__0_a\n'a'"]; -"-1214029460" -> "-1214028276"; -"-1214028276" [shape=box,label="__1_b\n'b'"]; -"-1214029460" -> "-1214027316"; -"-1214027316" [shape=box,label="__2_c\n'c'"]; -"-1214029460" -> "-1214026868"; -"-1214026868" [label="m"]; -"-1214026868" -> "-1214140436"; -"-1214140436" [shape=box,label="__3_(\n'('"]; -"-1214026868" -> "-1214143508"; -"-1214143508" [label="n"]; -"-1214143508" -> "-1214141364"; -"-1214141364" [shape=box,label="__0_a\n'a'"]; -"-1214143508" -> "-1214141748"; -"-1214141748" [shape=box,label="__1_b\n'b'"]; -"-1214143508" -> "-1214140756"; -"-1214140756" [shape=box,label="__2_c\n'c'"]; -"-1214143508" -> "-1214144468"; -"-1214144468" [label="m"]; -"-1214144468" -> "-1214414868"; -"-1214414868" [shape=box,label="__5_d\n'd'"]; -"-1214026868" -> "-1214141492"; -"-1214141492" [shape=box,label="__4_)\n')'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example7.dot b/pypy/doc/image/parsing_example7.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example7.dot +++ /dev/null @@ -1,7 +0,0 @@ -digraph G{ -"-1219505652" [label="list"]; -"-1219505652" -> "-1213518196"; -"-1213518196" 
[shape=box,label="DECIMAL\n'1'"]; -"-1219505652" -> "-1213520308"; -"-1213520308" [shape=box,label="DECIMAL\n'2'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example2.dot b/pypy/doc/image/parsing_example2.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example2.dot +++ /dev/null @@ -1,17 +0,0 @@ -digraph G{ -"-1213678004" [label="n"]; -"-1213678004" -> "-1213681108"; -"-1213681108" [shape=box,label="__0_A\n'A'"]; -"-1213678004" -> "-1213681332"; -"-1213681332" [shape=box,label="__1_,\n','"]; -"-1213678004" -> "-1213837780"; -"-1213837780" [label="n"]; -"-1213837780" -> "-1213837716"; -"-1213837716" [shape=box,label="__0_A\n'A'"]; -"-1213837780" -> "-1213839476"; -"-1213839476" [shape=box,label="__1_,\n','"]; -"-1213837780" -> "-1213839956"; -"-1213839956" [label="n"]; -"-1213839956" -> "-1213840948"; -"-1213840948" [shape=box,label="__0_A\n'A'"]; -} diff --git a/pypy/tool/rest/convert.py b/pypy/tool/rest/convert.py deleted file mode 100644 --- a/pypy/tool/rest/convert.py +++ /dev/null @@ -1,163 +0,0 @@ -import py - -ExecutionFailed = py.process.cmdexec.Error -# utility functions to convert between various formats - -format_to_dotargument = {"png": "png", - "eps": "ps", - "ps": "ps", - "pdf": "ps", - } - -def ps2eps(ps): - # XXX write a pure python version - if not py.path.local.sysfind("ps2epsi") and \ - not py.path.local.sysfind("ps2eps"): - raise SystemExit("neither ps2eps nor ps2epsi found") - try: - eps = ps.new(ext=".eps") - py.process.cmdexec('ps2epsi "%s" "%s"' % (ps, eps)) - except ExecutionFailed: - py.process.cmdexec('ps2eps -l -f "%s"' % ps) - -def ps2pdf(ps, compat_level="1.2"): - if not py.path.local.sysfind("gs"): - raise SystemExit("ERROR: gs not found") - pdf = ps.new(ext=".pdf") - options = dict(OPTIONS="-dSAFER -dCompatibilityLevel=%s" % compat_level, - infile=ps, outfile=pdf) - cmd = ('gs %(OPTIONS)s -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite ' - '"-sOutputFile=%(outfile)s" %(OPTIONS)s -c .setpdfwrite ' - '-f 
"%(infile)s"') % options - py.process.cmdexec(cmd) - return pdf - -def eps2pdf(eps): - # XXX write a pure python version - if not py.path.local.sysfind("epstopdf"): - raise SystemExit("ERROR: epstopdf not found") - py.process.cmdexec('epstopdf "%s"' % eps) - -def dvi2eps(dvi, dest=None): - if dest is None: - dest = eps.new(ext=".eps") - command = 'dvips -q -E -n 1 -D 600 -p 1 -o "%s" "%s"' % (dest, dvi) - if not py.path.local.sysfind("dvips"): - raise SystemExit("ERROR: dvips not found") - py.process.cmdexec(command) - -def convert_dot(fn, new_extension): - if not py.path.local.sysfind("dot"): - raise SystemExit("ERROR: dot not found") - result = fn.new(ext=new_extension) - print result - arg = "-T%s" % (format_to_dotargument[new_extension], ) - py.std.os.system('dot "%s" "%s" > "%s"' % (arg, fn, result)) - if new_extension == "eps": - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - ps.remove() - elif new_extension == "pdf": - # convert to eps file first, to get the bounding box right - eps = result.new(ext="eps") - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - eps2pdf(eps) - ps.remove() - eps.remove() - return result - - -class latexformula2png(object): - def __init__(self, formula, dest, temp=None): - self.formula = formula - try: - import Image - self.Image = Image - self.scale = 2 # create a larger image - self.upscale = 5 # create the image upscale times larger, then scale it down - except ImportError: - self.scale = 2 - self.upscale = 1 - self.Image = None - self.output_format = ('pngmono', 'pnggray', 'pngalpha')[2] - if temp is None: - temp = py.test.ensuretemp("latexformula") - self.temp = temp - self.latex = self.temp.join('formula.tex') - self.dvi = self.temp.join('formula.dvi') - self.eps = self.temp.join('formula.eps') - self.png = self.temp.join('formula.png') - self.saveas(dest) - - def saveas(self, dest): - self.gen_latex() - self.gen_dvi() - dvi2eps(self.dvi, self.eps) - self.gen_png() - self.scale_image() - 
self.png.copy(dest) - - def gen_latex(self): - self.latex.write (""" - \\documentclass{article} - \\pagestyle{empty} - \\begin{document} - - %s - \\pagebreak - - \\end{document} - """ % (self.formula)) - - def gen_dvi(self): - origdir = py.path.local() - self.temp.chdir() - py.process.cmdexec('latex "%s"' % (self.latex)) - origdir.chdir() - - def gen_png(self): - tempdir = py.path.local.mkdtemp() - - re_bbox = py.std.re.compile('%%BoundingBox:\s*(\d+) (\d+) (\d+) (\d+)') - eps = self.eps.read() - x1, y1, x2, y2 = [int(i) for i in re_bbox.search(eps).groups()] - X = x2 - x1 + 2 - Y = y2 - y1 + 2 - mx = -x1 - my = -y1 - ps = self.temp.join('temp.ps') - source = self.eps - ps.write(""" - 1 1 1 setrgbcolor - newpath - -1 -1 moveto - %(X)d -1 lineto - %(X)d %(Y)d lineto - -1 %(Y)d lineto - closepath - fill - %(mx)d %(my)d translate - 0 0 0 setrgbcolor - (%(source)s) run - - """ % locals()) - - sx = int((x2 - x1) * self.scale * self.upscale) - sy = int((y2 - y1) * self.scale * self.upscale) - res = 72 * self.scale * self.upscale - command = ('gs -q -g%dx%d -r%dx%d -sDEVICE=%s -sOutputFile="%s" ' - '-dNOPAUSE -dBATCH "%s"') % ( - sx, sy, res, res, self.output_format, self.png, ps) - py.process.cmdexec(command) - - def scale_image(self): - if self.Image is None: - return - image = self.Image.open(str(self.png)) - image.resize((image.size[0] / self.upscale, - image.size[1] / self.upscale), - self.Image.ANTIALIAS).save(str(self.png)) - diff --git a/pypy/doc/image/parsing_example9.dot b/pypy/doc/image/parsing_example9.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example9.dot +++ /dev/null @@ -1,13 +0,0 @@ -digraph G{ -"-1219430228" [label="list"]; -"-1219430228" -> "-1213608980"; -"-1213608980" [shape=box,label="DECIMAL\n'1'"]; -"-1219430228" -> "-1213623380"; -"-1213623380" [shape=box,label="DECIMAL\n'2'"]; -"-1219430228" -> "-1213441652"; -"-1213441652" [shape=box,label="DECIMAL\n'3'"]; -"-1219430228" -> "-1213441620"; -"-1213441620" 
[shape=box,label="DECIMAL\n'4'"]; -"-1219430228" -> "-1213442100"; -"-1213442100" [shape=box,label="DECIMAL\n'5'"]; -} \ No newline at end of file diff --git a/pypy/doc/image/parsing_example3.dot b/pypy/doc/image/parsing_example3.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example3.dot +++ /dev/null @@ -1,13 +0,0 @@ -digraph G{ -"-1219325716" [label="n"]; -"-1219325716" -> "-1219325844"; -"-1219325844" [shape=box,label="__0_A\n'A'"]; -"-1219325716" -> "-1219324372"; -"-1219324372" [label="n"]; -"-1219324372" -> "-1219325524"; -"-1219325524" [shape=box,label="__0_A\n'A'"]; -"-1219324372" -> "-1219324308"; -"-1219324308" [label="n"]; -"-1219324308" -> "-1219325492"; -"-1219325492" [shape=box,label="__0_A\n'A'"]; -} diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -16,13 +16,13 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) +sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -196,3 +196,4 @@ # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = {'http://docs.python.org/': None} + diff --git a/pypy/doc/image/parsing_example1.dot b/pypy/doc/image/parsing_example1.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example1.dot +++ /dev/null @@ -1,27 +0,0 @@ -digraph G{ -"-1213931828" [label="additive"]; -"-1213931828" -> "-1213951956"; -"-1213951956" [label="multitive"]; -"-1213951956" -> "-1213949172"; -"-1213949172" [label="primary"]; -"-1213949172" -> "-1213949812"; -"-1213949812" [shape=box,label="DECIMAL\l'12'"]; -"-1213931828" -> "-1213935220"; -"-1213935220" [shape=box,label="__0_+\l'+'"]; -"-1213931828" -> "-1213951316"; -"-1213951316" [label="additive"]; -"-1213951316" -> "-1213948180"; -"-1213948180" [label="multitive"]; -"-1213948180" -> "-1213951380"; -"-1213951380" [label="primary"]; -"-1213951380" -> "-1213951508"; -"-1213951508" [shape=box,label="DECIMAL\l'4'"]; -"-1213948180" -> "-1213948788"; -"-1213948788" [shape=box,label="__1_*\l'*'"]; -"-1213948180" -> "-1213951060"; -"-1213951060" [label="multitive"]; -"-1213951060" -> "-1213948980"; -"-1213948980" [label="primary"]; -"-1213948980" -> "-1213950420"; -"-1213950420" [shape=box,label="DECIMAL\l'5'"]; -} diff --git a/pypy/doc/config/makemodules.py b/pypy/doc/config/makemodules.py --- a/pypy/doc/config/makemodules.py +++ b/pypy/doc/config/makemodules.py @@ -7,12 +7,12 @@ if __name__ == '__main__': c = config.Config(pypyoption.pypy_optiondescription).usemodules prefix = "objspace.usemodules" - thisdir.join(prefix + ".txt").ensure() + thisdir.join(prefix + ".rst").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".txt" + basename = prefix + "." + p + ".rst" f = thisdir.join(basename) - if f.check() and f.size(): - continue + #if f.check() and f.size(): + # continue print "making docs for", p text = ["Use the '%s' module. 
" % (p, )] if p in pypyoption.essential_modules: From commits-noreply at bitbucket.org Tue Apr 26 19:07:29 2011 From: commits-noreply at bitbucket.org (arigo) Date: Tue, 26 Apr 2011 19:07:29 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Another task. Message-ID: <20110426170729.73654282B8B@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3535:ef24135960d3 Date: 2011-04-26 19:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/ef24135960d3/ Log: Another task. diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -33,6 +33,7 @@ - continue tracing after invalid loops - look into cython - investigate Open End software on top of PyPy (Lukas, Anders) + - (feedback to wesley chun's paragraphs: Armin, Laura) - presentations/discussions - Lukas' presentation on memory improvements (Tuesday) From commits-noreply at bitbucket.org Tue Apr 26 19:54:02 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 26 Apr 2011 19:54:02 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: enumerate the VirtualInfo objects and check that the numbering maches before jumping to a compiled loop. This will check that assumptions made in the loop about which values are equale holds. Message-ID: <20110426175402.AB68B282B8B@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43643:9b8e39aec901 Date: 2011-04-26 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/9b8e39aec901/ Log: enumerate the VirtualInfo objects and check that the numbering maches before jumping to a compiled loop. This will check that assumptions made in the loop about which values are equale holds. 
diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -11,6 +11,8 @@ from pypy.jit.metainterp.resoperation import rop, ResOperation class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): + position = -1 + def generalization_of(self, other): raise NotImplementedError @@ -24,12 +26,25 @@ def enum_forced_boxes(self, boxes, already_seen, value): raise NotImplementedError + + def enum(self, virtual_state): + if self.position != -1: + return + virtual_state.info_counter += 1 + self.position = virtual_state.info_counter + self._enum(virtual_state) + + def _enum(self, virtual_state): + raise NotImplementedError class AbstractVirtualStructStateInfo(AbstractVirtualStateInfo): def __init__(self, fielddescrs): self.fielddescrs = fielddescrs def generalization_of(self, other): + assert self.position != -1 + if self.position != other.position: + return False if not self._generalization_of(other): return False assert len(self.fielddescrs) == len(self.fieldstate) @@ -60,6 +75,10 @@ self.fieldstate[i].enum_forced_boxes(boxes, already_seen, v) else: boxes.append(value.box) + + def _enum(self, virtual_state): + for s in self.fieldstate: + s.enum(virtual_state) class VirtualStateInfo(AbstractVirtualStructStateInfo): def __init__(self, known_class, fielddescrs): @@ -90,6 +109,9 @@ self.arraydescr = arraydescr def generalization_of(self, other): + assert self.position != -1 + if self.position != other.position: + return False if self.arraydescr is not other.arraydescr: return False if len(self.fieldstate) != len(other.fieldstate): @@ -112,6 +134,10 @@ else: boxes.append(value.box) + def _enum(self, virtual_state): + for s in self.fieldstate: + s.enum(virtual_state) + class NotVirtualStateInfo(AbstractVirtualStateInfo): def __init__(self, value): self.known_class = value.known_class @@ -128,6 +154,9 @@ def 
generalization_of(self, other): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? + assert self.position != -1 + if self.position != other.position: + return False if not isinstance(other, NotVirtualStateInfo): return False if other.level < self.level: @@ -203,11 +232,17 @@ if key not in already_seen: boxes.append(value.force_box()) already_seen[value.get_key_box()] = None - + + def _enum(self, virtual_state): + virtual_state.notvirtuals.append(self) class VirtualState(object): def __init__(self, state): self.state = state + self.info_counter = -1 + self.notvirtuals = [] + for s in state: + s.enum(self) def generalization_of(self, other): assert len(self.state) == len(other.state) From commits-noreply at bitbucket.org Tue Apr 26 19:54:03 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 26 Apr 2011 19:54:03 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: make the inputarg layout depend on the virtualstate only and not on the current values Message-ID: <20110426175403.DC93B282B8B@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43644:ecf14bebdc38 Date: 2011-04-26 19:53 +0200 http://bitbucket.org/pypy/pypy/changeset/ecf14bebdc38/ Log: make the inputarg layout depend on the virtualstate only and not on the current values diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py --- a/pypy/jit/metainterp/optimizeopt/virtualstate.py +++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py @@ -9,6 +9,7 @@ from pypy.jit.metainterp.optimizeutil import InvalidLoop from pypy.jit.metainterp.optimizeopt.intutils import IntBound from pypy.jit.metainterp.resoperation import rop, ResOperation +from pypy.rlib.objectmodel import we_are_translated class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): position = -1 @@ -24,7 +25,7 @@ def _generate_guards(self, other, box, cpu, extra_guards): raise InvalidLoop - def 
enum_forced_boxes(self, boxes, already_seen, value): + def enum_forced_boxes(self, boxes, value): raise NotImplementedError def enum(self, virtual_state): @@ -63,18 +64,14 @@ def _generalization_of(self, other): raise NotImplementedError - def enum_forced_boxes(self, boxes, already_seen, value): + def enum_forced_boxes(self, boxes, value): assert isinstance(value, virtualize.AbstractVirtualStructValue) - key = value.get_key_box() - if key in already_seen: - return - already_seen[key] = None - if value.box is None: - for i in range(len(self.fielddescrs)): - v = value._fields[self.fielddescrs[i]] - self.fieldstate[i].enum_forced_boxes(boxes, already_seen, v) - else: - boxes.append(value.box) + assert value.is_virtual() + for i in range(len(self.fielddescrs)): + v = value._fields[self.fielddescrs[i]] + s = self.fieldstate[i] + if s.position > self.position: + s.enum_forced_boxes(boxes, v) def _enum(self, virtual_state): for s in self.fieldstate: @@ -121,18 +118,14 @@ return False return True - def enum_forced_boxes(self, boxes, already_seen, value): + def enum_forced_boxes(self, boxes, value): assert isinstance(value, virtualize.VArrayValue) - key = value.get_key_box() - if key in already_seen: - return - already_seen[key] = None - if value.box is None: - for i in range(len(self.fieldstate)): - v = value._items[i] - self.fieldstate[i].enum_forced_boxes(boxes, already_seen, v) - else: - boxes.append(value.box) + assert value.is_virtual() + for i in range(len(self.fieldstate)): + v = value._items[i] + s = self.fieldstate[i] + if s.position > self.position: + s.enum_forced_boxes(boxes, v) def _enum(self, virtual_state): for s in self.fieldstate: @@ -150,6 +143,7 @@ self.constbox = value.box else: self.constbox = None + self.position_in_notvirtuals = -1 def generalization_of(self, other): # XXX This will always retrace instead of forcing anything which @@ -225,22 +219,23 @@ import pdb; pdb.set_trace() raise NotImplementedError - def enum_forced_boxes(self, boxes, 
already_seen, value): + def enum_forced_boxes(self, boxes, value): if self.level == LEVEL_CONSTANT: return - key = value.get_key_box() - if key not in already_seen: - boxes.append(value.force_box()) - already_seen[value.get_key_box()] = None + assert 0 <= self.position_in_notvirtuals + boxes[self.position_in_notvirtuals] = value.force_box() def _enum(self, virtual_state): + if self.level == LEVEL_CONSTANT: + return + self.position_in_notvirtuals = len(virtual_state.notvirtuals) virtual_state.notvirtuals.append(self) class VirtualState(object): def __init__(self, state): self.state = state self.info_counter = -1 - self.notvirtuals = [] + self.notvirtuals = [] # FIXME: We dont need this list, only it's length for s in state: s.enum(self) @@ -259,17 +254,20 @@ def make_inputargs(self, values, keyboxes=False): assert len(values) == len(self.state) - inputargs = [] - seen_inputargs = {} + inputargs = [None] * len(self.notvirtuals) for i in range(len(values)): - self.state[i].enum_forced_boxes(inputargs, seen_inputargs, - values[i]) - + self.state[i].enum_forced_boxes(inputargs, values[i]) + if keyboxes: - for value in values: - box = value.get_key_box() - if box not in inputargs and not isinstance(box, Const): + for i in range(len(values)): + if not isinstance(self.state[i], NotVirtualStateInfo): + box = values[i].get_key_box() + assert not isinstance(box, Const) inputargs.append(box) + + if not we_are_translated(): + assert len(set(inputargs)) == len(inputargs) + assert None not in inputargs return inputargs @@ -310,7 +308,10 @@ for box in jump_args] for value in values: - value.get_args_for_fail(self) + if value.is_virtual(): + value.get_args_for_fail(self) + else: + self.make_not_virtual(value) return VirtualState([self.state(box) for box in jump_args]) def make_not_virtual(self, value): From commits-noreply at bitbucket.org Tue Apr 26 22:31:31 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Tue, 26 Apr 2011 22:31:31 +0200 (CEST) Subject: [pypy-svn] 
pypy default: dissable the shift optimizations if the shift count is not known to be less than LONG_BIT Message-ID: <20110426203131.9D63B282B8B@codespeak.net> Author: Hakan Ardo Branch: Changeset: r43645:4b667c14b991 Date: 2011-04-26 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/4b667c14b991/ Log: dissable the shift optimizations if the shift count is not known to be less than LONG_BIT diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,4 +1,4 @@ -from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift +from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') @@ -20,7 +20,7 @@ def make_lt(self, other): return self.make_le(other.add(-1)) - + def make_ge(self, other): if other.has_lower: if not self.has_lower or other.lower > self.lower: @@ -161,7 +161,8 @@ def lshift_bound(self, other): if self.has_upper and self.has_lower and \ other.has_upper and other.has_lower and \ - other.known_ge(IntBound(0, 0)): + other.known_ge(IntBound(0, 0)) and \ + other.known_lt(IntBound(LONG_BIT, LONG_BIT)): try: vals = (ovfcheck_lshift(self.upper, other.upper), ovfcheck_lshift(self.upper, other.lower), @@ -176,7 +177,8 @@ def rshift_bound(self, other): if self.has_upper and self.has_lower and \ other.has_upper and other.has_lower and \ - other.known_ge(IntBound(0, 0)): + other.known_ge(IntBound(0, 0)) and \ + other.known_lt(IntBound(LONG_BIT, LONG_BIT)): vals = (self.upper >> other.upper, self.upper >> other.lower, self.lower >> other.upper, diff --git a/pypy/jit/metainterp/test/test_intbound.py b/pypy/jit/metainterp/test/test_intbound.py --- a/pypy/jit/metainterp/test/test_intbound.py +++ b/pypy/jit/metainterp/test/test_intbound.py @@ -2,6 +2,7 @@ IntLowerBound, IntUnbounded from copy import copy import sys +from 
pypy.rlib.rarithmetic import LONG_BIT def bound(a,b): if a is None and b is None: @@ -229,6 +230,12 @@ assert not b10.lshift_bound(b100).has_upper assert not bmax.lshift_bound(b10).has_upper assert b10.lshift_bound(b10).has_upper + + for b in (b10, b100, bmax, IntBound(0, 0)): + for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): + #assert not b.lshift_bound(shift_count_bound).has_upper + assert not b.rshift_bound(shift_count_bound).has_upper + def test_div_bound(): for _, _, b1 in some_bounds(): From commits-noreply at bitbucket.org Tue Apr 26 23:36:10 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 23:36:10 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: rename the config description files to .txt again, because sphinx is not supposed to touch them Message-ID: <20110426213610.A550E36C204@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43646:2841a5a35149 Date: 2011-04-26 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/2841a5a35149/ Log: rename the config description files to .txt again, because sphinx is not supposed to touch them diff --git a/pypy/doc/config/objspace.std.optimized_int_add.rst b/pypy/doc/config/objspace.std.optimized_int_add.txt copy from pypy/doc/config/objspace.std.optimized_int_add.rst copy to pypy/doc/config/objspace.std.optimized_int_add.txt diff --git a/pypy/doc/config/objspace.std.withcelldict.rst b/pypy/doc/config/objspace.std.withcelldict.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withcelldict.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable cell-dicts. This optimization is not helpful without the JIT. In the -presence of the JIT, it greatly helps looking up globals. 
diff --git a/pypy/doc/config/objspace.std.withprebuiltint.rst b/pypy/doc/config/objspace.std.withprebuiltint.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withprebuiltint.rst +++ /dev/null @@ -1,5 +0,0 @@ -This option enables the caching of small integer objects (similar to what -CPython does). The range of which integers are cached can be influenced with -the :config:`objspace.std.prebuiltintfrom` and -:config:`objspace.std.prebuiltintto` options. - diff --git a/pypy/doc/config/objspace.honor__builtins__.rst b/pypy/doc/config/objspace.honor__builtins__.txt copy from pypy/doc/config/objspace.honor__builtins__.rst copy to pypy/doc/config/objspace.honor__builtins__.txt diff --git a/pypy/doc/config/objspace.std.optimized_comparison_op.rst b/pypy/doc/config/objspace.std.optimized_comparison_op.txt copy from pypy/doc/config/objspace.std.optimized_comparison_op.rst copy to pypy/doc/config/objspace.std.optimized_comparison_op.txt diff --git a/pypy/doc/config/translation.list_comprehension_operations.rst b/pypy/doc/config/translation.list_comprehension_operations.rst deleted file mode 100644 --- a/pypy/doc/config/translation.list_comprehension_operations.rst +++ /dev/null @@ -1,2 +0,0 @@ -Experimental optimization for list comprehensions in RPython. - diff --git a/pypy/doc/config/objspace.soabi.rst b/pypy/doc/config/objspace.soabi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.soabi.rst +++ /dev/null @@ -1,14 +0,0 @@ -This option controls the tag included into extension module file names. The -default is something like `pypy-14`, which means that `import foo` will look for -a file named `foo.pypy-14.so` (or `foo.pypy-14.pyd` on Windows). - -This is an implementation of PEP3149_, with two differences: - - * the filename without tag `foo.so` is not considered. - * the feature is also available on Windows. 
- -When set to the empty string (with `--soabi=`), the interpreter will only look -for a file named `foo.so`, and will crash if this file was compiled for another -Python interpreter. - -.. _PEP3149: http://www.python.org/dev/peps/pep-3149/ diff --git a/pypy/doc/config/objspace.usemodules._ast.rst b/pypy/doc/config/objspace.usemodules._ast.txt copy from pypy/doc/config/objspace.usemodules._ast.rst copy to pypy/doc/config/objspace.usemodules._ast.txt diff --git a/pypy/doc/config/translation.ootype.mangle.rst b/pypy/doc/config/translation.ootype.mangle.rst deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.rst +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/objspace.usemodules._warnings.rst b/pypy/doc/config/objspace.usemodules._warnings.txt copy from pypy/doc/config/objspace.usemodules._warnings.rst copy to pypy/doc/config/objspace.usemodules._warnings.txt diff --git a/pypy/doc/config/objspace.usemodules._random.rst b/pypy/doc/config/objspace.usemodules._random.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._random.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_random' module. It is necessary to use the module "random" from the standard library. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.rst b/pypy/doc/config/objspace.usemodules.pyexpat.txt copy from pypy/doc/config/objspace.usemodules.pyexpat.rst copy to pypy/doc/config/objspace.usemodules.pyexpat.txt diff --git a/pypy/doc/config/objspace.std.withmapdict.rst b/pypy/doc/config/objspace.std.withmapdict.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.rst +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". 
- -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/translation.stackless.rst b/pypy/doc/config/translation.stackless.rst deleted file mode 100644 --- a/pypy/doc/config/translation.stackless.rst +++ /dev/null @@ -1,5 +0,0 @@ -Run the `stackless transform`_ on each generated graph, which enables the use -of coroutines at RPython level and the "stackless" module when translating -PyPy. - -.. _`stackless transform`: ../stackless.html diff --git a/pypy/doc/config/translation.backendopt.none.rst b/pypy/doc/config/translation.backendopt.none.txt copy from pypy/doc/config/translation.backendopt.none.rst copy to pypy/doc/config/translation.backendopt.none.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.rst b/pypy/doc/config/objspace.std.withprebuiltchar.rst deleted file mode 100644 diff --git a/pypy/doc/config/translation.backend.rst b/pypy/doc/config/translation.backend.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backend.rst +++ /dev/null @@ -1,3 +0,0 @@ -Which backend to use when translating, see `translation documentation`_. - -.. _`translation documentation`: ../translation.html diff --git a/pypy/doc/config/objspace.usemodules.token.rst b/pypy/doc/config/objspace.usemodules.token.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.token.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'token' module. -This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/objspace.usemodules._winreg.rst b/pypy/doc/config/objspace.usemodules._winreg.txt copy from pypy/doc/config/objspace.usemodules._winreg.rst copy to pypy/doc/config/objspace.usemodules._winreg.txt diff --git a/pypy/doc/config/translation.debug.rst b/pypy/doc/config/translation.debug.rst deleted file mode 100644 --- a/pypy/doc/config/translation.debug.rst +++ /dev/null @@ -1,2 +0,0 @@ -Record extra debugging information during annotation. This leads to slightly -less obscure error messages. diff --git a/pypy/doc/config/objspace.usemodules.math.rst b/pypy/doc/config/objspace.usemodules.math.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.math.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'math' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.backendopt.rst b/pypy/doc/config/translation.backendopt.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.rst +++ /dev/null @@ -1,5 +0,0 @@ -This group contains options about various backend optimization passes. Most of -them are described in the `EU report about optimization`_ - -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf - diff --git a/pypy/doc/config/translation.jit.rst b/pypy/doc/config/translation.jit.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable the JIT generator, for targets that have JIT support. -Experimental so far. diff --git a/pypy/doc/config/objspace.std.builtinshortcut.rst b/pypy/doc/config/objspace.std.builtinshortcut.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.builtinshortcut.rst +++ /dev/null @@ -1,5 +0,0 @@ -A shortcut speeding up primitive operations between built-in types. 
- -This is a space-time trade-off: at the moment, this option makes a -translated pypy-c executable bigger by about 1.7 MB. (This can probably -be improved with careful analysis.) diff --git a/pypy/doc/config/translation.cc.rst b/pypy/doc/config/translation.cc.txt copy from pypy/doc/config/translation.cc.rst copy to pypy/doc/config/translation.cc.txt diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.rst b/pypy/doc/config/objspace.std.prebuiltintfrom.txt copy from pypy/doc/config/objspace.std.prebuiltintfrom.rst copy to pypy/doc/config/objspace.std.prebuiltintfrom.txt diff --git a/pypy/doc/config/objspace.usemodules.operator.rst b/pypy/doc/config/objspace.usemodules.operator.txt copy from pypy/doc/config/objspace.usemodules.operator.rst copy to pypy/doc/config/objspace.usemodules.operator.txt diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.rst b/pypy/doc/config/objspace.usemodules.__pypy__.txt copy from pypy/doc/config/objspace.usemodules.__pypy__.rst copy to pypy/doc/config/objspace.usemodules.__pypy__.txt diff --git a/pypy/doc/config/objspace.honor__builtins__.rst b/pypy/doc/config/objspace.honor__builtins__.rst deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.multimethods.rst b/pypy/doc/config/objspace.std.multimethods.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.multimethods.rst +++ /dev/null @@ -1,8 +0,0 @@ -Choose the multimethod implementation. - -* ``doubledispatch`` turns - a multimethod call into a sequence of normal method calls. - -* ``mrd`` uses a technique known as Multiple Row Displacement - which precomputes a few compact tables of numbers and - function pointers. diff --git a/pypy/doc/config/translation.linkerflags.rst b/pypy/doc/config/translation.linkerflags.rst deleted file mode 100644 --- a/pypy/doc/config/translation.linkerflags.rst +++ /dev/null @@ -1,1 +0,0 @@ -Experimental. Specify extra flags to pass to the linker. 
diff --git a/pypy/doc/config/objspace.std.withmethodcache.rst b/pypy/doc/config/objspace.std.withmethodcache.txt copy from pypy/doc/config/objspace.std.withmethodcache.rst copy to pypy/doc/config/objspace.std.withmethodcache.txt diff --git a/pypy/doc/config/objspace.usemodules._random.rst b/pypy/doc/config/objspace.usemodules._random.txt copy from pypy/doc/config/objspace.usemodules._random.rst copy to pypy/doc/config/objspace.usemodules._random.txt diff --git a/pypy/doc/config/objspace.usemodules.mmap.rst b/pypy/doc/config/objspace.usemodules.mmap.txt copy from pypy/doc/config/objspace.usemodules.mmap.rst copy to pypy/doc/config/objspace.usemodules.mmap.txt diff --git a/pypy/doc/config/translation.simplifying.rst b/pypy/doc/config/translation.simplifying.txt copy from pypy/doc/config/translation.simplifying.rst copy to pypy/doc/config/translation.simplifying.txt diff --git a/pypy/doc/config/objspace.std.withmethodcache.rst b/pypy/doc/config/objspace.std.withmethodcache.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. diff --git a/pypy/doc/config/translation.jit_backend.rst b/pypy/doc/config/translation.jit_backend.txt copy from pypy/doc/config/translation.jit_backend.rst copy to pypy/doc/config/translation.jit_backend.txt diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.rst b/pypy/doc/config/objspace.usemodules.__pypy__.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.__pypy__.rst +++ /dev/null @@ -1,9 +0,0 @@ -Use the '__pypy__' module. -This module is expected to be working and is included by default. -It contains special PyPy-specific functionality. -For example most of the special functions described in the `object space proxies` -document are in the module. -See the `__pypy__ module documentation`_ for more details. 
- -.. _`object space proxy`: ../objspace-proxies.html -.. _`__pypy__ module documentation`: ../__pypy__-module.html diff --git a/pypy/doc/config/objspace.usemodules._hashlib.rst b/pypy/doc/config/objspace.usemodules._hashlib.txt copy from pypy/doc/config/objspace.usemodules._hashlib.rst copy to pypy/doc/config/objspace.usemodules._hashlib.txt diff --git a/pypy/doc/config/objspace.usemodules.posix.rst b/pypy/doc/config/objspace.usemodules.posix.txt copy from pypy/doc/config/objspace.usemodules.posix.rst copy to pypy/doc/config/objspace.usemodules.posix.txt diff --git a/pypy/doc/config/objspace.std.sharesmallstr.rst b/pypy/doc/config/objspace.std.sharesmallstr.rst deleted file mode 100644 diff --git a/pypy/doc/config/objspace.geninterp.rst b/pypy/doc/config/objspace.geninterp.txt copy from pypy/doc/config/objspace.geninterp.rst copy to pypy/doc/config/objspace.geninterp.txt diff --git a/pypy/doc/config/translation.cli.exception_transformer.rst b/pypy/doc/config/translation.cli.exception_transformer.txt copy from pypy/doc/config/translation.cli.exception_transformer.rst copy to pypy/doc/config/translation.cli.exception_transformer.txt diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.rst b/pypy/doc/config/translation.backendopt.stack_optimization.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.stack_optimization.rst +++ /dev/null @@ -1,1 +0,0 @@ -Enable the optimized code generation for stack based machine, if the backend support it diff --git a/pypy/doc/config/objspace.usemodules._codecs.rst b/pypy/doc/config/objspace.usemodules._codecs.txt copy from pypy/doc/config/objspace.usemodules._codecs.rst copy to pypy/doc/config/objspace.usemodules._codecs.txt diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.rst b/pypy/doc/config/objspace.usemodules.unicodedata.txt copy from pypy/doc/config/objspace.usemodules.unicodedata.rst copy to pypy/doc/config/objspace.usemodules.unicodedata.txt diff --git 
a/pypy/doc/config/objspace.std.withrope.rst b/pypy/doc/config/objspace.std.withrope.txt copy from pypy/doc/config/objspace.std.withrope.rst copy to pypy/doc/config/objspace.std.withrope.txt diff --git a/pypy/doc/config/translation.jit_backend.rst b/pypy/doc/config/translation.jit_backend.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_backend.rst +++ /dev/null @@ -1,2 +0,0 @@ -Choose the backend to use for the JIT. -By default, this is the best backend for the current platform. diff --git a/pypy/doc/config/objspace.std.newshortcut.rst b/pypy/doc/config/objspace.std.newshortcut.txt copy from pypy/doc/config/objspace.std.newshortcut.rst copy to pypy/doc/config/objspace.std.newshortcut.txt diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.rst b/pypy/doc/config/objspace.std.methodcachesizeexp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.methodcachesizeexp.rst +++ /dev/null @@ -1,1 +0,0 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. diff --git a/pypy/doc/config/objspace.std.rst b/pypy/doc/config/objspace.std.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.extmodules.rst +++ /dev/null @@ -1,12 +0,0 @@ -You can pass a comma-separated list of third-party builtin modules -which should be translated along with the standard modules within -``pypy.module``. - -The module names need to be fully qualified (i.e. have a ``.`` in them), -be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. -``mypkg.somemod``. 
- -Once translated, the module will be accessible with a simple:: - - import somemod - diff --git a/pypy/doc/config/objspace.usemodules._ast.rst b/pypy/doc/config/objspace.usemodules._ast.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ast.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_ast' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.sys.rst b/pypy/doc/config/objspace.usemodules.sys.txt copy from pypy/doc/config/objspace.usemodules.sys.rst copy to pypy/doc/config/objspace.usemodules.sys.txt diff --git a/pypy/doc/config/translation.ootype.rst b/pypy/doc/config/translation.ootype.txt copy from pypy/doc/config/translation.ootype.rst copy to pypy/doc/config/translation.ootype.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.rst b/pypy/doc/config/translation.backendopt.profile_based_inline.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline.rst +++ /dev/null @@ -1,10 +0,0 @@ -Inline flowgraphs only for call-sites for which there was a minimal -number of calls during an instrumented run of the program. Callee -flowgraphs are considered candidates based on a weight heuristic like -for basic inlining. (see :config:`translation.backendopt.inline`, -:config:`translation.backendopt.profile_based_inline_threshold` ). - -The option takes as value a string which is the arguments to pass to -the program for the instrumented run. - -This optimization is not used by default. \ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules._md5.rst b/pypy/doc/config/objspace.usemodules._md5.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._md5.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in '_md5' module. -This module is expected to be working and is included by default. 
-There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. diff --git a/pypy/doc/config/objspace.std.builtinshortcut.rst b/pypy/doc/config/objspace.std.builtinshortcut.txt copy from pypy/doc/config/objspace.std.builtinshortcut.rst copy to pypy/doc/config/objspace.std.builtinshortcut.txt diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.rst b/pypy/doc/config/objspace.std.optimized_list_getitem.txt copy from pypy/doc/config/objspace.std.optimized_list_getitem.rst copy to pypy/doc/config/objspace.std.optimized_list_getitem.txt diff --git a/pypy/doc/config/objspace.std.withropeunicode.rst b/pypy/doc/config/objspace.std.withropeunicode.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withropeunicode.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use ropes to implement unicode strings (and also normal strings). - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#ropes - - diff --git a/pypy/doc/config/objspace.std.optimized_int_add.rst b/pypy/doc/config/objspace.std.optimized_int_add.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_int_add.rst +++ /dev/null @@ -1,2 +0,0 @@ -Optimize the addition of two integers a bit. Enabling this option gives small -speedups. diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.rst b/pypy/doc/config/objspace.usemodules._multiprocessing.txt copy from pypy/doc/config/objspace.usemodules._multiprocessing.rst copy to pypy/doc/config/objspace.usemodules._multiprocessing.txt diff --git a/pypy/doc/config/translation.rst b/pypy/doc/config/translation.rst deleted file mode 100644 --- a/pypy/doc/config/translation.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. 
intentionally empty diff --git a/pypy/doc/config/objspace.usemodules.operator.rst b/pypy/doc/config/objspace.usemodules.operator.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.operator.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'operator' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.name.rst b/pypy/doc/config/objspace.name.txt copy from pypy/doc/config/objspace.name.rst copy to pypy/doc/config/objspace.name.txt diff --git a/pypy/doc/config/translation.stackless.rst b/pypy/doc/config/translation.stackless.txt copy from pypy/doc/config/translation.stackless.rst copy to pypy/doc/config/translation.stackless.txt diff --git a/pypy/doc/config/objspace.usemodules.termios.rst b/pypy/doc/config/objspace.usemodules.termios.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.termios.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'termios' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.timing.rst b/pypy/doc/config/objspace.timing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.timing.rst +++ /dev/null @@ -1,1 +0,0 @@ -timing of various parts of the interpreter (simple profiling) diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst b/pypy/doc/config/translation.builtins_can_raise_exceptions.rst deleted file mode 100644 --- a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/config/objspace.usepycfiles.rst b/pypy/doc/config/objspace.usepycfiles.txt copy from pypy/doc/config/objspace.usepycfiles.rst copy to pypy/doc/config/objspace.usepycfiles.txt diff --git a/pypy/doc/config/translation.gcremovetypeptr.rst b/pypy/doc/config/translation.gcremovetypeptr.txt copy from pypy/doc/config/translation.gcremovetypeptr.rst copy to pypy/doc/config/translation.gcremovetypeptr.txt diff --git a/pypy/doc/config/objspace.timing.rst b/pypy/doc/config/objspace.timing.txt copy from pypy/doc/config/objspace.timing.rst copy to pypy/doc/config/objspace.timing.txt diff --git a/pypy/doc/config/translation.shared.rst b/pypy/doc/config/translation.shared.rst deleted file mode 100644 --- a/pypy/doc/config/translation.shared.rst +++ /dev/null @@ -1,2 +0,0 @@ -Build pypy as a shared library or a DLL, with a small executable to run it. -This is necessary on Windows to expose the C API provided by the cpyext module. diff --git a/pypy/doc/config/objspace.std.sharesmallstr.rst b/pypy/doc/config/objspace.std.sharesmallstr.txt copy from pypy/doc/config/objspace.std.sharesmallstr.rst copy to pypy/doc/config/objspace.std.sharesmallstr.txt diff --git a/pypy/doc/config/objspace.usemodules.select.rst b/pypy/doc/config/objspace.usemodules.select.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.select.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'select' module. -This module is expected to be fully working. 
diff --git a/pypy/doc/config/objspace.usemodules.select.rst b/pypy/doc/config/objspace.usemodules.select.txt copy from pypy/doc/config/objspace.usemodules.select.rst copy to pypy/doc/config/objspace.usemodules.select.txt diff --git a/pypy/doc/config/objspace.usemodules._md5.rst b/pypy/doc/config/objspace.usemodules._md5.txt copy from pypy/doc/config/objspace.usemodules._md5.rst copy to pypy/doc/config/objspace.usemodules._md5.txt diff --git a/pypy/doc/config/translation.platform.rst b/pypy/doc/config/translation.platform.txt copy from pypy/doc/config/translation.platform.rst copy to pypy/doc/config/translation.platform.txt diff --git a/pypy/doc/config/objspace.usemodules.signal.rst b/pypy/doc/config/objspace.usemodules.signal.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.signal.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'signal' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.rst b/pypy/doc/config/objspace.std.methodcachesizeexp.txt copy from pypy/doc/config/objspace.std.methodcachesizeexp.rst copy to pypy/doc/config/objspace.std.methodcachesizeexp.txt diff --git a/pypy/doc/config/translation.backendopt.constfold.rst b/pypy/doc/config/translation.backendopt.constfold.txt copy from pypy/doc/config/translation.backendopt.constfold.rst copy to pypy/doc/config/translation.backendopt.constfold.txt diff --git a/pypy/doc/config/objspace.std.withrangelist.rst b/pypy/doc/config/objspace.std.withrangelist.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.rst +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. 
This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/translation.gcrootfinder.rst b/pypy/doc/config/translation.gcrootfinder.txt copy from pypy/doc/config/translation.gcrootfinder.rst copy to pypy/doc/config/translation.gcrootfinder.txt diff --git a/pypy/doc/config/translation.force_make.rst b/pypy/doc/config/translation.force_make.rst deleted file mode 100644 --- a/pypy/doc/config/translation.force_make.rst +++ /dev/null @@ -1,1 +0,0 @@ -Force executing makefile instead of using platform. diff --git a/pypy/doc/config/objspace.std.withrangelist.rst b/pypy/doc/config/objspace.std.withrangelist.txt copy from pypy/doc/config/objspace.std.withrangelist.rst copy to pypy/doc/config/objspace.std.withrangelist.txt diff --git a/pypy/doc/config/objspace.soabi.rst b/pypy/doc/config/objspace.soabi.txt copy from pypy/doc/config/objspace.soabi.rst copy to pypy/doc/config/objspace.soabi.txt diff --git a/pypy/doc/config/objspace.std.newshortcut.rst b/pypy/doc/config/objspace.std.newshortcut.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.newshortcut.rst +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: cache and shortcut calling __new__ from builtin types diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.rst b/pypy/doc/config/objspace.std.optimized_list_getitem.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_list_getitem.rst +++ /dev/null @@ -1,1 +0,0 @@ -Optimized list[int] a bit. 
diff --git a/pypy/doc/config/objspace.std.prebuiltintto.rst b/pypy/doc/config/objspace.std.prebuiltintto.txt copy from pypy/doc/config/objspace.std.prebuiltintto.rst copy to pypy/doc/config/objspace.std.prebuiltintto.txt diff --git a/pypy/doc/config/objspace.usemodules.parser.rst b/pypy/doc/config/objspace.usemodules.parser.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.parser.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the 'parser' module. -This is PyPy implementation of the standard library 'parser' module (e.g. if -this option is enabled and you say ``import parser`` you get this module). -It is enabled by default. diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.rst b/pypy/doc/config/objspace.disable_call_speedhacks.txt copy from pypy/doc/config/objspace.disable_call_speedhacks.rst copy to pypy/doc/config/objspace.disable_call_speedhacks.txt diff --git a/pypy/doc/config/translation.jit_profiler.rst b/pypy/doc/config/translation.jit_profiler.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_profiler.rst +++ /dev/null @@ -1,1 +0,0 @@ -Integrate profiler support into the JIT diff --git a/pypy/doc/config/translation.rst b/pypy/doc/config/translation.txt copy from pypy/doc/config/translation.rst copy to pypy/doc/config/translation.txt diff --git a/pypy/doc/config/objspace.std.withcelldict.rst b/pypy/doc/config/objspace.std.withcelldict.txt copy from pypy/doc/config/objspace.std.withcelldict.rst copy to pypy/doc/config/objspace.std.withcelldict.txt diff --git a/pypy/doc/config/objspace.usemodules.signal.rst b/pypy/doc/config/objspace.usemodules.signal.txt copy from pypy/doc/config/objspace.usemodules.signal.rst copy to pypy/doc/config/objspace.usemodules.signal.txt diff --git a/pypy/doc/config/translation.output.rst b/pypy/doc/config/translation.output.rst deleted file mode 100644 --- a/pypy/doc/config/translation.output.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify file name that the produced executable gets. 
diff --git a/pypy/doc/config/objspace.allworkingmodules.rst b/pypy/doc/config/objspace.allworkingmodules.txt copy from pypy/doc/config/objspace.allworkingmodules.rst copy to pypy/doc/config/objspace.allworkingmodules.txt diff --git a/pypy/doc/config/objspace.usemodules.fcntl.rst b/pypy/doc/config/objspace.usemodules.fcntl.txt copy from pypy/doc/config/objspace.usemodules.fcntl.rst copy to pypy/doc/config/objspace.usemodules.fcntl.txt diff --git a/pypy/doc/config/objspace.rst b/pypy/doc/config/objspace.txt copy from pypy/doc/config/objspace.rst copy to pypy/doc/config/objspace.txt diff --git a/pypy/doc/config/objspace.usemodules._weakref.rst b/pypy/doc/config/objspace.usemodules._weakref.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._weakref.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_weakref' module, necessary for the standard lib 'weakref' module. -PyPy's weakref implementation is not completely stable yet. The first -difference to CPython is that weak references only go away after the next -garbage collection, not immediately. The other problem seems to be that under -certain circumstances (that we have not determined) weak references keep the -object alive. diff --git a/pypy/doc/config/objspace.usemodules.array.rst b/pypy/doc/config/objspace.usemodules.array.txt copy from pypy/doc/config/objspace.usemodules.array.rst copy to pypy/doc/config/objspace.usemodules.array.txt diff --git a/pypy/doc/config/translation.make_jobs.rst b/pypy/doc/config/translation.make_jobs.txt copy from pypy/doc/config/translation.make_jobs.rst copy to pypy/doc/config/translation.make_jobs.txt diff --git a/pypy/doc/config/objspace.std.prebuiltintto.rst b/pypy/doc/config/objspace.std.prebuiltintto.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.prebuiltintto.rst +++ /dev/null @@ -1,1 +0,0 @@ -See :config:`objspace.std.withprebuiltint`. 
diff --git a/pypy/doc/config/translation.vanilla.rst b/pypy/doc/config/translation.vanilla.rst deleted file mode 100644 --- a/pypy/doc/config/translation.vanilla.rst +++ /dev/null @@ -1,2 +0,0 @@ -Try to make the resulting compiled program as portable (=movable to another -machine) as possible. Which is not much. diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.rst b/pypy/doc/config/objspace.usemodules._multiprocessing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._multiprocessing.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_multiprocessing' module. -Used by the 'multiprocessing' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.oracle.rst b/pypy/doc/config/objspace.usemodules.oracle.txt copy from pypy/doc/config/objspace.usemodules.oracle.rst copy to pypy/doc/config/objspace.usemodules.oracle.txt diff --git a/pypy/doc/config/objspace.usemodules.errno.rst b/pypy/doc/config/objspace.usemodules.errno.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.errno.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'errno' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.posix.rst b/pypy/doc/config/objspace.usemodules.posix.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.posix.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the essential 'posix' module. -This module is essential, included by default and cannot be removed (even when -specified explicitly, the option gets overridden later). diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.rst b/pypy/doc/config/objspace.std.getattributeshortcut.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.rst +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. 
diff --git a/pypy/doc/config/objspace.usemodules.cpyext.rst b/pypy/doc/config/objspace.usemodules.cpyext.txt copy from pypy/doc/config/objspace.usemodules.cpyext.rst copy to pypy/doc/config/objspace.usemodules.cpyext.txt diff --git a/pypy/doc/config/translation.platform.rst b/pypy/doc/config/translation.platform.rst deleted file mode 100644 --- a/pypy/doc/config/translation.platform.rst +++ /dev/null @@ -1,1 +0,0 @@ -select the target platform, in case of cross-compilation diff --git a/pypy/doc/config/translation.instrumentctl.rst b/pypy/doc/config/translation.instrumentctl.txt copy from pypy/doc/config/translation.instrumentctl.rst copy to pypy/doc/config/translation.instrumentctl.txt diff --git a/pypy/doc/config/translation.fork_before.rst b/pypy/doc/config/translation.fork_before.rst deleted file mode 100644 --- a/pypy/doc/config/translation.fork_before.rst +++ /dev/null @@ -1,4 +0,0 @@ -This is an option mostly useful when working on the PyPy toolchain. If you use -it, translate.py will fork before the specified phase. If the translation -crashes after that fork, you can fix the bug in the toolchain, and continue -translation at the fork-point. diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.rst b/pypy/doc/config/objspace.std.withmethodcachecounter.txt copy from pypy/doc/config/objspace.std.withmethodcachecounter.rst copy to pypy/doc/config/objspace.std.withmethodcachecounter.txt diff --git a/pypy/doc/config/translation.fork_before.rst b/pypy/doc/config/translation.fork_before.txt copy from pypy/doc/config/translation.fork_before.rst copy to pypy/doc/config/translation.fork_before.txt diff --git a/pypy/doc/config/translation.gcremovetypeptr.rst b/pypy/doc/config/translation.gcremovetypeptr.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gcremovetypeptr.rst +++ /dev/null @@ -1,1 +0,0 @@ -If set, save one word in every object. Framework GC only. 
diff --git a/pypy/doc/config/objspace.usemodules._lsprof.rst b/pypy/doc/config/objspace.usemodules._lsprof.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._lsprof.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the '_lsprof' module. diff --git a/pypy/doc/config/translation.jit_profiler.rst b/pypy/doc/config/translation.jit_profiler.txt copy from pypy/doc/config/translation.jit_profiler.rst copy to pypy/doc/config/translation.jit_profiler.txt diff --git a/pypy/doc/config/objspace.usemodules._sha.rst b/pypy/doc/config/objspace.usemodules._sha.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._sha.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in _'sha' module. -This module is expected to be working and is included by default. -There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. diff --git a/pypy/doc/config/translation.force_make.rst b/pypy/doc/config/translation.force_make.txt copy from pypy/doc/config/translation.force_make.rst copy to pypy/doc/config/translation.force_make.txt diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.rst b/pypy/doc/config/objspace.usemodules._minimal_curses.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._minimal_curses.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_curses' module. -This module is just a stub. It only implements a few functions. diff --git a/pypy/doc/config/translation.instrumentctl.rst b/pypy/doc/config/translation.instrumentctl.rst deleted file mode 100644 --- a/pypy/doc/config/translation.instrumentctl.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/config/translation.backendopt.constfold.rst b/pypy/doc/config/translation.backendopt.constfold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.constfold.rst +++ /dev/null @@ -1,1 +0,0 @@ -Do constant folding of operations and constant propagation on flowgraphs. diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.rst b/pypy/doc/config/objspace.usemodules.pyexpat.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.pyexpat.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use (experimental) pyexpat module written in RPython, instead of CTypes -version which is used by default. diff --git a/pypy/doc/config/objspace.usemodules.binascii.rst b/pypy/doc/config/objspace.usemodules.binascii.txt copy from pypy/doc/config/objspace.usemodules.binascii.rst copy to pypy/doc/config/objspace.usemodules.binascii.txt diff --git a/pypy/doc/config/translation.type_system.rst b/pypy/doc/config/translation.type_system.txt copy from pypy/doc/config/translation.type_system.rst copy to pypy/doc/config/translation.type_system.txt diff --git a/pypy/doc/config/objspace.std.withtypeversion.rst b/pypy/doc/config/objspace.std.withtypeversion.txt copy from pypy/doc/config/objspace.std.withtypeversion.rst copy to pypy/doc/config/objspace.std.withtypeversion.txt diff --git a/pypy/doc/config/objspace.usemodules._io.rst b/pypy/doc/config/objspace.usemodules._io.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._io.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_io module. -Used by the 'io' standard lib module. This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/objspace.usemodules._lsprof.rst b/pypy/doc/config/objspace.usemodules._lsprof.txt copy from pypy/doc/config/objspace.usemodules._lsprof.rst copy to pypy/doc/config/objspace.usemodules._lsprof.txt diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.rst b/pypy/doc/config/translation.backendopt.remove_asserts.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.remove_asserts.rst +++ /dev/null @@ -1,1 +0,0 @@ -Remove raising of assertions from the flowgraphs, which might give small speedups. diff --git a/pypy/doc/config/objspace.translationmodules.rst b/pypy/doc/config/objspace.translationmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.translationmodules.rst +++ /dev/null @@ -1,1 +0,0 @@ -This option enables all modules which are needed to translate PyPy using PyPy. diff --git a/pypy/doc/config/objspace.usemodules.array.rst b/pypy/doc/config/objspace.usemodules.array.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.array.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use interpreter-level version of array module (on by default). diff --git a/pypy/doc/config/objspace.usemodules.termios.rst b/pypy/doc/config/objspace.usemodules.termios.txt copy from pypy/doc/config/objspace.usemodules.termios.rst copy to pypy/doc/config/objspace.usemodules.termios.txt diff --git a/pypy/doc/config/translation.backendopt.mallocs.rst b/pypy/doc/config/translation.backendopt.mallocs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.mallocs.rst +++ /dev/null @@ -1,29 +0,0 @@ -This optimization enables "malloc removal", which "explodes" -allocations of structures which do not escape from the function they -are allocated in into one or more additional local variables. - -An example. 
Consider this rather unlikely seeming code:: - - class C: - pass - def f(y): - c = C() - c.x = y - return c.x - -Malloc removal will spot that the ``C`` object can never leave ``f`` -and replace the above with code like this:: - - def f(y): - _c__x = y - return _c__x - -It is rare for code to be directly written in a way that allows this -optimization to be useful, but inlining often results in opportunities -for its use (and indeed, this is one of the main reasons PyPy does its -own inlining rather than relying on the C compilers). - -For much more information about this and other optimizations you can -read section 4.1 of the technical report on "Massive Parallelism and -Translation Aspects" which you can find on the `Technical reports page -<../index-report.html>`__. diff --git a/pypy/doc/config/translation.rweakref.rst b/pypy/doc/config/translation.rweakref.rst deleted file mode 100644 --- a/pypy/doc/config/translation.rweakref.rst +++ /dev/null @@ -1,3 +0,0 @@ -This indicates if the backend and GC policy support RPython-level weakrefs. -Can be tested in an RPython program to select between two implementation -strategies. 
diff --git a/pypy/doc/config/objspace.opcodes.rst b/pypy/doc/config/objspace.opcodes.txt copy from pypy/doc/config/objspace.opcodes.rst copy to pypy/doc/config/objspace.opcodes.txt diff --git a/pypy/doc/config/objspace.usemodules.errno.rst b/pypy/doc/config/objspace.usemodules.errno.txt copy from pypy/doc/config/objspace.usemodules.errno.rst copy to pypy/doc/config/objspace.usemodules.errno.txt diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.rst b/pypy/doc/config/objspace.usemodules.cStringIO.txt copy from pypy/doc/config/objspace.usemodules.cStringIO.rst copy to pypy/doc/config/objspace.usemodules.cStringIO.txt diff --git a/pypy/doc/config/objspace.usemodules.thread.rst b/pypy/doc/config/objspace.usemodules.thread.txt copy from pypy/doc/config/objspace.usemodules.thread.rst copy to pypy/doc/config/objspace.usemodules.thread.txt diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.rst b/pypy/doc/config/objspace.std.logspaceoptypes.txt copy from pypy/doc/config/objspace.std.logspaceoptypes.rst copy to pypy/doc/config/objspace.std.logspaceoptypes.txt diff --git a/pypy/doc/config/translation.simplifying.rst b/pypy/doc/config/translation.simplifying.rst deleted file mode 100644 --- a/pypy/doc/config/translation.simplifying.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.rst b/pypy/doc/config/objspace.usemodules.cStringIO.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cStringIO.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the built-in cStringIO module. 
- -If not enabled, importing cStringIO gives you the app-level -implementation from the standard library StringIO module. diff --git a/pypy/doc/config/objspace.usemodules._stackless.rst b/pypy/doc/config/objspace.usemodules._stackless.txt copy from pypy/doc/config/objspace.usemodules._stackless.rst copy to pypy/doc/config/objspace.usemodules._stackless.txt diff --git a/pypy/doc/config/translation.instrument.rst b/pypy/doc/config/translation.instrument.txt copy from pypy/doc/config/translation.instrument.rst copy to pypy/doc/config/translation.instrument.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for profile-based inlining (:config:`translation.backendopt.profile_based_inline`). - -.. 
internal diff --git a/pypy/doc/config/translation.thread.rst b/pypy/doc/config/translation.thread.txt copy from pypy/doc/config/translation.thread.rst copy to pypy/doc/config/translation.thread.txt diff --git a/pypy/doc/config/objspace.std.withmapdict.rst b/pypy/doc/config/objspace.std.withmapdict.txt copy from pypy/doc/config/objspace.std.withmapdict.rst copy to pypy/doc/config/objspace.std.withmapdict.txt diff --git a/pypy/doc/config/objspace.usemodules._ssl.rst b/pypy/doc/config/objspace.usemodules._ssl.txt copy from pypy/doc/config/objspace.usemodules._ssl.rst copy to pypy/doc/config/objspace.usemodules._ssl.txt diff --git a/pypy/doc/config/translation.linkerflags.rst b/pypy/doc/config/translation.linkerflags.txt copy from pypy/doc/config/translation.linkerflags.rst copy to pypy/doc/config/translation.linkerflags.txt diff --git a/pypy/doc/config/translation.withsmallfuncsets.rst b/pypy/doc/config/translation.withsmallfuncsets.rst deleted file mode 100644 --- a/pypy/doc/config/translation.withsmallfuncsets.rst +++ /dev/null @@ -1,3 +0,0 @@ -Represent function sets smaller than this option's value as an integer instead -of a function pointer. A call is then done via a switch on that integer, which -allows inlining etc. Small numbers for this can speed up PyPy (try 5). diff --git a/pypy/doc/config/objspace.usemodules._locale.rst b/pypy/doc/config/objspace.usemodules._locale.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._locale.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_locale' module. -This module runs _locale written in RPython (instead of ctypes version). -It's not really finished yet; it's enabled by default on Windows. 
diff --git a/pypy/doc/config/objspace.usemodules.itertools.rst b/pypy/doc/config/objspace.usemodules.itertools.txt copy from pypy/doc/config/objspace.usemodules.itertools.rst copy to pypy/doc/config/objspace.usemodules.itertools.txt diff --git a/pypy/doc/config/translation.cli.exception_transformer.rst b/pypy/doc/config/translation.cli.exception_transformer.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.exception_transformer.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the exception transformer instead of the native .NET exceptions to -implement RPython exceptions. Enable this option only if you know what -you are doing. diff --git a/pypy/doc/config/objspace.usemodules.marshal.rst b/pypy/doc/config/objspace.usemodules.marshal.txt copy from pypy/doc/config/objspace.usemodules.marshal.rst copy to pypy/doc/config/objspace.usemodules.marshal.txt diff --git a/pypy/doc/config/objspace.std.withsmallint.rst b/pypy/doc/config/objspace.std.withsmallint.txt copy from pypy/doc/config/objspace.std.withsmallint.rst copy to pypy/doc/config/objspace.std.withsmallint.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt diff --git a/pypy/doc/config/objspace.usemodules._sre.rst b/pypy/doc/config/objspace.usemodules._sre.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._sre.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_sre' module. -This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/translation.backendopt.print_statistics.rst b/pypy/doc/config/translation.backendopt.print_statistics.txt copy from pypy/doc/config/translation.backendopt.print_statistics.rst copy to pypy/doc/config/translation.backendopt.print_statistics.txt diff --git a/pypy/doc/config/translation.taggedpointers.rst b/pypy/doc/config/translation.taggedpointers.rst deleted file mode 100644 --- a/pypy/doc/config/translation.taggedpointers.rst +++ /dev/null @@ -1,3 +0,0 @@ -Enable tagged pointers. This option is mostly useful for the Smalltalk and -Prolog interpreters. For the Python interpreter the option -:config:`objspace.std.withsmallint` should be used. diff --git a/pypy/doc/config/objspace.usemodules.imp.rst b/pypy/doc/config/objspace.usemodules.imp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.imp.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'imp' module. -This module is included by default. diff --git a/pypy/doc/config/objspace.usemodules.time.rst b/pypy/doc/config/objspace.usemodules.time.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.time.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the 'time' module. - -Obsolete; use :config:`objspace.usemodules.rctime` for our up-to-date version -of the application-level 'time' module. 
diff --git a/pypy/doc/config/objspace.std.withtproxy.rst b/pypy/doc/config/objspace.std.withtproxy.txt copy from pypy/doc/config/objspace.std.withtproxy.rst copy to pypy/doc/config/objspace.std.withtproxy.txt diff --git a/pypy/doc/config/translation.output.rst b/pypy/doc/config/translation.output.txt copy from pypy/doc/config/translation.output.rst copy to pypy/doc/config/translation.output.txt diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.rst b/pypy/doc/config/objspace.std.mutable_builtintypes.txt copy from pypy/doc/config/objspace.std.mutable_builtintypes.rst copy to pypy/doc/config/objspace.std.mutable_builtintypes.txt diff --git a/pypy/doc/config/translation.taggedpointers.rst b/pypy/doc/config/translation.taggedpointers.txt copy from pypy/doc/config/translation.taggedpointers.rst copy to pypy/doc/config/translation.taggedpointers.txt diff --git a/pypy/doc/config/translation.backendopt.print_statistics.rst b/pypy/doc/config/translation.backendopt.print_statistics.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.print_statistics.rst +++ /dev/null @@ -1,2 +0,0 @@ -Debugging option. Print statics about the forest of flowgraphs as they -go through the various backend optimizations. \ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules._locale.rst b/pypy/doc/config/objspace.usemodules._locale.txt copy from pypy/doc/config/objspace.usemodules._locale.rst copy to pypy/doc/config/objspace.usemodules._locale.txt diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.rst b/pypy/doc/config/translation.backendopt.really_remove_asserts.rst deleted file mode 100644 diff --git a/pypy/doc/config/objspace.usemodules._warnings.rst b/pypy/doc/config/objspace.usemodules._warnings.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._warnings.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the '_warning' module. This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.rst b/pypy/doc/config/objspace.std.getattributeshortcut.txt copy from pypy/doc/config/objspace.std.getattributeshortcut.rst copy to pypy/doc/config/objspace.std.getattributeshortcut.txt diff --git a/pypy/doc/config/objspace.usemodules.bz2.rst b/pypy/doc/config/objspace.usemodules.bz2.txt copy from pypy/doc/config/objspace.usemodules.bz2.rst copy to pypy/doc/config/objspace.usemodules.bz2.txt diff --git a/pypy/doc/config/objspace.std.withstrjoin.rst b/pypy/doc/config/objspace.std.withstrjoin.txt copy from pypy/doc/config/objspace.std.withstrjoin.rst copy to pypy/doc/config/objspace.std.withstrjoin.txt diff --git a/pypy/doc/config/translation.debug.rst b/pypy/doc/config/translation.debug.txt copy from pypy/doc/config/translation.debug.rst copy to pypy/doc/config/translation.debug.txt diff --git a/pypy/doc/config/objspace.usemodules.token.rst b/pypy/doc/config/objspace.usemodules.token.txt copy from pypy/doc/config/objspace.usemodules.token.rst copy to pypy/doc/config/objspace.usemodules.token.txt diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.rst b/pypy/doc/config/objspace.std.mutable_builtintypes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.mutable_builtintypes.rst +++ /dev/null @@ -1,1 +0,0 @@ -Allow modification of builtin types. Disabled by default. 
diff --git a/pypy/doc/config/translation.vanilla.rst b/pypy/doc/config/translation.vanilla.txt copy from pypy/doc/config/translation.vanilla.rst copy to pypy/doc/config/translation.vanilla.txt diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.rst b/pypy/doc/config/objspace.std.withprebuiltchar.txt copy from pypy/doc/config/objspace.std.withprebuiltchar.rst copy to pypy/doc/config/objspace.std.withprebuiltchar.txt diff --git a/pypy/doc/config/translation.profopt.rst b/pypy/doc/config/translation.profopt.rst deleted file mode 100644 --- a/pypy/doc/config/translation.profopt.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use GCCs profile-guided optimizations. This option specifies the the -arguments with which to call pypy-c (and in general the translated -RPython program) to gather profile data. Example for pypy-c: "-c 'from -richards import main;main(); from test import pystone; -pystone.main()'" diff --git a/pypy/doc/config/objspace.usemodules.clr.rst b/pypy/doc/config/objspace.usemodules.clr.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/objspace.usemodules.crypt.rst b/pypy/doc/config/objspace.usemodules.crypt.txt copy from pypy/doc/config/objspace.usemodules.crypt.rst copy to pypy/doc/config/objspace.usemodules.crypt.txt diff --git a/pypy/doc/config/objspace.usemodules._ssl.rst b/pypy/doc/config/objspace.usemodules._ssl.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ssl.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the '_ssl' module, which implements SSL socket operations. diff --git a/pypy/doc/config/objspace.usemodules._socket.rst b/pypy/doc/config/objspace.usemodules._socket.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._socket.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use the '_socket' module. 
- -This is our implementation of '_socket', the Python builtin module -exposing socket primitives, which is wrapped and used by the standard -library 'socket.py' module. It is based on `rffi`_. - -.. _`rffi`: ../rffi.html diff --git a/pypy/doc/config/translation.backendopt.inline.rst b/pypy/doc/config/translation.backendopt.inline.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline.rst +++ /dev/null @@ -1,10 +0,0 @@ -Inline flowgraphs based on an heuristic, the default one considers -essentially the a weight for the flowgraph based on the number of -low-level operations in them (see -:config:`translation.backendopt.inline_threshold` ). - -Some amount of inlining in order to have RPython builtin type helpers -inlined is needed for malloc removal -(:config:`translation.backendopt.mallocs`) to be effective. - -This optimization is used by default. diff --git a/pypy/doc/config/objspace.std.withropeunicode.rst b/pypy/doc/config/objspace.std.withropeunicode.txt copy from pypy/doc/config/objspace.std.withropeunicode.rst copy to pypy/doc/config/objspace.std.withropeunicode.txt diff --git a/pypy/doc/config/objspace.std.multimethods.rst b/pypy/doc/config/objspace.std.multimethods.txt copy from pypy/doc/config/objspace.std.multimethods.rst copy to pypy/doc/config/objspace.std.multimethods.txt diff --git a/pypy/doc/config/objspace.name.rst b/pypy/doc/config/objspace.name.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.name.rst +++ /dev/null @@ -1,16 +0,0 @@ -Determine which `Object Space`_ to use. The `Standard Object Space`_ gives the -normal Python semantics, the others are `Object Space Proxies`_ giving -additional features (except the Flow Object Space which is not intended -for normal usage): - - * thunk_: The thunk object space adds lazy evaluation to PyPy. - * taint_: The taint object space adds soft security features. - * dump_: Using this object spaces results in the dumpimp of all operations - to a log. - -.. 
_`Object Space`: ../objspace.html -.. _`Object Space Proxies`: ../objspace-proxies.html -.. _`Standard Object Space`: ../objspace.html#standard-object-space -.. _thunk: ../objspace-proxies.html#thunk -.. _taint: ../objspace-proxies.html#taint -.. _dump: ../objspace-proxies.html#dump diff --git a/pypy/doc/config/objspace.std.withsmalllong.rst b/pypy/doc/config/objspace.std.withsmalllong.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withsmalllong.rst +++ /dev/null @@ -1,5 +0,0 @@ -Enable "small longs", an additional implementation of the Python -type "long", implemented with a C long long. It is mostly useful -on 32-bit; on 64-bit, a C long long is the same as a C long, so -its usefulness is limited to Python objects of type "long" that -would anyway fit in an "int". diff --git a/pypy/doc/config/objspace.opcodes.rst b/pypy/doc/config/objspace.opcodes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/objspace.usemodules.parser.rst b/pypy/doc/config/objspace.usemodules.parser.txt copy from pypy/doc/config/objspace.usemodules.parser.rst copy to pypy/doc/config/objspace.usemodules.parser.txt diff --git a/pypy/doc/config/objspace.std.withrope.rst b/pypy/doc/config/objspace.std.withrope.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrope.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable ropes to be the default string implementation. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#ropes - - diff --git a/pypy/doc/config/objspace.usemodules.crypt.rst b/pypy/doc/config/objspace.usemodules.crypt.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.crypt.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'crypt' module. -This module is expected to be fully working. 
diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.rst b/pypy/doc/config/objspace.std.logspaceoptypes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.logspaceoptypes.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. internal - -Wrap "simple" bytecode implementations like BINARY_ADD with code that collects -information about which types these bytecodes receive as arguments. diff --git a/pypy/doc/config/objspace.usemodules.rst b/pypy/doc/config/objspace.usemodules.txt copy from pypy/doc/config/objspace.usemodules.rst copy to pypy/doc/config/objspace.usemodules.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for clever malloc removal (:config:`translation.backendopt.clever_malloc_removal`). - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._demo.rst b/pypy/doc/config/objspace.usemodules._demo.txt copy from pypy/doc/config/objspace.usemodules._demo.rst copy to pypy/doc/config/objspace.usemodules._demo.txt diff --git a/pypy/doc/config/translation.noprofopt.rst b/pypy/doc/config/translation.noprofopt.txt copy from pypy/doc/config/translation.noprofopt.rst copy to pypy/doc/config/translation.noprofopt.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for clever malloc removal (:config:`translation.backendopt.clever_malloc_removal`). 
diff --git a/pypy/doc/config/translation.gcrootfinder.rst b/pypy/doc/config/translation.gcrootfinder.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gcrootfinder.rst +++ /dev/null @@ -1,15 +0,0 @@ -Choose method how to find roots in the GC. Boehm and refcounting have their own -methods, this is mostly only interesting for framework GCs. For those you have -a choice of various alternatives: - - - use a shadow stack (XXX link to paper), e.g. explicitly maintaining a stack - of roots - - - use stackless to find roots by unwinding the stack. Requires - :config:`translation.stackless`. Note that this turned out to - be slower than just using a shadow stack. - - - use GCC and i386 specific assembler hackery to find the roots on the stack. - This is fastest but platform specific. - - - Use LLVM's GC facilities to find the roots. diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst b/pypy/doc/config/translation.builtins_can_raise_exceptions.txt copy from pypy/doc/config/translation.builtins_can_raise_exceptions.rst copy to pypy/doc/config/translation.builtins_can_raise_exceptions.txt diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst b/pypy/doc/config/translation.backendopt.raisingop2direct_call.txt copy from pypy/doc/config/translation.backendopt.raisingop2direct_call.rst copy to pypy/doc/config/translation.backendopt.raisingop2direct_call.txt diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.rst b/pypy/doc/config/objspace.usemodules._minimal_curses.txt copy from pypy/doc/config/objspace.usemodules._minimal_curses.rst copy to pypy/doc/config/objspace.usemodules._minimal_curses.txt diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.rst b/pypy/doc/config/objspace.std.withdictmeasurement.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withdictmeasurement.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/config/translation.ootype.mangle.rst b/pypy/doc/config/translation.ootype.mangle.txt copy from pypy/doc/config/translation.ootype.mangle.rst copy to pypy/doc/config/translation.ootype.mangle.txt diff --git a/pypy/doc/config/objspace.usemodules.zipimport.rst b/pypy/doc/config/objspace.usemodules.zipimport.txt copy from pypy/doc/config/objspace.usemodules.zipimport.rst copy to pypy/doc/config/objspace.usemodules.zipimport.txt diff --git a/pypy/doc/config/translation.jit_ffi.rst b/pypy/doc/config/translation.jit_ffi.txt copy from pypy/doc/config/translation.jit_ffi.rst copy to pypy/doc/config/translation.jit_ffi.txt diff --git a/pypy/doc/config/objspace.usemodules.itertools.rst b/pypy/doc/config/objspace.usemodules.itertools.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.itertools.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the interp-level 'itertools' module. -If not included, a slower app-level version of itertools is used. diff --git a/pypy/doc/config/translation.list_comprehension_operations.rst b/pypy/doc/config/translation.list_comprehension_operations.txt copy from pypy/doc/config/translation.list_comprehension_operations.rst copy to pypy/doc/config/translation.list_comprehension_operations.txt diff --git a/pypy/doc/config/objspace.usemodules.rst b/pypy/doc/config/objspace.usemodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/objspace.usemodules._rawffi.rst b/pypy/doc/config/objspace.usemodules._rawffi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._rawffi.rst +++ /dev/null @@ -1,3 +0,0 @@ -An experimental module providing very low-level interface to -C-level libraries, for use when implementing ctypes, not -intended for a direct use at all. 
\ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.rst b/pypy/doc/config/objspace.usemodules._pickle_support.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._pickle_support.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_pickle_support' module. -Internal helpers for pickling runtime builtin types (frames, cells, etc) -for `stackless`_ tasklet pickling support. -.. _`stackless`: ../stackless.html - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._demo.rst b/pypy/doc/config/objspace.usemodules._demo.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._demo.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_demo' module. - -This is the demo module for mixed modules. Not enabled by default. diff --git a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst b/pypy/doc/config/translation.backendopt.merge_if_blocks.txt copy from pypy/doc/config/translation.backendopt.merge_if_blocks.rst copy to pypy/doc/config/translation.backendopt.merge_if_blocks.txt diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.txt copy from pypy/doc/config/objspace.extmodules.rst copy to pypy/doc/config/objspace.extmodules.txt diff --git a/pypy/doc/config/objspace.usemodules._rawffi.rst b/pypy/doc/config/objspace.usemodules._rawffi.txt copy from pypy/doc/config/objspace.usemodules._rawffi.rst copy to pypy/doc/config/objspace.usemodules._rawffi.txt diff --git a/pypy/doc/config/translation.ootype.rst b/pypy/doc/config/translation.ootype.rst deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.rst +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/config/objspace.usemodules._hashlib.rst b/pypy/doc/config/objspace.usemodules._hashlib.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._hashlib.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_hashlib' module. 
-Used by the 'hashlib' standard lib module, and indirectly by the various cryptographic libs. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._socket.rst b/pypy/doc/config/objspace.usemodules._socket.txt copy from pypy/doc/config/objspace.usemodules._socket.rst copy to pypy/doc/config/objspace.usemodules._socket.txt diff --git a/pypy/doc/config/translation.cc.rst b/pypy/doc/config/translation.cc.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cc.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify which C compiler to use. diff --git a/pypy/doc/config/objspace.lonepycfiles.rst b/pypy/doc/config/objspace.lonepycfiles.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.lonepycfiles.rst +++ /dev/null @@ -1,16 +0,0 @@ -If turned on, PyPy accepts to import a module ``x`` if it finds a -file ``x.pyc`` even if there is no file ``x.py``. - -This is the way that CPython behaves, but it is disabled by -default for PyPy because it is a common cause of issues: most -typically, the ``x.py`` file is removed (manually or by a -version control system) but the ``x`` module remains -accidentally importable because the ``x.pyc`` file stays -around. - -The usual reason for wanting this feature is to distribute -non-open-source Python programs by distributing ``pyc`` files -only, but this use case is not practical for PyPy at the -moment because multiple versions of PyPy compiled with various -optimizations might be unable to load each other's ``pyc`` -files. diff --git a/pypy/doc/config/objspace.std.withtypeversion.rst b/pypy/doc/config/objspace.std.withtypeversion.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.rst +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. 
It does not make sense to -enable this option alone. - -.. internal diff --git a/pypy/doc/config/translation.insist.rst b/pypy/doc/config/translation.insist.txt copy from pypy/doc/config/translation.insist.rst copy to pypy/doc/config/translation.insist.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt diff --git a/pypy/doc/config/objspace.usemodules.fcntl.rst b/pypy/doc/config/objspace.usemodules.fcntl.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.fcntl.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'fcntl' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.usemodules.marshal.rst b/pypy/doc/config/objspace.usemodules.marshal.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.marshal.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'marshal' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.gc.rst b/pypy/doc/config/objspace.usemodules.gc.txt copy from pypy/doc/config/objspace.usemodules.gc.rst copy to pypy/doc/config/objspace.usemodules.gc.txt diff --git a/pypy/doc/config/objspace.std.withsmalllong.rst b/pypy/doc/config/objspace.std.withsmalllong.txt copy from pypy/doc/config/objspace.std.withsmalllong.rst copy to pypy/doc/config/objspace.std.withsmalllong.txt diff --git a/pypy/doc/config/objspace.nofaking.rst b/pypy/doc/config/objspace.nofaking.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.nofaking.rst +++ /dev/null @@ -1,7 +0,0 @@ -This options prevents the automagic borrowing of implementations of -modules and types not present in PyPy from CPython. 
- -As such, it is required when translating, as then there is no CPython -to borrow from. For running py.py it is useful for testing the -implementation of modules like "posix", but it makes everything even -slower than it is already. diff --git a/pypy/doc/config/translation.gctransformer.rst b/pypy/doc/config/translation.gctransformer.txt copy from pypy/doc/config/translation.gctransformer.rst copy to pypy/doc/config/translation.gctransformer.txt diff --git a/pypy/doc/config/translation.backend.rst b/pypy/doc/config/translation.backend.txt copy from pypy/doc/config/translation.backend.rst copy to pypy/doc/config/translation.backend.txt diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.rst b/pypy/doc/config/translation.backendopt.really_remove_asserts.txt copy from pypy/doc/config/translation.backendopt.really_remove_asserts.rst copy to pypy/doc/config/translation.backendopt.really_remove_asserts.txt diff --git a/pypy/doc/config/objspace.usemodules.exceptions.rst b/pypy/doc/config/objspace.usemodules.exceptions.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.exceptions.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'exceptions' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/config/objspace.std.withstrjoin.rst b/pypy/doc/config/objspace.std.withstrjoin.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrjoin.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable "string join" objects. - -See the page about `Standard Interpreter Optimizations`_ for more details. - -.. 
_`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#string-join-objects - - diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal.txt diff --git a/pypy/doc/config/objspace.usemodules.gc.rst b/pypy/doc/config/objspace.usemodules.gc.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.gc.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the 'gc' module. -This module is expected to be working and is included by default. -Note that since the gc module is highly implementation specific, it contains -only the ``collect`` function in PyPy, which forces a collection when compiled -with the framework or with Boehm. diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.rst b/pypy/doc/config/objspace.usemodules.micronumpy.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.micronumpy.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the micronumpy module. -This module provides a very basic numpy-like interface. Major use-case -is to show how jit scales for other code. diff --git a/pypy/doc/config/translation.log.rst b/pypy/doc/config/translation.log.rst deleted file mode 100644 --- a/pypy/doc/config/translation.log.rst +++ /dev/null @@ -1,5 +0,0 @@ -Include debug prints in the translation. - -These must be enabled by setting the PYPYLOG environment variable. -The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug.h. diff --git a/pypy/doc/config/objspace.usemodules.rbench.rst b/pypy/doc/config/objspace.usemodules.rbench.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rbench.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the built-in 'rbench' module. 
-This module contains geninterpreted versions of pystone and richards, -so it is useful to measure the interpretation overhead of the various -pypy-\*. diff --git a/pypy/doc/config/objspace.usemodules.__builtin__.rst b/pypy/doc/config/objspace.usemodules.__builtin__.txt copy from pypy/doc/config/objspace.usemodules.__builtin__.rst copy to pypy/doc/config/objspace.usemodules.__builtin__.txt diff --git a/pypy/doc/config/objspace.std.withstrbuf.rst b/pypy/doc/config/objspace.std.withstrbuf.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrbuf.rst +++ /dev/null @@ -1,4 +0,0 @@ -Enable "string buffer" objects. - -Similar to "string join" objects, but using a StringBuilder to represent -a string built by repeated application of ``+=``. diff --git a/pypy/doc/config/translation.compilerflags.rst b/pypy/doc/config/translation.compilerflags.txt copy from pypy/doc/config/translation.compilerflags.rst copy to pypy/doc/config/translation.compilerflags.txt diff --git a/pypy/doc/config/objspace.usemodules.cmath.rst b/pypy/doc/config/objspace.usemodules.cmath.txt copy from pypy/doc/config/objspace.usemodules.cmath.rst copy to pypy/doc/config/objspace.usemodules.cmath.txt diff --git a/pypy/doc/config/objspace.usemodules._bisect.rst b/pypy/doc/config/objspace.usemodules._bisect.txt copy from pypy/doc/config/objspace.usemodules._bisect.rst copy to pypy/doc/config/objspace.usemodules._bisect.txt diff --git a/pypy/doc/config/translation.no__thread.rst b/pypy/doc/config/translation.no__thread.txt copy from pypy/doc/config/translation.no__thread.rst copy to pypy/doc/config/translation.no__thread.txt diff --git a/pypy/doc/config/translation.noprofopt.rst b/pypy/doc/config/translation.noprofopt.rst deleted file mode 100644 diff --git a/pypy/doc/config/translation.countmallocs.rst b/pypy/doc/config/translation.countmallocs.txt copy from pypy/doc/config/translation.countmallocs.rst copy to pypy/doc/config/translation.countmallocs.txt diff --git 
a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst +++ /dev/null @@ -1,12 +0,0 @@ -Introduce a new opcode called ``CALL_LIKELY_BUILTIN``. It is used when something -is called, that looks like a builtin function (but could in reality be shadowed -by a name in the module globals). For all module globals dictionaries it is -then tracked which builtin name is shadowed in this module. If the -``CALL_LIKELY_BUILTIN`` opcode is executed, it is checked whether the builtin is -shadowed. If not, the corresponding builtin is called. Otherwise the object that -is shadowing it is called instead. If no shadowing is happening, this saves two -dictionary lookups on calls to builtins. - -For more information, see the section in `Standard Interpreter Optimizations`_. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#call-likely-builtin diff --git a/pypy/doc/config/objspace.usemodules.symbol.rst b/pypy/doc/config/objspace.usemodules.symbol.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.symbol.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'symbol' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.backendopt.storesink.rst b/pypy/doc/config/translation.backendopt.storesink.txt copy from pypy/doc/config/translation.backendopt.storesink.rst copy to pypy/doc/config/translation.backendopt.storesink.txt diff --git a/pypy/doc/config/translation.cli.rst b/pypy/doc/config/translation.cli.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. 
intentionally empty diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.rst b/pypy/doc/config/translation.backendopt.remove_asserts.txt copy from pypy/doc/config/translation.backendopt.remove_asserts.rst copy to pypy/doc/config/translation.backendopt.remove_asserts.txt diff --git a/pypy/doc/config/translation.cli.rst b/pypy/doc/config/translation.cli.txt copy from pypy/doc/config/translation.cli.rst copy to pypy/doc/config/translation.cli.txt diff --git a/pypy/doc/config/translation.backendopt.none.rst b/pypy/doc/config/translation.backendopt.none.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.none.rst +++ /dev/null @@ -1,1 +0,0 @@ -Do not run any backend optimizations. diff --git a/pypy/doc/config/objspace.std.optimized_comparison_op.rst b/pypy/doc/config/objspace.std.optimized_comparison_op.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_comparison_op.rst +++ /dev/null @@ -1,1 +0,0 @@ -Optimize the comparison of two integers a bit. diff --git a/pypy/doc/config/objspace.usemodules._testing.rst b/pypy/doc/config/objspace.usemodules._testing.txt copy from pypy/doc/config/objspace.usemodules._testing.rst copy to pypy/doc/config/objspace.usemodules._testing.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.rst b/pypy/doc/config/translation.backendopt.profile_based_inline.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline.txt diff --git a/pypy/doc/config/objspace.geninterp.rst b/pypy/doc/config/objspace.geninterp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.geninterp.rst +++ /dev/null @@ -1,4 +0,0 @@ -This option enables `geninterp`_. This will usually make the PyPy interpreter -significantly faster (but also a bit bigger). - -.. 
_`geninterp`: ../geninterp.html diff --git a/pypy/doc/config/objspace.usemodules.zipimport.rst b/pypy/doc/config/objspace.usemodules.zipimport.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.zipimport.rst +++ /dev/null @@ -1,3 +0,0 @@ -This module implements zipimport mechanism described -in PEP 302. It's supposed to work and translate, so it's included -by default \ No newline at end of file diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst b/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst +++ /dev/null @@ -1,10 +0,0 @@ -Enable a pair of bytecodes that speed up method calls. -See ``pypy.interpreter.callmethod`` for a description. - -The goal is to avoid creating the bound method object in the common -case. So far, this only works for calls with no keyword, no ``*arg`` -and no ``**arg`` but it would be easy to extend. - -For more information, see the section in `Standard Interpreter Optimizations`_. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#lookup-method-call-method diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.rst b/pypy/doc/config/objspace.disable_call_speedhacks.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.disable_call_speedhacks.rst +++ /dev/null @@ -1,2 +0,0 @@ -disable the speed hacks that the interpreter normally does. Usually you don't -want to set this to False, but some object spaces require it. diff --git a/pypy/doc/config/objspace.std.withtproxy.rst b/pypy/doc/config/objspace.std.withtproxy.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtproxy.rst +++ /dev/null @@ -1,3 +0,0 @@ -Enable `transparent proxies`_. - -.. 
_`transparent proxies`: ../objspace-proxies.html#tproxy diff --git a/pypy/doc/config/objspace.usemodules._codecs.rst b/pypy/doc/config/objspace.usemodules._codecs.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._codecs.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_codecs' module. -Used by the 'codecs' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst b/pypy/doc/config/translation.backendopt.merge_if_blocks.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst +++ /dev/null @@ -1,26 +0,0 @@ -This optimization converts parts of flow graphs that result from -chains of ifs and elifs like this into merged blocks. - -By default flow graphing this kind of code:: - - if x == 0: - f() - elif x == 1: - g() - elif x == 4: - h() - else: - j() - -will result in a chain of blocks with two exits, somewhat like this: - -.. image:: unmergedblocks.png - -(reflecting how Python would interpret this code). Running this -optimization will transform the block structure to contain a single -"choice block" with four exits: - -.. image:: mergedblocks.png - -This can then be turned into a switch by the C backend, allowing the C -compiler to produce more efficient code. diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst b/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. Transformation required by the LLVM backend. - -.. 
internal diff --git a/pypy/doc/config/translation.log.rst b/pypy/doc/config/translation.log.txt copy from pypy/doc/config/translation.log.rst copy to pypy/doc/config/translation.log.txt diff --git a/pypy/doc/config/objspace.usemodules.rbench.rst b/pypy/doc/config/objspace.usemodules.rbench.txt copy from pypy/doc/config/objspace.usemodules.rbench.rst copy to pypy/doc/config/objspace.usemodules.rbench.txt diff --git a/pypy/doc/config/objspace.usemodules._file.rst b/pypy/doc/config/objspace.usemodules._file.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._file.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the '_file' module. It is an internal module that contains helper -functionality for the builtin ``file`` type. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.rst b/pypy/doc/config/objspace.usemodules.pypyjit.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.pypyjit.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'pypyjit' module. diff --git a/pypy/doc/config/translation.secondaryentrypoints.rst b/pypy/doc/config/translation.secondaryentrypoints.rst deleted file mode 100644 --- a/pypy/doc/config/translation.secondaryentrypoints.rst +++ /dev/null @@ -1,1 +0,0 @@ -Enable secondary entrypoints support list. Needed for cpyext module. 
diff --git a/pypy/doc/config/translation.backendopt.mallocs.rst b/pypy/doc/config/translation.backendopt.mallocs.txt copy from pypy/doc/config/translation.backendopt.mallocs.rst copy to pypy/doc/config/translation.backendopt.mallocs.txt diff --git a/pypy/doc/config/translation.dump_static_data_info.rst b/pypy/doc/config/translation.dump_static_data_info.txt copy from pypy/doc/config/translation.dump_static_data_info.rst copy to pypy/doc/config/translation.dump_static_data_info.txt diff --git a/pypy/doc/config/objspace.usemodules.zlib.rst b/pypy/doc/config/objspace.usemodules.zlib.txt copy from pypy/doc/config/objspace.usemodules.zlib.rst copy to pypy/doc/config/objspace.usemodules.zlib.txt diff --git a/pypy/doc/config/translation.backendopt.inline_heuristic.rst b/pypy/doc/config/translation.backendopt.inline_heuristic.txt copy from pypy/doc/config/translation.backendopt.inline_heuristic.rst copy to pypy/doc/config/translation.backendopt.inline_heuristic.txt diff --git a/pypy/doc/config/objspace.usemodules.symbol.rst b/pypy/doc/config/objspace.usemodules.symbol.txt copy from pypy/doc/config/objspace.usemodules.symbol.rst copy to pypy/doc/config/objspace.usemodules.symbol.txt diff --git a/pypy/doc/config/translation.instrument.rst b/pypy/doc/config/translation.instrument.rst deleted file mode 100644 --- a/pypy/doc/config/translation.instrument.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/config/translation.make_jobs.rst b/pypy/doc/config/translation.make_jobs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.make_jobs.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify number of make jobs for make command. diff --git a/pypy/doc/config/objspace.rst b/pypy/doc/config/objspace.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. 
intentionally empty diff --git a/pypy/doc/config/objspace.usemodules.rctime.rst b/pypy/doc/config/objspace.usemodules.rctime.txt copy from pypy/doc/config/objspace.usemodules.rctime.rst copy to pypy/doc/config/objspace.usemodules.rctime.txt diff --git a/pypy/doc/config/objspace.usemodules._sre.rst b/pypy/doc/config/objspace.usemodules._sre.txt copy from pypy/doc/config/objspace.usemodules._sre.rst copy to pypy/doc/config/objspace.usemodules._sre.txt diff --git a/pypy/doc/config/objspace.nofaking.rst b/pypy/doc/config/objspace.nofaking.txt copy from pypy/doc/config/objspace.nofaking.rst copy to pypy/doc/config/objspace.nofaking.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst +++ /dev/null @@ -1,10 +0,0 @@ -Try to inline flowgraphs based on whether doing so would enable malloc -removal (:config:`translation.backendopt.mallocs`.) by eliminating -calls that result in escaping. This is an experimental optimization, -also right now some eager inlining is necessary for helpers doing -malloc itself to be inlined first for this to be effective. -This option enable also an extra subsequent malloc removal phase. - -Callee flowgraphs are considered candidates based on a weight heuristic like -for basic inlining. (see :config:`translation.backendopt.inline`, -:config:`translation.backendopt.clever_malloc_removal_threshold` ). diff --git a/pypy/doc/config/objspace.usemodules.sys.rst b/pypy/doc/config/objspace.usemodules.sys.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.sys.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'sys' module. -This module is essential, included by default and should not be removed. 
diff --git a/pypy/doc/config/objspace.usemodules._collections.rst b/pypy/doc/config/objspace.usemodules._collections.txt copy from pypy/doc/config/objspace.usemodules._collections.rst copy to pypy/doc/config/objspace.usemodules._collections.txt diff --git a/pypy/doc/config/translation.backendopt.inline.rst b/pypy/doc/config/translation.backendopt.inline.txt copy from pypy/doc/config/translation.backendopt.inline.rst copy to pypy/doc/config/translation.backendopt.inline.txt diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.rst b/pypy/doc/config/objspace.std.prebuiltintfrom.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.prebuiltintfrom.rst +++ /dev/null @@ -1,1 +0,0 @@ -see :config:`objspace.std.withprebuiltint`. diff --git a/pypy/doc/config/translation.countmallocs.rst b/pypy/doc/config/translation.countmallocs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.countmallocs.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal; used by some of the C backend tests to check that the number of -allocations matches the number of frees. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._io.rst b/pypy/doc/config/objspace.usemodules._io.txt copy from pypy/doc/config/objspace.usemodules._io.rst copy to pypy/doc/config/objspace.usemodules._io.txt diff --git a/pypy/doc/config/objspace.usemodules._winreg.rst b/pypy/doc/config/objspace.usemodules._winreg.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._winreg.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the built-in '_winreg' module, provides access to the Windows registry. -This module is expected to be working and is included by default on Windows. 
diff --git a/pypy/doc/config/objspace.usemodules.clr.rst b/pypy/doc/config/objspace.usemodules.clr.txt copy from pypy/doc/config/objspace.usemodules.clr.rst copy to pypy/doc/config/objspace.usemodules.clr.txt diff --git a/pypy/doc/config/translation.jit_ffi.rst b/pypy/doc/config/translation.jit_ffi.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_ffi.rst +++ /dev/null @@ -1,1 +0,0 @@ -Internal option: enable OptFfiCall in the jit optimizations. diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.rst b/pypy/doc/config/objspace.usemodules._pickle_support.txt copy from pypy/doc/config/objspace.usemodules._pickle_support.rst copy to pypy/doc/config/objspace.usemodules._pickle_support.txt diff --git a/pypy/doc/config/translation.verbose.rst b/pypy/doc/config/translation.verbose.rst deleted file mode 100644 --- a/pypy/doc/config/translation.verbose.rst +++ /dev/null @@ -1,1 +0,0 @@ -Print some more information during translation. diff --git a/pypy/doc/config/objspace.usemodules.math.rst b/pypy/doc/config/objspace.usemodules.math.txt copy from pypy/doc/config/objspace.usemodules.math.rst copy to pypy/doc/config/objspace.usemodules.math.txt diff --git a/pypy/doc/config/translation.compilerflags.rst b/pypy/doc/config/translation.compilerflags.rst deleted file mode 100644 --- a/pypy/doc/config/translation.compilerflags.rst +++ /dev/null @@ -1,1 +0,0 @@ -Experimental. Specify extra flags to pass to the C compiler. diff --git a/pypy/doc/config/objspace.std.withsmallint.rst b/pypy/doc/config/objspace.std.withsmallint.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withsmallint.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use "tagged pointers" to represent small enough integer values: Integers that -fit into 31 bits (respective 63 bits on 64 bit machines) are not represented by -boxing them in an instance of ``W_IntObject``. 
Instead they are represented as a -pointer having the lowest bit set and the rest of the bits used to store the -value of the integer. This gives a small speedup for integer operations as well -as better memory behaviour. diff --git a/pypy/doc/config/translation.sandbox.rst b/pypy/doc/config/translation.sandbox.txt copy from pypy/doc/config/translation.sandbox.rst copy to pypy/doc/config/translation.sandbox.txt diff --git a/pypy/doc/config/translation.gctransformer.rst b/pypy/doc/config/translation.gctransformer.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gctransformer.rst +++ /dev/null @@ -1,1 +0,0 @@ -internal option diff --git a/pypy/doc/config/objspace.usemodules.binascii.rst b/pypy/doc/config/objspace.usemodules.binascii.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.binascii.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the RPython 'binascii' module. diff --git a/pypy/doc/config/objspace.usemodules.zlib.rst b/pypy/doc/config/objspace.usemodules.zlib.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.zlib.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'zlib' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.std.rst b/pypy/doc/config/objspace.std.txt copy from pypy/doc/config/objspace.std.rst copy to pypy/doc/config/objspace.std.txt diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.rst b/pypy/doc/config/objspace.usemodules.micronumpy.txt copy from pypy/doc/config/objspace.usemodules.micronumpy.rst copy to pypy/doc/config/objspace.usemodules.micronumpy.txt diff --git a/pypy/doc/config/objspace.usemodules.thread.rst b/pypy/doc/config/objspace.usemodules.thread.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.thread.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'thread' module. 
diff --git a/pypy/doc/config/objspace.usemodules.mmap.rst b/pypy/doc/config/objspace.usemodules.mmap.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.mmap.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'mmap' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.std.withstrbuf.rst b/pypy/doc/config/objspace.std.withstrbuf.txt copy from pypy/doc/config/objspace.std.withstrbuf.rst copy to pypy/doc/config/objspace.std.withstrbuf.txt diff --git a/pypy/doc/config/translation.backendopt.rst b/pypy/doc/config/translation.backendopt.txt copy from pypy/doc/config/translation.backendopt.rst copy to pypy/doc/config/translation.backendopt.txt diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.rst b/pypy/doc/config/objspace.std.withmethodcachecounter.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcachecounter.rst +++ /dev/null @@ -1,1 +0,0 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. diff --git a/pypy/doc/config/objspace.usemodules.rctime.rst b/pypy/doc/config/objspace.usemodules.rctime.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rctime.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use the 'rctime' module. - -'rctime' is our `rffi`_ based implementation of the builtin 'time' module. -It supersedes the less complete :config:`objspace.usemodules.time`, -at least for C-like targets (the C and LLVM backends). - -.. 
_`rffi`: ../rffi.html diff --git a/pypy/doc/config/objspace.usemodules._sha.rst b/pypy/doc/config/objspace.usemodules._sha.txt copy from pypy/doc/config/objspace.usemodules._sha.rst copy to pypy/doc/config/objspace.usemodules._sha.txt diff --git a/pypy/doc/config/objspace.usemodules.time.rst b/pypy/doc/config/objspace.usemodules.time.txt copy from pypy/doc/config/objspace.usemodules.time.rst copy to pypy/doc/config/objspace.usemodules.time.txt diff --git a/pypy/doc/config/objspace.translationmodules.rst b/pypy/doc/config/objspace.translationmodules.txt copy from pypy/doc/config/objspace.translationmodules.rst copy to pypy/doc/config/objspace.translationmodules.txt diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.rst b/pypy/doc/config/translation.backendopt.inline_threshold.txt copy from pypy/doc/config/translation.backendopt.inline_threshold.rst copy to pypy/doc/config/translation.backendopt.inline_threshold.txt diff --git a/pypy/doc/config/translation.backendopt.inline_heuristic.rst b/pypy/doc/config/translation.backendopt.inline_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for basic inlining (:config:`translation.backendopt.inline`). - -.. 
internal diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.rst b/pypy/doc/config/translation.backendopt.stack_optimization.txt copy from pypy/doc/config/translation.backendopt.stack_optimization.rst copy to pypy/doc/config/translation.backendopt.stack_optimization.txt diff --git a/pypy/doc/config/translation.verbose.rst b/pypy/doc/config/translation.verbose.txt copy from pypy/doc/config/translation.verbose.rst copy to pypy/doc/config/translation.verbose.txt diff --git a/pypy/doc/config/translation.secondaryentrypoints.rst b/pypy/doc/config/translation.secondaryentrypoints.txt copy from pypy/doc/config/translation.secondaryentrypoints.rst copy to pypy/doc/config/translation.secondaryentrypoints.txt diff --git a/pypy/doc/config/objspace.lonepycfiles.rst b/pypy/doc/config/objspace.lonepycfiles.txt copy from pypy/doc/config/objspace.lonepycfiles.rst copy to pypy/doc/config/objspace.lonepycfiles.txt diff --git a/pypy/doc/config/objspace.usemodules.oracle.rst b/pypy/doc/config/objspace.usemodules.oracle.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. diff --git a/pypy/doc/config/translation.cli.trace_calls.rst b/pypy/doc/config/translation.cli.trace_calls.txt copy from pypy/doc/config/translation.cli.trace_calls.rst copy to pypy/doc/config/translation.cli.trace_calls.txt diff --git a/pypy/doc/config/objspace.usemodules.struct.rst b/pypy/doc/config/objspace.usemodules.struct.txt copy from pypy/doc/config/objspace.usemodules.struct.rst copy to pypy/doc/config/objspace.usemodules.struct.txt diff --git a/pypy/doc/config/objspace.usemodules._collections.rst b/pypy/doc/config/objspace.usemodules._collections.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._collections.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_collections' module. -Used by the 'collections' standard lib module. 
This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._testing.rst b/pypy/doc/config/objspace.usemodules._testing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._testing.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_testing' module. This module exists only for PyPy own testing purposes. - -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -1,15 +1,47 @@ import autopath import py -from pypy.config import pypyoption, translationoption, config +from pypy.config import pypyoption, translationoption, config, makerestdoc from pypy.doc.config.confrest import all_optiondescrs +all_optiondescrs = [pypyoption.pypy_optiondescription, + translationoption.translation_optiondescription, + ] +start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) + + thisdir = py.path.local(__file__).dirpath() for descr in all_optiondescrs: prefix = descr._name c = config.Config(descr) - thisdir.join(prefix + ".rst").ensure() + thisdir.join(prefix + ".txt").ensure() for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".rst" - f = thisdir.join(basename) - f.ensure() + basename = prefix + "." 
+ p + ".txt" + txtpath = thisdir.join(basename) + txtpath.ensure() + rstpath = txtpath.new(ext=".rst") + + fullpath = txtpath.purebasename + start = fullpath.split(".")[0] + path = fullpath.rsplit(".", 1)[0] + basedescr = start_to_descr.get(start) + if basedescr is None: + continue + if fullpath.count(".") == 0: + descr = basedescr + path = "" + else: + conf = config.Config(basedescr) + subconf, step = conf._cfgimpl_get_home_by_path( + fullpath.split(".", 1)[1]) + descr = getattr(subconf._cfgimpl_descr, step) + text = unicode(descr.make_rest_doc(path).text()) + if txtpath.check(file=True): + content = txtpath.read() + if content: + text += "\nDescription\n===========" + text = u"%s\n\n%s" % (text, unicode(txtpath.read(), "utf-8")) + print path + print "***************************" + print text + rstpath.write(text.encode("utf-8")) diff --git a/pypy/doc/config/objspace.usemodules._weakref.rst b/pypy/doc/config/objspace.usemodules._weakref.txt copy from pypy/doc/config/objspace.usemodules._weakref.rst copy to pypy/doc/config/objspace.usemodules._weakref.txt diff --git a/pypy/doc/config/objspace.usemodules.struct.rst b/pypy/doc/config/objspace.usemodules.struct.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.struct.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in 'struct' module. -This module is expected to be working and is included by default. -There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. diff --git a/pypy/doc/config/translation.cli.trace_calls.rst b/pypy/doc/config/translation.cli.trace_calls.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. 
internal diff --git a/pypy/doc/config/objspace.std.withstrslice.rst b/pypy/doc/config/objspace.std.withstrslice.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrslice.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable "string slice" objects. - -See the page about `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#string-slice-objects - - diff --git a/pypy/doc/config/translation.dump_static_data_info.rst b/pypy/doc/config/translation.dump_static_data_info.rst deleted file mode 100644 --- a/pypy/doc/config/translation.dump_static_data_info.rst +++ /dev/null @@ -1,3 +0,0 @@ -Dump information about static prebuilt constants, to the file -TARGETNAME.staticdata.info in the /tmp/usession-... directory. This file can -be later inspected using the script ``bin/reportstaticdata.py``. diff --git a/pypy/doc/config/objspace.allworkingmodules.rst b/pypy/doc/config/objspace.allworkingmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.allworkingmodules.rst +++ /dev/null @@ -1,6 +0,0 @@ -This option enables the usage of all modules that are known to be working well -and that translate without problems. - -Note that this option defaults to True (except when running -``py.py`` because it takes a long time to start). To force it -to False, use ``--no-allworkingmodules``. 
diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst b/pypy/doc/config/objspace.opcodes.CALL_METHOD.txt copy from pypy/doc/config/objspace.opcodes.CALL_METHOD.rst copy to pypy/doc/config/objspace.opcodes.CALL_METHOD.txt diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt copy from pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst copy to pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt diff --git a/pypy/doc/config/translation.sandbox.rst b/pypy/doc/config/translation.sandbox.rst deleted file mode 100644 --- a/pypy/doc/config/translation.sandbox.rst +++ /dev/null @@ -1,15 +0,0 @@ -Generate a special fully-sandboxed executable. - -The fully-sandboxed executable cannot be run directly, but -only as a subprocess of an outer "controlling" process. The -sandboxed process is "safe" in the sense that it doesn't do -any library or system call - instead, whenever it would like -to perform such an operation, it marshals the operation name -and the arguments to its stdout and it waits for the -marshalled result on its stdin. This controller process must -handle these operation requests, in any way it likes, allowing -full virtualization. - -For examples of controller processes, see -``pypy/translator/sandbox/interact.py`` and -``pypy/translator/sandbox/pypy_interact.py``. 
diff --git a/pypy/doc/config/translation.rweakref.rst b/pypy/doc/config/translation.rweakref.txt copy from pypy/doc/config/translation.rweakref.rst copy to pypy/doc/config/translation.rweakref.txt diff --git a/pypy/doc/config/objspace.std.withstrslice.rst b/pypy/doc/config/objspace.std.withstrslice.txt copy from pypy/doc/config/objspace.std.withstrslice.rst copy to pypy/doc/config/objspace.std.withstrslice.txt diff --git a/pypy/doc/config/objspace.std.withprebuiltint.rst b/pypy/doc/config/objspace.std.withprebuiltint.txt copy from pypy/doc/config/objspace.std.withprebuiltint.rst copy to pypy/doc/config/objspace.std.withprebuiltint.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for profile-based inlining (:config:`translation.backendopt.profile_based_inline`). diff --git a/pypy/doc/config/translation.withsmallfuncsets.rst b/pypy/doc/config/translation.withsmallfuncsets.txt copy from pypy/doc/config/translation.withsmallfuncsets.rst copy to pypy/doc/config/translation.withsmallfuncsets.txt diff --git a/pypy/doc/config/translation.gc.rst b/pypy/doc/config/translation.gc.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gc.rst +++ /dev/null @@ -1,13 +0,0 @@ -Choose the Garbage Collector used by the translated program: - - - "ref": reference counting. Takes very long to translate and the result is - slow. - - - "marksweep": naive mark & sweep. - - - "semispace": a copying semi-space GC. - - - "generation": a generational GC using the semi-space GC for the - older generation. - - - "boehm": use the Boehm conservative GC. 
diff --git a/pypy/doc/config/translation.gc.rst b/pypy/doc/config/translation.gc.txt copy from pypy/doc/config/translation.gc.rst copy to pypy/doc/config/translation.gc.txt diff --git a/pypy/doc/config/objspace.usemodules.imp.rst b/pypy/doc/config/objspace.usemodules.imp.txt copy from pypy/doc/config/objspace.usemodules.imp.rst copy to pypy/doc/config/objspace.usemodules.imp.txt diff --git a/pypy/doc/config/objspace.usemodules.bz2.rst b/pypy/doc/config/objspace.usemodules.bz2.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.bz2.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'bz2' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.rst b/pypy/doc/config/objspace.usemodules.pypyjit.txt copy from pypy/doc/config/objspace.usemodules.pypyjit.rst copy to pypy/doc/config/objspace.usemodules.pypyjit.txt diff --git a/pypy/doc/config/objspace.usemodules._file.rst b/pypy/doc/config/objspace.usemodules._file.txt copy from pypy/doc/config/objspace.usemodules._file.rst copy to pypy/doc/config/objspace.usemodules._file.txt diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.rst b/pypy/doc/config/objspace.usemodules.unicodedata.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.unicodedata.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'unicodedata' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/translation.type_system.rst b/pypy/doc/config/translation.type_system.rst deleted file mode 100644 --- a/pypy/doc/config/translation.type_system.rst +++ /dev/null @@ -1,4 +0,0 @@ -Which type system to use when rtyping_. This option should not be set -explicitly. - -.. 
_rtyping: ../rtyper.html diff --git a/pypy/doc/config/objspace.usemodules._ffi.rst b/pypy/doc/config/objspace.usemodules._ffi.txt copy from pypy/doc/config/objspace.usemodules._ffi.rst copy to pypy/doc/config/objspace.usemodules._ffi.txt diff --git a/pypy/doc/config/translation.jit.rst b/pypy/doc/config/translation.jit.txt copy from pypy/doc/config/translation.jit.rst copy to pypy/doc/config/translation.jit.txt diff --git a/pypy/doc/config/objspace.logbytecodes.rst b/pypy/doc/config/objspace.logbytecodes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.logbytecodes.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules.__builtin__.rst b/pypy/doc/config/objspace.usemodules.__builtin__.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.__builtin__.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '__builtin__' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/config/objspace.usemodules._bisect.rst b/pypy/doc/config/objspace.usemodules._bisect.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._bisect.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the '_bisect' module. -Used, optionally, by the 'bisect' standard lib module. This module is expected to be working and is included by default. - - diff --git a/pypy/doc/config/translation.insist.rst b/pypy/doc/config/translation.insist.rst deleted file mode 100644 --- a/pypy/doc/config/translation.insist.rst +++ /dev/null @@ -1,4 +0,0 @@ -Don't stop on the first `rtyping`_ error. Instead, try to rtype as much as -possible and show the collected error messages in the end. - -.. 
_`rtyping`: ../rtyper.html diff --git a/pypy/doc/config/objspace.usemodules.exceptions.rst b/pypy/doc/config/objspace.usemodules.exceptions.txt copy from pypy/doc/config/objspace.usemodules.exceptions.rst copy to pypy/doc/config/objspace.usemodules.exceptions.txt diff --git a/pypy/doc/config/objspace.usepycfiles.rst b/pypy/doc/config/objspace.usepycfiles.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usepycfiles.rst +++ /dev/null @@ -1,4 +0,0 @@ -If this option is used, then PyPy imports and generates "pyc" files in the -same way as CPython. This is true by default and there is not much reason -to turn it off nowadays. If off, PyPy never produces "pyc" files and -ignores any "pyc" file that might already be present. diff --git a/pypy/doc/config/objspace.usemodules.cpyext.rst b/pypy/doc/config/objspace.usemodules.cpyext.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cpyext.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use (experimental) cpyext module, that tries to load and run CPython extension modules diff --git a/pypy/doc/config/translation.profopt.rst b/pypy/doc/config/translation.profopt.txt copy from pypy/doc/config/translation.profopt.rst copy to pypy/doc/config/translation.profopt.txt diff --git a/pypy/doc/config/objspace.usemodules._ffi.rst b/pypy/doc/config/objspace.usemodules._ffi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ffi.rst +++ /dev/null @@ -1,1 +0,0 @@ -Applevel interface to libffi. It is more high level than _rawffi, and most importantly it is JIT friendly diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.rst b/pypy/doc/config/translation.backendopt.inline_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for basic inlining (:config:`translation.backendopt.inline`). 
diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.rst b/pypy/doc/config/objspace.std.withdictmeasurement.txt copy from pypy/doc/config/objspace.std.withdictmeasurement.rst copy to pypy/doc/config/objspace.std.withdictmeasurement.txt diff --git a/pypy/doc/config/objspace.usemodules._stackless.rst b/pypy/doc/config/objspace.usemodules._stackless.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._stackless.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_stackless' module. - -Exposes the `stackless` primitives, and also implies a stackless build. -See also :config:`translation.stackless`. - -.. _`stackless`: ../stackless.html diff --git a/pypy/doc/config/translation.shared.rst b/pypy/doc/config/translation.shared.txt copy from pypy/doc/config/translation.shared.rst copy to pypy/doc/config/translation.shared.txt diff --git a/pypy/doc/config/objspace.logbytecodes.rst b/pypy/doc/config/objspace.logbytecodes.txt copy from pypy/doc/config/objspace.logbytecodes.rst copy to pypy/doc/config/objspace.logbytecodes.txt diff --git a/pypy/doc/config/translation.backendopt.storesink.rst b/pypy/doc/config/translation.backendopt.storesink.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.storesink.rst +++ /dev/null @@ -1,1 +0,0 @@ -Store sinking optimization. On by default. diff --git a/pypy/doc/config/translation.thread.rst b/pypy/doc/config/translation.thread.rst deleted file mode 100644 --- a/pypy/doc/config/translation.thread.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable threading. The only target where this has visible effect is PyPy (this -also enables the ``thread`` module then). diff --git a/pypy/doc/config/translation.no__thread.rst b/pypy/doc/config/translation.no__thread.rst deleted file mode 100644 --- a/pypy/doc/config/translation.no__thread.rst +++ /dev/null @@ -1,4 +0,0 @@ -Don't use gcc __thread attribute for fast thread local storage -implementation . 
Increases the chance that moving the resulting -executable to another same processor Linux machine will work. (see -:config:`translation.vanilla`). diff --git a/pypy/doc/config/objspace.usemodules.cmath.rst b/pypy/doc/config/objspace.usemodules.cmath.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cmath.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'cmath' module. -This module is expected to be working and is included by default. From commits-noreply at bitbucket.org Tue Apr 26 23:36:15 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 23:36:15 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: whack until we get documentation for all config options in a sphinx style. some Message-ID: <20110426213615.4C2E5282C1D@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43647:4cd84b62116f Date: 2011-04-26 23:31 +0200 http://bitbucket.org/pypy/pypy/changeset/4cd84b62116f/ Log: whack until we get documentation for all config options in a sphinx style. some of this is not pretty nor proper, but I want to spend no more time on this. diff --git a/pypy/config/test/test_makerestdoc.py b/pypy/config/test/test_makerestdoc.py --- a/pypy/config/test/test_makerestdoc.py +++ b/pypy/config/test/test_makerestdoc.py @@ -20,14 +20,14 @@ config = Config(descr) txt = descr.make_rest_doc().text() - result = {"": checkrest(txt, descr._name + ".txt")} + result = {"": txt} for path in config.getpaths(include_groups=True): subconf, step = config._cfgimpl_get_home_by_path(path) fullpath = (descr._name + "." 
+ path) prefix = fullpath.rsplit(".", 1)[0] txt = getattr(subconf._cfgimpl_descr, step).make_rest_doc( prefix).text() - result[path] = checkrest(txt, fullpath + ".txt") + result[path] = txt return result def test_simple(): @@ -68,7 +68,6 @@ ChoiceOption("bar", "more doc", ["a", "b", "c"], default="a")]) result = generate_html(descr) - assert "more doc" in result[""] def test_cmdline_overview(): descr = OptionDescription("foo", "doc", [ diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -28,9 +28,6 @@ fullpath = get_fullpath(self, path) result = Rest( Title(fullpath, abovechar="=", belowchar="="), - Directive("contents"), - Paragraph(Link("back to parent", path + ".html")), - Title("Basic Option Information"), ListItem(Strong("name:"), self._name), ListItem(Strong("description:"), self.doc)) if self.cmdline is not None: @@ -132,36 +129,18 @@ def make_rest_doc(self, path=""): fullpath = get_fullpath(self, path) content = Rest( - Title(fullpath, abovechar="=", belowchar="="), - Directive("contents")) - if path: - content.add( - Paragraph(Link("back to parent", path + ".html"))) + Title(fullpath, abovechar="=", belowchar="=")) + toctree = [] + for child in self._children: + subpath = fullpath + "." + child._name + toctree.append(subpath) + content.add(Directive("toctree", *toctree, maxdepth=4)) content.join( - Title("Basic Option Information"), ListItem(Strong("name:"), self._name), - ListItem(Strong("description:"), self.doc), - Title("Sub-Options")) + ListItem(Strong("description:"), self.doc)) stack = [] - prefix = fullpath curr = content config = Config(self) - for ending in self.getpaths(include_groups=True): - subpath = fullpath + "." 
+ ending - while not (subpath.startswith(prefix) and - subpath[len(prefix)] == "."): - curr, prefix = stack.pop() - print subpath, fullpath, ending, curr - sub, step = config._cfgimpl_get_home_by_path(ending) - doc = getattr(sub._cfgimpl_descr, step).doc - if doc: - new = curr.add(ListItem(Link(subpath + ":", subpath + ".html"), - Em(doc))) - else: - new = curr.add(ListItem(Link(subpath + ":", subpath + ".html"))) - stack.append((curr, prefix)) - prefix = subpath - curr = new return content diff --git a/pypy/tool/rest/rest.py b/pypy/tool/rest/rest.py --- a/pypy/tool/rest/rest.py +++ b/pypy/tool/rest/rest.py @@ -10,14 +10,12 @@ pass def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): - from pypy.tool.rest import directive """ return html latin1-encoded document for the given input. source a ReST-string sourcepath where to look for includes (basically) stylesheet path (to be used if any) """ from docutils.core import publish_string - directive.set_backend_and_register_directives("html") kwargs = { 'stylesheet' : stylesheet, 'stylesheet_path': None, diff --git a/pypy/tool/rest/rst.py b/pypy/tool/rest/rst.py --- a/pypy/tool/rest/rst.py +++ b/pypy/tool/rest/rst.py @@ -389,18 +389,14 @@ indent = ' ' def __init__(self, name, *args, **options): self.name = name - self.content = options.pop('content', []) - children = list(args) - super(Directive, self).__init__(*children) + self.content = args + super(Directive, self).__init__() self.options = options def text(self): # XXX not very pretty... - namechunksize = len(self.name) + 2 - self.children.insert(0, Text('X' * namechunksize)) - txt = super(Directive, self).text() - txt = '.. %s::%s' % (self.name, txt[namechunksize + 3:],) - options = '\n'.join([' :%s: %s' % (k, v) for (k, v) in + txt = '.. 
%s::' % (self.name,) + options = '\n'.join([' :%s: %s' % (k, v) for (k, v) in self.options.iteritems()]) if options: txt += '\n%s' % (options,) @@ -408,10 +404,7 @@ if self.content: txt += '\n' for item in self.content: - assert item.parentclass == Rest, 'only top-level items allowed' - assert not item.indent - item.indent = ' ' - txt += '\n' + item.text() + txt += '\n ' + item return txt diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -31,32 +31,38 @@ -rm -rf $(BUILDDIR)/* html: + python config/generate.py $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: + python config/generate.py $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: + python config/generate.py $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: + python config/generate.py $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: + python config/generate.py $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: + python config/generate.py $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -66,6 +72,7 @@ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" latex: + python config/generate.py $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @@ -73,17 +80,20 @@ "run these through (pdf)latex." 
changes: + python config/generate.py $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: + python config/generate.py $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: + python config/generate.py $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,6 +17,7 @@ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/doc/.+\.html$ +^pypy/doc/config/.+\.rst$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,6 +8,33 @@ ] start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) +def make_rst(basename): + txtpath = thisdir.join(basename) + txtpath.ensure() + rstpath = txtpath.new(ext=".rst") + + fullpath = txtpath.purebasename + start = fullpath.split(".")[0] + path = fullpath.rsplit(".", 1)[0] + basedescr = start_to_descr.get(start) + if basedescr is None: + return + if fullpath.count(".") == 0: + descr = basedescr + path = "" + else: + conf = config.Config(basedescr) + subconf, step = conf._cfgimpl_get_home_by_path( + fullpath.split(".", 1)[1]) + descr = getattr(subconf._cfgimpl_descr, step) + text = unicode(descr.make_rest_doc(path).text()) + if txtpath.check(file=True): + content = txtpath.read() + if content: + text += "\n\n" + text = u"%s\n\n%s" % (text, unicode(txtpath.read(), "utf-8")) + rstpath.write(text.encode("utf-8")) + thisdir = py.path.local(__file__).dirpath() @@ -15,33 +42,7 @@ prefix = descr._name c 
= config.Config(descr) thisdir.join(prefix + ".txt").ensure() + make_rst(prefix + ".txt") for p in c.getpaths(include_groups=True): basename = prefix + "." + p + ".txt" - txtpath = thisdir.join(basename) - txtpath.ensure() - rstpath = txtpath.new(ext=".rst") - - fullpath = txtpath.purebasename - start = fullpath.split(".")[0] - path = fullpath.rsplit(".", 1)[0] - basedescr = start_to_descr.get(start) - if basedescr is None: - continue - if fullpath.count(".") == 0: - descr = basedescr - path = "" - else: - conf = config.Config(basedescr) - subconf, step = conf._cfgimpl_get_home_by_path( - fullpath.split(".", 1)[1]) - descr = getattr(subconf._cfgimpl_descr, step) - text = unicode(descr.make_rest_doc(path).text()) - if txtpath.check(file=True): - content = txtpath.read() - if content: - text += "\nDescription\n===========" - text = u"%s\n\n%s" % (text, unicode(txtpath.read(), "utf-8")) - print path - print "***************************" - print text - rstpath.write(text.encode("utf-8")) + make_rst(basename) From commits-noreply at bitbucket.org Tue Apr 26 23:36:16 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Tue, 26 Apr 2011 23:36:16 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge Message-ID: <20110426213616.AE78536C204@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43648:21e6016a9220 Date: 2011-04-26 23:31 +0200 http://bitbucket.org/pypy/pypy/changeset/21e6016a9220/ Log: merge diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -92,11 +92,14 @@ garbage_collection.rst interpreter.rst objspace.rst + __pypy__-module.rst + objspace-proxies.rst dev_method.rst extending.rst extradoc.rst + video-index.rst glossary.rst @@ -107,6 +110,8 @@ parser.rst rlib.rst rtyper.rst + rffi.rst + translation.rst jit/index.rst jit/overview.rst @@ -121,6 +126,7 @@ index-report.rst stackless.rst + sandbox.rst discussions.rst @@ -129,6 +135,7 @@ sprint-reports.rst 
eventhistory.rst + statistic/index.rst Indices and tables ================== From commits-noreply at bitbucket.org Wed Apr 27 01:19:10 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Wed, 27 Apr 2011 01:19:10 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix this test, it got more optimized, yay! Message-ID: <20110426231910.A0D88282B8B@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43649:b8138392013a Date: 2011-04-26 19:18 -0400 http://bitbucket.org/pypy/pypy/changeset/b8138392013a/ Log: Fix this test, it got more optimized, yay! diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -164,9 +164,7 @@ # assert entry_bridge.match_by_id('call', """ p29 = getfield_gc(ConstPtr(ptr28), descr=) - guard_nonnull_class(p29, ConstClass(Function), descr=) - i32 = getfield_gc(p0, descr=) - guard_false(i32, descr=) + guard_nonnull_class(p29, ConstClass(Function), descr=) p33 = getfield_gc(p29, descr=) guard_value(p33, ConstPtr(ptr34), descr=) p35 = getfield_gc(p29, descr=) From commits-noreply at bitbucket.org Wed Apr 27 01:19:17 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Wed, 27 Apr 2011 01:19:17 +0200 (CEST) Subject: [pypy-svn] pypy default: mreged upstream Message-ID: <20110426231917.2FCA0282BEC@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43650:df6f7a526b60 Date: 2011-04-26 19:18 -0400 http://bitbucket.org/pypy/pypy/changeset/df6f7a526b60/ Log: mreged upstream diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py --- a/pypy/jit/metainterp/optimizeopt/intutils.py +++ b/pypy/jit/metainterp/optimizeopt/intutils.py @@ -1,4 +1,4 @@ -from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift +from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT class IntBound(object): _attrs_ = 
('has_upper', 'has_lower', 'upper', 'lower') @@ -20,7 +20,7 @@ def make_lt(self, other): return self.make_le(other.add(-1)) - + def make_ge(self, other): if other.has_lower: if not self.has_lower or other.lower > self.lower: @@ -161,7 +161,8 @@ def lshift_bound(self, other): if self.has_upper and self.has_lower and \ other.has_upper and other.has_lower and \ - other.known_ge(IntBound(0, 0)): + other.known_ge(IntBound(0, 0)) and \ + other.known_lt(IntBound(LONG_BIT, LONG_BIT)): try: vals = (ovfcheck_lshift(self.upper, other.upper), ovfcheck_lshift(self.upper, other.lower), @@ -176,7 +177,8 @@ def rshift_bound(self, other): if self.has_upper and self.has_lower and \ other.has_upper and other.has_lower and \ - other.known_ge(IntBound(0, 0)): + other.known_ge(IntBound(0, 0)) and \ + other.known_lt(IntBound(LONG_BIT, LONG_BIT)): vals = (self.upper >> other.upper, self.upper >> other.lower, self.lower >> other.upper, diff --git a/pypy/jit/metainterp/test/test_intbound.py b/pypy/jit/metainterp/test/test_intbound.py --- a/pypy/jit/metainterp/test/test_intbound.py +++ b/pypy/jit/metainterp/test/test_intbound.py @@ -2,6 +2,7 @@ IntLowerBound, IntUnbounded from copy import copy import sys +from pypy.rlib.rarithmetic import LONG_BIT def bound(a,b): if a is None and b is None: @@ -229,6 +230,12 @@ assert not b10.lshift_bound(b100).has_upper assert not bmax.lshift_bound(b10).has_upper assert b10.lshift_bound(b10).has_upper + + for b in (b10, b100, bmax, IntBound(0, 0)): + for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): + #assert not b.lshift_bound(shift_count_bound).has_upper + assert not b.rshift_bound(shift_count_bound).has_upper + def test_div_bound(): for _, _, b1 in some_bounds(): From commits-noreply at bitbucket.org Wed Apr 27 10:12:01 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 10:12:01 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: shadow tracking is gone Message-ID: 
<20110427081201.97B8D282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43651:e7fb44a4f028 Date: 2011-04-27 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/e7fb44a4f028/ Log: shadow tracking is gone diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -206,28 +206,11 @@ User Class Optimizations ------------------------ -Shadow Tracking -+++++++++++++++ - -Shadow tracking is a general optimization that speeds up method calls for user -classes (that don't have special meta-class). For this a special dict -representation is used together with multidicts. This dict representation is -used only for instance dictionaries. The instance dictionary tracks whether an -instance attribute shadows an attribute of its class. This makes method calls -slightly faster in the following way: When calling a method the first thing that -is checked is the class dictionary to find descriptors. Normally, when a method -is found, the instance dictionary is then checked for instance attributes -shadowing the class attribute. If we know that there is no shadowing (since -instance dict tells us that) we can save this lookup on the instance dictionary. - -*This was deprecated and is no longer available.* - Method Caching ++++++++++++++ -Shadow tracking is also an important building block for the method caching -optimization. A method cache is introduced where the result of a method lookup +A method cache is introduced where the result of a method lookup is stored (which involves potentially many lookups in the base classes of a class). Entries in the method cache are stored using a hash computed from the name being looked up, the call site (i.e. the bytecode object and @@ -345,14 +328,12 @@ improving results by anything from 15-40 per cent. 
Another optimization, or rather set of optimizations, that has a uniformly good -effect is the set of three 'method optimizations', i.e. shadow tracking, the +effect are the two 'method optimizations', i.e. the method cache and the LOOKUP_METHOD and CALL_METHOD opcodes. On a heavily object-oriented benchmark (richards) they combine to give a speed-up of nearly 50%, and even on the extremely un-object-oriented pystone benchmark, the improvement is over 20%. -.. waffles about ropes - When building pypy, all generally useful optimizations are turned on by default unless you explicitly lower the translation optimization level with the ``--opt`` option. From commits-noreply at bitbucket.org Wed Apr 27 10:14:01 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 10:14:01 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: kill superfluous word Message-ID: <20110427081401.E1555282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43652:66ea5cdd58db Date: 2011-04-27 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/66ea5cdd58db/ Log: kill superfluous word diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -135,7 +135,6 @@ implementations for various purposes (see below). This is now the default implementation of dictionaries in the Python interpreter. -option. 
Sharing Dicts +++++++++++++ From commits-noreply at bitbucket.org Wed Apr 27 10:18:43 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 27 Apr 2011 10:18:43 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: testing surviving virtual strings Message-ID: <20110427081843.4A9F3282BF2@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43653:941d81df1e0d Date: 2011-04-27 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/941d81df1e0d/ Log: testing surviving virtual strings diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -407,6 +407,43 @@ return len(sa) assert self.meta_interp(f, [16]) == f(16) + def test_loop_invariant_string_slize(self): + _str = self._str + mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = []) + def f(n, c): + s = s1 = _str(c*10) + sa = i = 0 + while i < n: + mydriver.jit_merge_point(i=i, n=n, sa=sa, s=s, s1=s1) + sa += len(s) + if i < n/2: + s = s1[1:3] + else: + s = s1[2:3] + i += 1 + return sa + assert self.meta_interp(f, [16, 'a']) == f(16, 'a') + + def test_loop_invariant_string_slize_boxed(self): + class Str(object): + def __init__(self, value): + self.value = value + _str = self._str + mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = []) + def f(n, c): + s = s1 = Str(_str(c*10)) + sa = i = 0 + while i < n: + mydriver.jit_merge_point(i=i, n=n, sa=sa, s=s, s1=s1) + sa += len(s.value) + if i < n/2: + s = Str(s1.value[1:3]) + else: + s = Str(s1.value[2:3]) + i += 1 + return sa + assert self.meta_interp(f, [16, 'a']) == f(16, 'a') + #class TestOOtype(StringTests, OOJitMixin): # CALL = "oosend" # CALL_PURE = "oosend_pure" From commits-noreply at bitbucket.org Wed Apr 27 10:22:11 2011 From: commits-noreply at bitbucket.org (hakanardo) Date: Wed, 27 Apr 2011 10:22:11 +0200 (CEST) Subject: [pypy-svn] pypy jit-short_from_state: 
ensure arrays forces unsuported vritaul staring members Message-ID: <20110427082211.8CB86282BF2@codespeak.net> Author: Hakan Ardo Branch: jit-short_from_state Changeset: r43654:4b1dd6e30af2 Date: 2011-04-27 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/4b1dd6e30af2/ Log: ensure arrays forces unsuported vritaul staring members diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py --- a/pypy/jit/metainterp/test/test_string.py +++ b/pypy/jit/metainterp/test/test_string.py @@ -444,6 +444,23 @@ return sa assert self.meta_interp(f, [16, 'a']) == f(16, 'a') + def test_loop_invariant_string_slize_in_array(self): + _str = self._str + mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = []) + def f(n, c): + s = s1 = [_str(c*10)] + sa = i = 0 + while i < n: + mydriver.jit_merge_point(i=i, n=n, sa=sa, s=s, s1=s1) + sa += len(s[0]) + if i < n/2: + s = [s1[0][1:3]] + else: + s = [s1[0][2:3]] + i += 1 + return sa + assert self.meta_interp(f, [16, 'a']) == f(16, 'a') + #class TestOOtype(StringTests, OOJitMixin): # CALL = "oosend" # CALL_PURE = "oosend_pure" From commits-noreply at bitbucket.org Wed Apr 27 10:47:26 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 10:47:26 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (all): planning for today Message-ID: <20110427084726.1DB5836C20D@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3536:de6875eef83e Date: 2011-04-26 10:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/de6875eef83e/ Log: (all): planning for today diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -12,26 +12,28 @@ tasks: - fix the projector DONE (Jacob) - release 1.5 - - fix the import problem (Armin, Romain) - - fix the jit tests (Håkan, Armin around + - fix the import problem (Armin, Romain) (NOT DONE) + - fix 
the jit tests DONE - do we have 2.7 or 2.7.1? rename the directory - - merge jit-lsprof (Carl Friedrich, Dario) - - merge exarkun's branches, after review (Armin, Romain) - - merge jitypes2? - - documentation (Laura, Carl Friedrich) + - merge jit-lsprof DONE + - merge exarkun's branches, after review DONE + - documentation (Laura, Carl Friedrich) IN PROGRESS - look at the tracker + - investigate breakage (Armin, Romain) + - investigate Mac problems - branches to be integrated/finished afterwards - 32-on-64 - lukas' branches: list-strategies/dict-strategies - - new-dict-proxy (Lukas) + - new-dict-proxy MORE TESTING - out-of-line guards - refactor-not-in-translator - håkan's branches + - jitypes2 - other tasks - - continue tracing after invalid loops - - look into cython + - continue tracing after invalid loops (Håkan, Carl Friedrich) + - look into cython (Armin, Romain, Dario) - investigate Open End software on top of PyPy (Lukas, Anders) - (feedback to wesley chun's paragraphs: Armin, Laura) From commits-noreply at bitbucket.org Wed Apr 27 10:47:27 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 10:47:27 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (all): planning for today Message-ID: <20110427084727.32E3B36C20D@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3537:1bef285478ff Date: 2011-04-27 10:46 +0200 http://bitbucket.org/pypy/extradoc/changeset/1bef285478ff/ Log: (all): planning for today diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -10,33 +10,33 @@ tasks: -- fix the projector DONE (Jacob) - release 1.5 - - fix the import problem (Armin, Romain) (NOT DONE) - - fix the jit tests DONE - - do we have 2.7 or 2.7.1? 
rename the directory - - merge jit-lsprof DONE - - merge exarkun's branches, after review DONE - - documentation (Laura, Carl Friedrich) IN PROGRESS + - fix the import problem TESTING + - merge in 2.7.1 stuff (Anto, Dario) + - documentation (Anto, Carl Friedrich, a bit of Laura) MORE PROGRESS - look at the tracker - - investigate breakage (Armin, Romain) - - investigate Mac problems + - investigate breakage DONE + - investigate Mac problems (Armin, Dario, Romain, Iko) + - find out whether cProfile is giving interesting information (Anto, Lukas) - branches to be integrated/finished afterwards - 32-on-64 - lukas' branches: list-strategies/dict-strategies - - new-dict-proxy MORE TESTING + - new-dict-proxy READY + - merge new-dict-proxy into post-release (Lukas) - out-of-line guards - refactor-not-in-translator - håkan's branches - jitypes2 - other tasks - - continue tracing after invalid loops (Håkan, Carl Friedrich) - - look into cython (Armin, Romain, Dario) - - investigate Open End software on top of PyPy (Lukas, Anders) + - continue tracing after invalid loops TESTING + - look into cython (Armin, Romain, Dario) FORK + BASIC ARCH HAPPENED + - investigate Open End software on top of PyPy EASIER THAN FEARED - (feedback to wesley chun's paragraphs: Armin, Laura) - presentations/discussions - - Lukas' presentation on memory improvements (Tuesday) + - Lukas' presentation on memory improvements DONE + - what are håkan's branches doing? 
(today after lunch) - codespeak migration + - EuroPython keynote/training (Anto, Armin) From commits-noreply at bitbucket.org Wed Apr 27 11:09:58 2011 From: commits-noreply at bitbucket.org (l.diekmann) Date: Wed, 27 Apr 2011 11:09:58 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: Merged new-dict-proxy into post-realease-1.5 Message-ID: <20110427090958.E8FA436C20F@codespeak.net> Author: Lukas Diekmann Branch: post-release-1.5 Changeset: r43655:45911bf54093 Date: 2011-04-27 10:53 +0200 http://bitbucket.org/pypy/pypy/changeset/45911bf54093/ Log: Merged new-dict-proxy into post-realease-1.5 diff --git a/pypy/objspace/std/dictproxytype.py b/pypy/objspace/std/dictproxytype.py deleted file mode 100644 --- a/pypy/objspace/std/dictproxytype.py +++ /dev/null @@ -1,51 +0,0 @@ -from pypy.interpreter import gateway -from pypy.interpreter.typedef import GetSetProperty -from pypy.interpreter.error import OperationError -from pypy.objspace.std.stdtypedef import StdTypeDef - -# ____________________________________________________________ - -def _proxymethod(name): - def fget(space, w_obj): - from pypy.objspace.std.dictproxyobject import W_DictProxyObject - if not isinstance(w_obj, W_DictProxyObject): - raise OperationError(space.w_TypeError, - space.wrap("expected dictproxy")) - return space.getattr(w_obj.w_dict, space.wrap(name)) - return GetSetProperty(fget) - -def _compareproxymethod(opname): - def compare(space, w_obj1, w_obj2): - from pypy.objspace.std.dictproxyobject import W_DictProxyObject - if not isinstance(w_obj1, W_DictProxyObject): - raise OperationError(space.w_TypeError, - space.wrap("expected dictproxy")) - return getattr(space, opname)(w_obj1.w_dict, w_obj2) - compare.func_name = "dictproxy_compare_%s" % (opname, ) - return gateway.interp2app(compare) - -# ____________________________________________________________ - -dictproxy_typedef = StdTypeDef("dictproxy", - has_key = _proxymethod('has_key'), - get = _proxymethod('get'), - keys = 
_proxymethod('keys'), - values = _proxymethod('values'), - items = _proxymethod('items'), - iterkeys = _proxymethod('iterkeys'), - itervalues = _proxymethod('itervalues'), - iteritems = _proxymethod('iteritems'), - copy = _proxymethod('copy'), - __len__ = _proxymethod('__len__'), - __getitem__ = _proxymethod('__getitem__'), - __contains__ = _proxymethod('__contains__'), - __str__ = _proxymethod('__str__'), - __iter__ = _proxymethod('__iter__'), - __lt__ = _compareproxymethod('lt'), - __le__ = _compareproxymethod('le'), - __eq__ = _compareproxymethod('eq'), - __ne__ = _compareproxymethod('ne'), - __gt__ = _compareproxymethod('gt'), - __ge__ = _compareproxymethod('ge'), -) -dictproxy_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -189,6 +189,30 @@ assert btag is atag assert btag is not None + def test_version_tag_when_changing_a_lot(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(1)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 1 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(2)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 2 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(3)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 3 + + space.setattr(w_A, w_x, space.newint(4)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 4 + + class AppTestVersionedType(test_typeobject.AppTestTypeObject): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtypeversion": True}) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- 
a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -54,7 +54,6 @@ from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.longtype import long_typedef from pypy.objspace.std.unicodetype import unicode_typedef - from pypy.objspace.std.dictproxytype import dictproxy_typedef from pypy.objspace.std.nonetype import none_typedef from pypy.objspace.std.itertype import iter_typedef self.pythontypes = [value for key, value in result.__dict__.items() @@ -123,7 +122,6 @@ iterobject.W_FastTupleIterObject: [], iterobject.W_ReverseSeqIterObject: [], unicodeobject.W_UnicodeObject: [], - dictproxyobject.W_DictProxyObject: [], dictmultiobject.W_DictViewKeysObject: [], dictmultiobject.W_DictViewItemsObject: [], dictmultiobject.W_DictViewValuesObject: [], diff --git a/lib-python/modified-2.7.0/test/test_descr.py b/lib-python/modified-2.7.0/test/test_descr.py --- a/lib-python/modified-2.7.0/test/test_descr.py +++ b/lib-python/modified-2.7.0/test/test_descr.py @@ -3189,7 +3189,8 @@ except TypeError: pass else: - self.fail("%r's __dict__ can be modified" % cls) + if test_support.check_impl_detail(pypy=False): + self.fail("%r's __dict__ can be modified" % cls) # Modules also disallow __dict__ assignment class Module1(types.ModuleType, Base): diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1072,6 +1072,50 @@ --TICK-- jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_mutate_class(self): + def fn(n): + class A(object): + count = 1 + def __init__(self, a): + self.a = a + def f(self): + return self.count + i = 0 + a = A(1) + while i < n: + A.count += 1 # ID: mutate + i = a.f() # ID: meth1 + return i + # + log = self.run(fn, [1000], threshold=10) + assert log.result == 1000 + # + # first, we test the entry bridge + # 
------------------------------- + entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) + ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') + assert log.opnames(ops) == ['guard_value', 'getfield_gc', 'guard_value', + 'getfield_gc', 'guard_nonnull_class'] + # the STORE_ATTR is folded away + assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] + # + # then, the actual loop + # ---------------------- + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = getfield_gc_pure(p5, descr=) + i9 = int_lt(i8, i7) + guard_true(i9, descr=.*) + i11 = int_add(i8, 1) + i12 = force_token() + --TICK-- + p20 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) + jump(p0, p1, p2, p3, p4, p20, p6, i7, descr=) + """) + def test_intbound_simple(self): """ diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -615,7 +615,7 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) - w_dict = space.newdict(from_strdict_shared=w_obj.dict_w) + w_dict = w_obj.getdict(space) pto.c_tp_dict = make_ref(space, w_dict) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -303,11 +303,10 @@ return W_ListObject(list_w) def newdict(self, module=False, instance=False, classofinstance=None, - from_strdict_shared=None, strdict=False): + strdict=False): return W_DictMultiObject.allocate_and_init_instance( self, module=module, instance=instance, classofinstance=classofinstance, - from_strdict_shared=from_strdict_shared, strdict=strdict) def newslice(self, w_start, w_end, w_step): diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py --- a/pypy/jit/tl/pypyjit_demo.py +++ 
b/pypy/jit/tl/pypyjit_demo.py @@ -1,17 +1,24 @@ try: - def main(n): - def g(n): - return range(n) - s = 0 - for i in range(n): # ID: for - tmp = g(n) - s += tmp[i] # ID: getitem - a = 0 - return s - main(10) + try: + import pypyjit + pypyjit.set_param(threshold=3, inlining=True) + except ImportError: + pass + class A(object): + x = 1 + y = 2 + def sqrt(y): + a = A() + for i in range(y): + assert a.y == 2 + assert A.__dict__['x'] == i + 1 + A.x += 1 + return a.x + + print sqrt(1000000) except Exception, e: print "Exception: ", type(e) print e - + diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -34,13 +34,7 @@ @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, classofinstance=None, - from_strdict_shared=None, strdict=False): - if from_strdict_shared is not None: - assert w_type is None - assert not module and not instance and classofinstance is None - w_self = StrDictImplementation(space) - w_self.content = from_strdict_shared - return w_self + strdict=False): if space.config.objspace.std.withcelldict and module: from pypy.objspace.std.celldict import ModuleDictImplementation assert w_type is None diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -131,25 +131,6 @@ assert self.space.eq_w(space.call_function(get, w("33")), w(None)) assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) - def test_initialize_from_strdict_shared(self): - space = self.space - w = space.wrap - d = {"a": w(1), "b": w(2)} - w_d = space.newdict(from_strdict_shared=d) - assert self.space.eq_w(space.getitem(w_d, w("a")), w(1)) - assert self.space.eq_w(space.getitem(w_d, w("b")), w(2)) - - def test_initialize_from_strdict_really_shared(self): 
- space = self.space - w = space.wrap - d = {"a": w(1), "b": w(2)} - w_d = space.newdict(from_strdict_shared=d) - assert self.space.eq_w(space.getitem(w_d, w("a")), w(1)) - assert self.space.eq_w(space.getitem(w_d, w("b")), w(2)) - d["c"] = w(41) - assert self.space.eq_w(space.getitem(w_d, w("c")), w(41)) - - class AppTest_DictObject: def setup_class(cls): @@ -775,12 +756,10 @@ def newtuple(self, l): return tuple(l) - def newdict(self, module=False, instance=False, classofinstance=None, - from_strdict_shared=None): + def newdict(self, module=False, instance=False, classofinstance=None): return W_DictMultiObject.allocate_and_init_instance( self, module=module, instance=instance, - classofinstance=classofinstance, - from_strdict_shared=from_strdict_shared) + classofinstance=classofinstance) def finditem_str(self, w_dict, s): return w_dict.getitem_str(s) # assume it's a multidict diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -65,7 +65,7 @@ cache_counter = __pypy__.method_cache_counter("f") # the cache hits come from A.f = ..., which first does a lookup on A as # well - assert cache_counter == (9, 11) + assert cache_counter == (17, 3) def test_subclasses(self): import __pypy__ @@ -148,3 +148,32 @@ assert cache_counter[0] >= 5 assert cache_counter[1] >= 1 # should be (27, 3) assert sum(cache_counter) == 10 + + def test_mutate_class(self): + import __pypy__ + class A(object): + x = 1 + y = 2 + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + assert a.x == i + 1 + A.x += 1 + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] >= 350 + assert cache_counter[1] >= 1 + assert sum(cache_counter) == 400 + + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + setattr(a, "a%s" % i, i) + cache_counter = 
__pypy__.method_cache_counter("x") + assert cache_counter[0] == 0 # 0 hits, because all the attributes are new + + def test_get_module_from_namedtuple(self): + # this used to crash + from collections import namedtuple + assert namedtuple("a", "b").__module__ diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -1,15 +1,88 @@ from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation +from pypy.objspace.std.typeobject import unwrap_cell +from pypy.interpreter.error import OperationError -def descr_get_dictproxy(space, w_obj): - return W_DictProxyObject(w_obj.getdict(space)) -class W_DictProxyObject(W_Object): - from pypy.objspace.std.dictproxytype import dictproxy_typedef as typedef +class W_DictProxyObject(W_DictMultiObject): + def __init__(w_self, space, w_type): + W_DictMultiObject.__init__(w_self, space) + w_self.w_type = w_type - def __init__(w_self, w_dict): - w_self.w_dict = w_dict + def impl_getitem(self, w_lookup): + space = self.space + w_lookup_type = space.type(w_lookup) + if space.is_w(w_lookup_type, space.w_str): + return self.impl_getitem_str(space.str_w(w_lookup)) + else: + return None -registerimplementation(W_DictProxyObject) + def impl_getitem_str(self, lookup): + return self.w_type.getdictvalue(self.space, lookup) -register_all(vars()) + def impl_setitem(self, w_key, w_value): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + self.impl_setitem_str(self.space.str_w(w_key), w_value) + else: + raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type")) + + def impl_setitem_str(self, name, w_value): + self.w_type.setdictvalue(self.space, name, w_value) + + def impl_setdefault(self, w_key, w_default): + space = 
self.space + w_result = self.impl_getitem(w_key) + if w_result is not None: + return w_result + self.impl_setitem(w_key, w_default) + return w_default + + def impl_delitem(self, w_key): + space = self.space + w_key_type = space.type(w_key) + if space.is_w(w_key_type, space.w_str): + if not self.w_type.deldictvalue(space, w_key): + raise KeyError + else: + raise KeyError + + def impl_length(self): + return len(self.w_type.dict_w) + + def impl_iter(self): + return DictProxyIteratorImplementation(self.space, self) + + def impl_keys(self): + space = self.space + return [space.wrap(key) for key in self.w_type.dict_w.iterkeys()] + + def impl_values(self): + return [unwrap_cell(self.space, w_value) for w_value in self.w_type.dict_w.itervalues()] + + def impl_items(self): + space = self.space + return [space.newtuple([space.wrap(key), unwrap_cell(self.space, w_value)]) + for (key, w_value) in self.w_type.dict_w.iteritems()] + + def impl_clear(self): + self.w_type.dict_w.clear() + self.w_type.mutated() + + def _as_rdict(self): + assert 0, "should be unreachable" + + def _clear_fields(self): + assert 0, "should be unreachable" + +class DictProxyIteratorImplementation(IteratorImplementation): + def __init__(self, space, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + self.iterator = dictimplementation.w_type.dict_w.iteritems() + + def next_entry(self): + for key, w_value in self.iterator: + return (self.space.wrap(key), unwrap_cell(self.space, w_value)) + else: + return (None, None) diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -111,6 +111,7 @@ del X.__abstractmethods__ X() raises(AttributeError, getattr, type, "__abstractmethods__") + raises(TypeError, "int.__abstractmethods__ = ('abc', )") def test_call_type(self): assert type(42) is int @@ -1015,6 +1016,25 @@ __weakref__ = 42 assert 
B().__weakref__ == 42 + def test_change_dict(self): + class A(object): + pass + + a = A() + A.x = 1 + assert A.__dict__["x"] == 1 + raises(AttributeError, "del A.__dict__") + raises((AttributeError, TypeError), "A.__dict__ = {}") + + def test_mutate_dict(self): + class A(object): + pass + + a = A() + A.x = 1 + assert A.__dict__["x"] == 1 + A.__dict__['x'] = 5 + assert A.x == 5 class AppTestMutableBuiltintypes: diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -207,38 +207,28 @@ def descr_set__module(space, w_type, w_value): w_type = _check(space, w_type) - if not w_type.is_heaptype(): - raise operationerrfmt(space.w_TypeError, - "can't set %s.__module__", - w_type.name) - w_type.mutated() - w_type.dict_w['__module__'] = w_value + w_type.setdictvalue(space, '__module__', w_value) def descr_get___abstractmethods__(space, w_type): w_type = _check(space, w_type) # type itself has an __abstractmethods__ descriptor (this). 
Don't return it if not space.is_w(w_type, space.w_type): - try: - return w_type.dict_w["__abstractmethods__"] - except KeyError: - pass + w_result = w_type.getdictvalue(space, "__abstractmethods__") + if w_result is not None: + return w_result raise OperationError(space.w_AttributeError, space.wrap("__abstractmethods__")) def descr_set___abstractmethods__(space, w_type, w_new): w_type = _check(space, w_type) - w_type.dict_w["__abstractmethods__"] = w_new - w_type.mutated() + w_type.setdictvalue(space, "__abstractmethods__", w_new) w_type.set_abstract(space.is_true(w_new)) def descr_del___abstractmethods__(space, w_type): w_type = _check(space, w_type) - try: - del w_type.dict_w["__abstractmethods__"] - except KeyError: + if not w_type.deldictvalue(space, space.wrap("__abstractmethods__")): raise OperationError(space.w_AttributeError, space.wrap("__abstractmethods__")) - w_type.mutated() w_type.set_abstract(False) def descr___subclasses__(space, w_type): diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -1,21 +1,26 @@ - +from pypy.conftest import gettestobjspace class AppTestUserObject: def test_dictproxy(self): class NotEmpty(object): a = 1 - assert isinstance(NotEmpty.__dict__, dict) == False + NotEmpty.a = 1 + NotEmpty.a = 1 + NotEmpty.a = 1 + NotEmpty.a = 1 assert 'a' in NotEmpty.__dict__ assert 'a' in NotEmpty.__dict__.keys() assert 'b' not in NotEmpty.__dict__ - assert isinstance(NotEmpty.__dict__.copy(), dict) - assert NotEmpty.__dict__ == NotEmpty.__dict__.copy() - try: - NotEmpty.__dict__['b'] = 1 - except: - pass - else: - raise AssertionError, 'this should not have been writable' + NotEmpty.__dict__['b'] = 4 + assert NotEmpty.b == 4 + del NotEmpty.__dict__['b'] + assert NotEmpty.__dict__.get("b") is None + raises(TypeError, 'NotEmpty.__dict__[15] = "y"') + raises(KeyError, 'del NotEmpty.__dict__[15]') + assert 
NotEmpty.__dict__.setdefault("string", 1) == 1 + assert NotEmpty.__dict__.setdefault("string", 2) == 1 + assert NotEmpty.string == 1 + raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)') def test_dictproxyeq(self): class a(object): @@ -33,7 +38,13 @@ def test_str_repr(self): class a(object): pass - s = repr(a.__dict__) - assert s.startswith('') - s = str(a.__dict__) - assert s.startswith('{') and s.endswith('}') + s1 = repr(a.__dict__) + s2 = str(a.__dict__) + assert s1 == s2 + assert s1.startswith('{') and s1.endswith('}') + +class AppTestUserObjectMethodCache(AppTestUserObject): + def setup_class(cls): + cls.space = gettestobjspace( + **{"objspace.std.withmethodcachecounter": True}) + diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -4,15 +4,25 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import weakref_descr +from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member from pypy.objspace.std.objecttype import object_typedef -from pypy.objspace.std.dictproxyobject import W_DictProxyObject from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint +class TypeCell(W_Root): + def __init__(self, w_value=None): + self.w_value = w_value + +def unwrap_cell(space, w_value): + if (space.config.objspace.std.withtypeversion and + isinstance(w_value, TypeCell)): + return w_value.w_value + return w_value + # from compiler/misc.py MANGLE_LEN = 256 # magic constant from compile.c @@ -211,6 +221,17 @@ return compute_C3_mro(w_self.space, w_self) def 
getdictvalue(w_self, space, attr): + if space.config.objspace.std.withtypeversion: + version_tag = w_self.version_tag() + if version_tag is not None: + return unwrap_cell( + space, + w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, attr)) + w_value = w_self._getdictvalue_no_unwrapping(space, attr) + return unwrap_cell(space, w_value) + + def _getdictvalue_no_unwrapping(w_self, space, attr): w_value = w_self.dict_w.get(attr, None) if w_self.lazyloaders and w_value is None: if attr in w_self.lazyloaders: @@ -225,6 +246,48 @@ return w_value return w_value + @purefunction + def _pure_getdictvalue_no_unwrapping(w_self, space, version_tag, attr): + return w_self._getdictvalue_no_unwrapping(space, attr) + + def setdictvalue(w_self, space, name, w_value): + if (not space.config.objspace.std.mutable_builtintypes + and not w_self.is_heaptype()): + msg = "can't set attributes on type object '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_self.name) + if name == "__del__" and name not in w_self.dict_w: + msg = "a __del__ method added to an existing type will not be called" + space.warn(msg, space.w_RuntimeWarning) + if space.config.objspace.std.withtypeversion: + version_tag = w_self.version_tag() + if version_tag is not None: + w_curr = w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, name) + if w_curr is not None: + if isinstance(w_curr, TypeCell): + w_curr.w_value = w_value + return True + w_value = TypeCell(w_value) + w_self.mutated() + w_self.dict_w[name] = w_value + return True + + def deldictvalue(w_self, space, w_key): + if w_self.lazyloaders: + w_self._freeze_() # force un-lazification + key = space.str_w(w_key) + if (not space.config.objspace.std.mutable_builtintypes + and not w_self.is_heaptype()): + msg = "can't delete attributes on type object '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_self.name) + try: + del w_self.dict_w[key] + except KeyError: + return False + else: + w_self.mutated() + return True + def 
lookup(w_self, name): # note that this doesn't call __get__ on the result at all space = w_self.space @@ -280,7 +343,7 @@ space = w_self.space for w_class in w_self.mro_w: assert isinstance(w_class, W_TypeObject) - w_value = w_class.getdictvalue(space, key) + w_value = w_class._getdictvalue_no_unwrapping(space, key) if w_value is not None: return w_class, w_value return None, None @@ -293,7 +356,8 @@ if version_tag is None: tup = w_self._lookup_where(name) return tup - return w_self._pure_lookup_where_with_method_cache(name, version_tag) + w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) + return w_class, unwrap_cell(space, w_value) @purefunction def _pure_lookup_where_with_method_cache(w_self, name, version_tag): @@ -358,10 +422,10 @@ return False def getdict(w_self, space): # returning a dict-proxy! + from pypy.objspace.std.dictproxyobject import W_DictProxyObject if w_self.lazyloaders: w_self._freeze_() # force un-lazification - newdic = space.newdict(from_strdict_shared=w_self.dict_w) - return W_DictProxyObject(newdic) + return W_DictProxyObject(space, w_self) def unwrap(w_self, space): if w_self.instancetypedef.fakedcpytype is not None: @@ -395,15 +459,15 @@ def get_module(w_self): space = w_self.space if w_self.is_heaptype() and '__module__' in w_self.dict_w: - return w_self.dict_w['__module__'] + return w_self.getdictvalue(space, '__module__') else: # for non-heap types, CPython checks for a module.name in the # type name. That's a hack, so we're allowed to use a different # hack... 
if ('__module__' in w_self.dict_w and - space.is_true(space.isinstance(w_self.dict_w['__module__'], + space.is_true(space.isinstance(w_self.getdictvalue(space, '__module__'), space.w_str))): - return w_self.dict_w['__module__'] + return w_self.getdictvalue(space, '__module__') return space.wrap('__builtin__') def get_module_type_name(w_self): @@ -800,52 +864,9 @@ "type object '%s' has no attribute '%s'", w_type.name, name) -def setattr__Type_ANY_ANY(space, w_type, w_name, w_value): - # Note. This is exactly the same thing as descroperation.descr__setattr__, - # but it is needed at bootstrap to avoid a call to w_type.getdict() which - # would un-lazify the whole type. - name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.set(w_descr, w_type, w_value) - return - - if (not space.config.objspace.std.mutable_builtintypes - and not w_type.is_heaptype()): - msg = "can't set attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) - if name == "__del__" and name not in w_type.dict_w: - msg = "a __del__ method added to an existing type will not be called" - space.warn(msg, space.w_RuntimeWarning) - w_type.mutated() - w_type.dict_w[name] = w_value - def eq__Type_Type(space, w_self, w_other): return space.is_(w_self, w_other) -def delattr__Type_ANY(space, w_type, w_name): - if w_type.lazyloaders: - w_type._freeze_() # force un-lazification - name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.delete(w_descr, w_type) - return - if (not space.config.objspace.std.mutable_builtintypes - and not w_type.is_heaptype()): - msg = "can't delete attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) - try: - del w_type.dict_w[name] - except KeyError: - raise OperationError(space.w_AttributeError, w_name) - else: - w_type.mutated() - return - - # 
____________________________________________________________ From commits-noreply at bitbucket.org Wed Apr 27 11:27:31 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 11:27:31 +0200 (CEST) Subject: [pypy-svn] pypy merge-stdlib-2.7.1: (antocuni, berdario) make a branch to merge the stdlib 2.7.1 Message-ID: <20110427092731.9F6D3282BEC@codespeak.net> Author: Antonio Cuni Branch: merge-stdlib-2.7.1 Changeset: r43656:3350fb094c26 Date: 2011-04-27 11:00 +0200 http://bitbucket.org/pypy/pypy/changeset/3350fb094c26/ Log: (antocuni, berdario) make a branch to merge the stdlib 2.7.1 From commits-noreply at bitbucket.org Wed Apr 27 11:28:28 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 11:28:28 +0200 (CEST) Subject: [pypy-svn] pypy merge-stdlib-2.7.1: (antocuni, berdario) merge the 2.7.1 version of the stdlib Message-ID: <20110427092828.95418282BEC@codespeak.net> Author: Antonio Cuni Branch: merge-stdlib-2.7.1 Changeset: r43657:a9a6d0d17124 Date: 2011-04-27 10:59 +0200 http://bitbucket.org/pypy/pypy/changeset/a9a6d0d17124/ Log: (antocuni, berdario) merge the 2.7.1 version of the stdlib At the moment, the directory is still named 2.7.0, we will rename it in a later checkin. 
The changes in modified-2.7.0 have been merged using the following 3 -way-merge command for each file under the modified-2.7.0 directory: merge -A UserDict.py ../2.7.0/UserDict.py ../../../cpython/Lib/UserDict.py diff --git a/lib-python/2.7.0/test/test_cookielib.py b/lib-python/2.7.0/test/test_cookielib.py --- a/lib-python/2.7.0/test/test_cookielib.py +++ b/lib-python/2.7.0/test/test_cookielib.py @@ -18,10 +18,10 @@ base = 1019227000 day = 24*3600 - self.assertEquals(time2isoz(base), "2002-04-19 14:36:40Z") - self.assertEquals(time2isoz(base+day), "2002-04-20 14:36:40Z") - self.assertEquals(time2isoz(base+2*day), "2002-04-21 14:36:40Z") - self.assertEquals(time2isoz(base+3*day), "2002-04-22 14:36:40Z") + self.assertEqual(time2isoz(base), "2002-04-19 14:36:40Z") + self.assertEqual(time2isoz(base+day), "2002-04-20 14:36:40Z") + self.assertEqual(time2isoz(base+2*day), "2002-04-21 14:36:40Z") + self.assertEqual(time2isoz(base+3*day), "2002-04-22 14:36:40Z") az = time2isoz() bz = time2isoz(500000) @@ -35,13 +35,13 @@ def parse_date(text): return time.gmtime(http2time(text))[:6] - self.assertEquals(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0)) + self.assertEqual(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0)) # this test will break around year 2070 - self.assertEquals(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0)) + self.assertEqual(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0)) # this test will break around year 2048 - self.assertEquals(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0)) + self.assertEqual(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0)) def test_http2time_formats(self): from cookielib import http2time, time2isoz @@ -71,8 +71,8 @@ test_t = 760233600 # assume broken POSIX counting of seconds result = time2isoz(test_t) expected = "1994-02-03 00:00:00Z" - self.assertEquals(result, expected, - "%s => '%s' (%s)" % (test_t, result, expected)) + self.assertEqual(result, expected, + "%s => '%s' (%s)" % (test_t, result, expected)) for s in tests: 
t = http2time(s) @@ -114,7 +114,7 @@ 'foo=bar; expires=01 Jan 2040 22:23:32 GMT', 'foo=bar; expires="01 Jan 2040 22:23:32 GMT"', ]: - self.assertEquals(parse_ns_headers([hdr]), expected) + self.assertEqual(parse_ns_headers([hdr]), expected) def test_parse_ns_headers_version(self): from cookielib import parse_ns_headers @@ -125,7 +125,7 @@ 'foo=bar; version="1"', 'foo=bar; Version="1"', ]: - self.assertEquals(parse_ns_headers([hdr]), expected) + self.assertEqual(parse_ns_headers([hdr]), expected) def test_parse_ns_headers_special_names(self): # names such as 'expires' are not special in first name=value pair @@ -135,15 +135,15 @@ # Cookie with name 'expires' hdr = 'expires=01 Jan 2040 22:23:32 GMT' expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]] - self.assertEquals(parse_ns_headers([hdr]), expected) + self.assertEqual(parse_ns_headers([hdr]), expected) def test_join_header_words(self): from cookielib import join_header_words joined = join_header_words([[("foo", None), ("bar", "baz")]]) - self.assertEquals(joined, "foo; bar=baz") + self.assertEqual(joined, "foo; bar=baz") - self.assertEquals(join_header_words([[]]), "") + self.assertEqual(join_header_words([[]]), "") def test_split_header_words(self): from cookielib import split_header_words @@ -175,7 +175,7 @@ f = StringIO.StringIO() traceback.print_exc(None, f) result = "(error -- traceback follows)\n\n%s" % f.getvalue() - self.assertEquals(result, expect, """ + self.assertEqual(result, expect, """ When parsing: '%s' Expected: '%s' Got: '%s' @@ -209,7 +209,7 @@ for arg, expect in tests: input = split_header_words([arg]) res = join_header_words(input) - self.assertEquals(res, expect, """ + self.assertEqual(res, expect, """ When parsing: '%s' Expected: '%s' Got: '%s' @@ -380,11 +380,11 @@ interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/') cookie = c._cookies["www.acme.com"]["/"]["eggs"] self.assertTrue(cookie.value is None) - self.assertEquals(cookie.name, "eggs") + 
self.assertEqual(cookie.name, "eggs") cookie = c._cookies["www.acme.com"]['/foo/']['"spam"'] self.assertTrue(cookie.value is None) - self.assertEquals(cookie.name, '"spam"') - self.assertEquals(lwp_cookie_str(cookie), ( + self.assertEqual(cookie.name, '"spam"') + self.assertEqual(lwp_cookie_str(cookie), ( r'"spam"; path="/foo/"; domain="www.acme.com"; ' 'path_spec; discard; version=0')) old_str = repr(c) @@ -395,13 +395,13 @@ finally: os.unlink(c.filename) # cookies unchanged apart from lost info re. whether path was specified - self.assertEquals( + self.assertEqual( repr(c), re.sub("path_specified=%s" % True, "path_specified=%s" % False, old_str) ) - self.assertEquals(interact_netscape(c, "http://www.acme.com/foo/"), - '"spam"; eggs') + self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"), + '"spam"; eggs') def test_rfc2109_handling(self): # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies, @@ -449,18 +449,18 @@ 'expires="Foo Bar 25 33:22:11 3022"') cookie = c._cookies[".acme.com"]["/"]["spam"] - self.assertEquals(cookie.domain, ".acme.com") + self.assertEqual(cookie.domain, ".acme.com") self.assertTrue(cookie.domain_specified) - self.assertEquals(cookie.port, DEFAULT_HTTP_PORT) + self.assertEqual(cookie.port, DEFAULT_HTTP_PORT) self.assertTrue(not cookie.port_specified) # case is preserved self.assertTrue(cookie.has_nonstandard_attr("blArgh") and not cookie.has_nonstandard_attr("blargh")) cookie = c._cookies["www.acme.com"]["/"]["ni"] - self.assertEquals(cookie.domain, "www.acme.com") + self.assertEqual(cookie.domain, "www.acme.com") self.assertTrue(not cookie.domain_specified) - self.assertEquals(cookie.port, "80,8080") + self.assertEqual(cookie.port, "80,8080") self.assertTrue(cookie.port_specified) cookie = c._cookies["www.acme.com"]["/"]["nini"] @@ -494,13 +494,13 @@ future = time2netscape(time.time()+3600) interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' % future) - self.assertEquals(len(c), 1) + 
self.assertEqual(len(c), 1) now = time2netscape(time.time()-1) # ... and if in past or present, discard it interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' % now) h = interact_netscape(c, "http://www.acme.com/") - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) self.assertTrue('spam="bar"' in h and "foo" not in h) # max-age takes precedence over expires, and zero max-age is request to @@ -509,19 +509,19 @@ future) interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' % future) - self.assertEquals(len(c), 3) + self.assertEqual(len(c), 3) interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; ' 'expires=%s; max-age=0' % future) interact_netscape(c, "http://www.acme.com/", 'bar="bar"; ' 'max-age=0; expires=%s' % future) h = interact_netscape(c, "http://www.acme.com/") - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) # test expiry at end of session for cookies with no expires attribute interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"') - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) c.clear_session_cookies() - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) self.assertIn('spam="bar"', h) # XXX RFC 2965 expiry rules (some apply to V0 too) @@ -576,7 +576,7 @@ # default path does not include query, so is "/", not "/?spam" self.assertIn("/", cj._cookies["example.com"]) # cookie is sent back to the same URI - self.assertEquals(interact_netscape(cj, uri), value) + self.assertEqual(interact_netscape(cj, uri), value) def test_escape_path(self): from cookielib import escape_path @@ -600,7 +600,7 @@ (u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded ] for arg, result in cases: - self.assertEquals(escape_path(arg), result) + self.assertEqual(escape_path(arg), result) def test_request_path(self): from urllib2 import Request @@ -608,25 +608,25 @@ # with parameters req = Request("http://www.example.com/rheum/rhaponticum;" "foo=bar;sing=song?apples=pears&spam=eggs#ni") - 
self.assertEquals(request_path(req), - "/rheum/rhaponticum;foo=bar;sing=song") + self.assertEqual(request_path(req), + "/rheum/rhaponticum;foo=bar;sing=song") # without parameters req = Request("http://www.example.com/rheum/rhaponticum?" "apples=pears&spam=eggs#ni") - self.assertEquals(request_path(req), "/rheum/rhaponticum") + self.assertEqual(request_path(req), "/rheum/rhaponticum") # missing final slash req = Request("http://www.example.com") - self.assertEquals(request_path(req), "/") + self.assertEqual(request_path(req), "/") def test_request_port(self): from urllib2 import Request from cookielib import request_port, DEFAULT_HTTP_PORT req = Request("http://www.acme.com:1234/", headers={"Host": "www.acme.com:4321"}) - self.assertEquals(request_port(req), "1234") + self.assertEqual(request_port(req), "1234") req = Request("http://www.acme.com/", headers={"Host": "www.acme.com:4321"}) - self.assertEquals(request_port(req), DEFAULT_HTTP_PORT) + self.assertEqual(request_port(req), DEFAULT_HTTP_PORT) def test_request_host(self): from urllib2 import Request @@ -636,20 +636,20 @@ headers={"Host": "www.acme.com:80"}) # libwww-perl wants this response, but that seems wrong (RFC 2616, # section 5.2, point 1., and RFC 2965 section 1, paragraph 3) - #self.assertEquals(request_host(req), "www.acme.com") - self.assertEquals(request_host(req), "1.1.1.1") + #self.assertEqual(request_host(req), "www.acme.com") + self.assertEqual(request_host(req), "1.1.1.1") req = Request("http://www.acme.com/", headers={"Host": "irrelevant.com"}) - self.assertEquals(request_host(req), "www.acme.com") + self.assertEqual(request_host(req), "www.acme.com") # not actually sure this one is valid Request object, so maybe should # remove test for no host in url in request_host function? 
req = Request("/resource.html", headers={"Host": "www.acme.com"}) - self.assertEquals(request_host(req), "www.acme.com") + self.assertEqual(request_host(req), "www.acme.com") # port shouldn't be in request-host req = Request("http://www.acme.com:2345/resource.html", headers={"Host": "www.acme.com:5432"}) - self.assertEquals(request_host(req), "www.acme.com") + self.assertEqual(request_host(req), "www.acme.com") def test_is_HDN(self): from cookielib import is_HDN @@ -664,14 +664,14 @@ def test_reach(self): from cookielib import reach - self.assertEquals(reach("www.acme.com"), ".acme.com") - self.assertEquals(reach("acme.com"), "acme.com") - self.assertEquals(reach("acme.local"), ".local") - self.assertEquals(reach(".local"), ".local") - self.assertEquals(reach(".com"), ".com") - self.assertEquals(reach("."), ".") - self.assertEquals(reach(""), "") - self.assertEquals(reach("192.168.0.1"), "192.168.0.1") + self.assertEqual(reach("www.acme.com"), ".acme.com") + self.assertEqual(reach("acme.com"), "acme.com") + self.assertEqual(reach("acme.local"), ".local") + self.assertEqual(reach(".local"), ".local") + self.assertEqual(reach(".com"), ".com") + self.assertEqual(reach("."), ".") + self.assertEqual(reach(""), "") + self.assertEqual(reach("192.168.0.1"), "192.168.0.1") def test_domain_match(self): from cookielib import domain_match, user_domain_match @@ -719,7 +719,7 @@ c = CookieJar() interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"') - self.assertEquals(len(c), 0) + self.assertEqual(len(c), 0) def test_strict_domain(self): # Cookies whose domain is a country-code tld like .co.uk should @@ -731,11 +731,11 @@ interact_netscape(cj, "http://example.co.uk/", 'no=problemo') interact_netscape(cj, "http://example.co.uk/", 'okey=dokey; Domain=.example.co.uk') - self.assertEquals(len(cj), 2) + self.assertEqual(len(cj), 2) for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]: interact_netscape(cj, "http://example.%s/" % pseudo_tld, 
'spam=eggs; Domain=.co.uk') - self.assertEquals(len(cj), 2) + self.assertEqual(len(cj), 2) def test_two_component_domain_ns(self): # Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain @@ -747,17 +747,17 @@ # two-component V0 domain is OK interact_netscape(c, "http://foo.net/", 'ns=bar') - self.assertEquals(len(c), 1) - self.assertEquals(c._cookies["foo.net"]["/"]["ns"].value, "bar") - self.assertEquals(interact_netscape(c, "http://foo.net/"), "ns=bar") + self.assertEqual(len(c), 1) + self.assertEqual(c._cookies["foo.net"]["/"]["ns"].value, "bar") + self.assertEqual(interact_netscape(c, "http://foo.net/"), "ns=bar") # *will* be returned to any other domain (unlike RFC 2965)... - self.assertEquals(interact_netscape(c, "http://www.foo.net/"), - "ns=bar") + self.assertEqual(interact_netscape(c, "http://www.foo.net/"), + "ns=bar") # ...unless requested otherwise pol = DefaultCookiePolicy( strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain) c.set_policy(pol) - self.assertEquals(interact_netscape(c, "http://www.foo.net/"), "") + self.assertEqual(interact_netscape(c, "http://www.foo.net/"), "") # unlike RFC 2965, even explicit two-component domain is OK, # because .foo.net matches foo.net @@ -766,17 +766,17 @@ # even if starts with a dot -- in NS rules, .foo.net matches foo.net! 
interact_netscape(c, "http://foo.net/foo/bar/", 'spam2=eggs; domain=.foo.net') - self.assertEquals(len(c), 3) - self.assertEquals(c._cookies[".foo.net"]["/foo"]["spam1"].value, - "eggs") - self.assertEquals(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value, - "eggs") - self.assertEquals(interact_netscape(c, "http://foo.net/foo/bar/"), - "spam2=eggs; spam1=eggs; ns=bar") + self.assertEqual(len(c), 3) + self.assertEqual(c._cookies[".foo.net"]["/foo"]["spam1"].value, + "eggs") + self.assertEqual(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value, + "eggs") + self.assertEqual(interact_netscape(c, "http://foo.net/foo/bar/"), + "spam2=eggs; spam1=eggs; ns=bar") # top-level domain is too general interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net') - self.assertEquals(len(c), 3) + self.assertEqual(len(c), 3) ## # Netscape protocol doesn't allow non-special top level domains (such ## # as co.uk) in the domain attribute unless there are at least three @@ -784,8 +784,8 @@ # Oh yes it does! Real implementations don't check this, and real # cookies (of course) rely on that behaviour. 
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk') -## self.assertEquals(len(c), 2) - self.assertEquals(len(c), 4) +## self.assertEqual(len(c), 2) + self.assertEqual(len(c), 4) def test_two_component_domain_rfc2965(self): from cookielib import CookieJar, DefaultCookiePolicy @@ -795,43 +795,43 @@ # two-component V1 domain is OK interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"') - self.assertEquals(len(c), 1) - self.assertEquals(c._cookies["foo.net"]["/"]["foo"].value, "bar") - self.assertEquals(interact_2965(c, "http://foo.net/"), - "$Version=1; foo=bar") + self.assertEqual(len(c), 1) + self.assertEqual(c._cookies["foo.net"]["/"]["foo"].value, "bar") + self.assertEqual(interact_2965(c, "http://foo.net/"), + "$Version=1; foo=bar") # won't be returned to any other domain (because domain was implied) - self.assertEquals(interact_2965(c, "http://www.foo.net/"), "") + self.assertEqual(interact_2965(c, "http://www.foo.net/"), "") # unless domain is given explicitly, because then it must be # rewritten to start with a dot: foo.net --> .foo.net, which does # not domain-match foo.net interact_2965(c, "http://foo.net/foo", 'spam=eggs; domain=foo.net; path=/foo; Version="1"') - self.assertEquals(len(c), 1) - self.assertEquals(interact_2965(c, "http://foo.net/foo"), - "$Version=1; foo=bar") + self.assertEqual(len(c), 1) + self.assertEqual(interact_2965(c, "http://foo.net/foo"), + "$Version=1; foo=bar") # explicit foo.net from three-component domain www.foo.net *does* get # set, because .foo.net domain-matches .foo.net interact_2965(c, "http://www.foo.net/foo/", 'spam=eggs; domain=foo.net; Version="1"') - self.assertEquals(c._cookies[".foo.net"]["/foo/"]["spam"].value, - "eggs") - self.assertEquals(len(c), 2) - self.assertEquals(interact_2965(c, "http://foo.net/foo/"), - "$Version=1; foo=bar") - self.assertEquals(interact_2965(c, "http://www.foo.net/foo/"), - '$Version=1; spam=eggs; $Domain="foo.net"') + 
self.assertEqual(c._cookies[".foo.net"]["/foo/"]["spam"].value, + "eggs") + self.assertEqual(len(c), 2) + self.assertEqual(interact_2965(c, "http://foo.net/foo/"), + "$Version=1; foo=bar") + self.assertEqual(interact_2965(c, "http://www.foo.net/foo/"), + '$Version=1; spam=eggs; $Domain="foo.net"') # top-level domain is too general interact_2965(c, "http://foo.net/", 'ni="ni"; domain=".net"; Version="1"') - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) # RFC 2965 doesn't require blocking this interact_2965(c, "http://foo.co.uk/", 'nasty=trick; domain=.co.uk; Version="1"') - self.assertEquals(len(c), 3) + self.assertEqual(len(c), 3) def test_domain_allow(self): from cookielib import CookieJar, DefaultCookiePolicy @@ -845,24 +845,24 @@ headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"] res = FakeResponse(headers, "http://acme.com/") c.extract_cookies(res, req) - self.assertEquals(len(c), 0) + self.assertEqual(len(c), 0) req = Request("http://www.acme.com/") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) req = Request("http://www.coyote.com/") res = FakeResponse(headers, "http://www.coyote.com/") c.extract_cookies(res, req) - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) # set a cookie with non-allowed domain... req = Request("http://www.coyote.com/") res = FakeResponse(headers, "http://www.coyote.com/") cookies = c.make_cookies(res, req) c.set_cookie(cookies[0]) - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) # ... 
and check is doesn't get returned c.add_cookie_header(req) self.assertTrue(not req.has_header("Cookie")) @@ -879,17 +879,17 @@ req = Request("http://www.acme.com/") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) - self.assertEquals(len(c), 0) + self.assertEqual(len(c), 0) p = pol.set_blocked_domains(["acme.com"]) c.extract_cookies(res, req) - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) c.clear() req = Request("http://www.roadrunner.net/") res = FakeResponse(headers, "http://www.roadrunner.net/") c.extract_cookies(res, req) - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) req = Request("http://www.roadrunner.net/") c.add_cookie_header(req) self.assertTrue((req.has_header("Cookie") and @@ -898,14 +898,14 @@ c.clear() pol.set_blocked_domains([".acme.com"]) c.extract_cookies(res, req) - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) # set a cookie with blocked domain... req = Request("http://www.acme.com/") res = FakeResponse(headers, "http://www.acme.com/") cookies = c.make_cookies(res, req) c.set_cookie(cookies[0]) - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) # ... and check is doesn't get returned c.add_cookie_header(req) self.assertTrue(not req.has_header("Cookie")) @@ -940,7 +940,7 @@ c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True)) interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1') h = interact_2965(c, "http://www.acme.com/") - self.assertEquals(h, r'$Version=1; foo=\\b\"a\"r') + self.assertEqual(h, r'$Version=1; foo=\\b\"a\"r') def test_missing_final_slash(self): # Missing slash from request URL's abs_path should be assumed present. 
@@ -950,7 +950,7 @@ c = CookieJar(DefaultCookiePolicy(rfc2965=True)) interact_2965(c, url, "foo=bar; Version=1") req = Request(url) - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) c.add_cookie_header(req) self.assertTrue(req.has_header("Cookie")) @@ -1070,33 +1070,33 @@ i = 0 for c in cs: self.assertIsInstance(c, Cookie) - self.assertEquals(c.version, versions[i]) - self.assertEquals(c.name, names[i]) - self.assertEquals(c.domain, domains[i]) - self.assertEquals(c.path, paths[i]) + self.assertEqual(c.version, versions[i]) + self.assertEqual(c.name, names[i]) + self.assertEqual(c.domain, domains[i]) + self.assertEqual(c.path, paths[i]) i = i + 1 def test_parse_ns_headers(self): from cookielib import parse_ns_headers # missing domain value (invalid cookie) - self.assertEquals( + self.assertEqual( parse_ns_headers(["foo=bar; path=/; domain"]), [[("foo", "bar"), ("path", "/"), ("domain", None), ("version", "0")]] ) # invalid expires value - self.assertEquals( + self.assertEqual( parse_ns_headers(["foo=bar; expires=Foo Bar 12 33:22:11 2000"]), [[("foo", "bar"), ("expires", None), ("version", "0")]] ) # missing cookie value (valid cookie) - self.assertEquals( + self.assertEqual( parse_ns_headers(["foo"]), [[("foo", None), ("version", "0")]] ) # shouldn't add version if header is empty - self.assertEquals(parse_ns_headers([""]), []) + self.assertEqual(parse_ns_headers([""]), []) def test_bad_cookie_header(self): @@ -1122,7 +1122,7 @@ ]: c = cookiejar_from_cookie_headers(headers) # these bad cookies shouldn't be set - self.assertEquals(len(c), 0) + self.assertEqual(len(c), 0) # cookie with invalid expires is treated as session cookie headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"] @@ -1267,7 +1267,7 @@ req = Request("http://www.acme.com/") c.add_cookie_header(req) - self.assertEquals(req.get_header("Cookie"), + self.assertEqual(req.get_header("Cookie"), "PART_NUMBER=ROCKET_LAUNCHER_0001") headers.append( @@ -1471,40 +1471,40 @@ # legal domain 
cookie = interact_2965(c, "http://www.acme.com", 'ping=pong; domain="acme.com"; version=1') - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) # illegal domain (host prefix "www.a" contains a dot) cookie = interact_2965(c, "http://www.a.acme.com", 'whiz=bang; domain="acme.com"; version=1') - self.assertEquals(len(c), 1) + self.assertEqual(len(c), 1) # legal domain cookie = interact_2965(c, "http://www.a.acme.com", 'wow=flutter; domain=".a.acme.com"; version=1') - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) # can't partially match an IP-address cookie = interact_2965(c, "http://125.125.125.125", 'zzzz=ping; domain="125.125.125"; version=1') - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) # illegal path (must be prefix of request path) cookie = interact_2965(c, "http://www.sol.no", 'blah=rhubarb; domain=".sol.no"; path="/foo"; ' 'version=1') - self.assertEquals(len(c), 2) + self.assertEqual(len(c), 2) # legal path cookie = interact_2965(c, "http://www.sol.no/foo/bar", 'bing=bong; domain=".sol.no"; path="/foo"; ' 'version=1') - self.assertEquals(len(c), 3) + self.assertEqual(len(c), 3) # illegal port (request-port not in list) cookie = interact_2965(c, "http://www.sol.no", 'whiz=ffft; domain=".sol.no"; port="90,100"; ' 'version=1') - self.assertEquals(len(c), 3) + self.assertEqual(len(c), 3) # legal port cookie = interact_2965( @@ -1512,13 +1512,13 @@ r'bang=wallop; version=1; domain=".sol.no"; ' r'port="90,100, 80,8080"; ' r'max-age=100; Comment = "Just kidding! 
(\"|\\\\) "') - self.assertEquals(len(c), 4) + self.assertEqual(len(c), 4) # port attribute without any value (current port) cookie = interact_2965(c, "http://www.sol.no", 'foo9=bar; version=1; domain=".sol.no"; port; ' 'max-age=100;') - self.assertEquals(len(c), 5) + self.assertEqual(len(c), 5) # encoded path # LWP has this test, but unescaping allowed path characters seems @@ -1529,7 +1529,7 @@ # character: cookie = interact_2965(c, "http://www.sol.no/ # -# $Id: test_dbtables.py 79285 2010-03-22 14:22:26Z jesus.cea $ +# $Id$ import os, re, sys @@ -84,8 +84,8 @@ colval = pickle.loads(values[0][colname]) else : colval = pickle.loads(bytes(values[0][colname], "iso8859-1")) - self.assert_(colval > 3.141) - self.assert_(colval < 3.142) + self.assertTrue(colval > 3.141) + self.assertTrue(colval < 3.142) def test02(self): diff --git a/lib-python/2.7.0/test/test_fileio.py b/lib-python/2.7.0/test/test_fileio.py --- a/lib-python/2.7.0/test/test_fileio.py +++ b/lib-python/2.7.0/test/test_fileio.py @@ -31,31 +31,31 @@ # verify weak references p = proxy(self.f) p.write(bytes(range(10))) - self.assertEquals(self.f.tell(), p.tell()) + self.assertEqual(self.f.tell(), p.tell()) self.f.close() self.f = None self.assertRaises(ReferenceError, getattr, p, 'tell') def testSeekTell(self): self.f.write(bytes(range(20))) - self.assertEquals(self.f.tell(), 20) + self.assertEqual(self.f.tell(), 20) self.f.seek(0) - self.assertEquals(self.f.tell(), 0) + self.assertEqual(self.f.tell(), 0) self.f.seek(10) - self.assertEquals(self.f.tell(), 10) + self.assertEqual(self.f.tell(), 10) self.f.seek(5, 1) - self.assertEquals(self.f.tell(), 15) + self.assertEqual(self.f.tell(), 15) self.f.seek(-5, 1) - self.assertEquals(self.f.tell(), 10) + self.assertEqual(self.f.tell(), 10) self.f.seek(-5, 2) - self.assertEquals(self.f.tell(), 15) + self.assertEqual(self.f.tell(), 15) def testAttributes(self): # verify expected attributes exist f = self.f - self.assertEquals(f.mode, "wb") - 
self.assertEquals(f.closed, False) + self.assertEqual(f.mode, "wb") + self.assertEqual(f.closed, False) # verify the attributes are readonly for attr in 'mode', 'closed': @@ -69,7 +69,7 @@ a = array(b'b', b'x'*10) self.f = _FileIO(TESTFN, 'r') n = self.f.readinto(a) - self.assertEquals(array(b'b', [1, 2]), a[:n]) + self.assertEqual(array(b'b', [1, 2]), a[:n]) def test_none_args(self): self.f.write(b"hi\nbye\nabc") @@ -81,19 +81,19 @@ self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"]) def testRepr(self): - self.assertEquals(repr(self.f), "<_io.FileIO name=%r mode='%s'>" - % (self.f.name, self.f.mode)) + self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>" + % (self.f.name, self.f.mode)) del self.f.name - self.assertEquals(repr(self.f), "<_io.FileIO fd=%r mode='%s'>" - % (self.f.fileno(), self.f.mode)) + self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>" + % (self.f.fileno(), self.f.mode)) self.f.close() - self.assertEquals(repr(self.f), "<_io.FileIO [closed]>") + self.assertEqual(repr(self.f), "<_io.FileIO [closed]>") def testErrors(self): f = self.f self.assertTrue(not f.isatty()) self.assertTrue(not f.closed) - #self.assertEquals(f.name, TESTFN) + #self.assertEqual(f.name, TESTFN) self.assertRaises(ValueError, f.read, 10) # Open for reading f.close() self.assertTrue(f.closed) @@ -236,22 +236,22 @@ def testAbles(self): try: f = _FileIO(TESTFN, "w") - self.assertEquals(f.readable(), False) - self.assertEquals(f.writable(), True) - self.assertEquals(f.seekable(), True) + self.assertEqual(f.readable(), False) + self.assertEqual(f.writable(), True) + self.assertEqual(f.seekable(), True) f.close() f = _FileIO(TESTFN, "r") - self.assertEquals(f.readable(), True) - self.assertEquals(f.writable(), False) - self.assertEquals(f.seekable(), True) + self.assertEqual(f.readable(), True) + self.assertEqual(f.writable(), False) + self.assertEqual(f.seekable(), True) f.close() f = _FileIO(TESTFN, "a+") - self.assertEquals(f.readable(), True) - 
self.assertEquals(f.writable(), True) - self.assertEquals(f.seekable(), True) - self.assertEquals(f.isatty(), False) + self.assertEqual(f.readable(), True) + self.assertEqual(f.writable(), True) + self.assertEqual(f.seekable(), True) + self.assertEqual(f.isatty(), False) f.close() if sys.platform != "win32": @@ -263,14 +263,14 @@ # OS'es that don't support /dev/tty. pass else: - self.assertEquals(f.readable(), False) - self.assertEquals(f.writable(), True) + self.assertEqual(f.readable(), False) + self.assertEqual(f.writable(), True) if sys.platform != "darwin" and \ 'bsd' not in sys.platform and \ not sys.platform.startswith('sunos'): # Somehow /dev/tty appears seekable on some BSDs - self.assertEquals(f.seekable(), False) - self.assertEquals(f.isatty(), True) + self.assertEqual(f.seekable(), False) + self.assertEqual(f.isatty(), True) f.close() finally: os.unlink(TESTFN) @@ -304,7 +304,7 @@ f.write(b"abc") f.close() with open(TESTFN, "rb") as f: - self.assertEquals(f.read(), b"abc") + self.assertEqual(f.read(), b"abc") finally: os.unlink(TESTFN) diff --git a/lib-python/2.7.0/distutils/command/build_ext.py b/lib-python/2.7.0/distutils/command/build_ext.py --- a/lib-python/2.7.0/distutils/command/build_ext.py +++ b/lib-python/2.7.0/distutils/command/build_ext.py @@ -6,7 +6,7 @@ # This module should be kept compatible with Python 2.1. 
-__revision__ = "$Id: build_ext.py 84683 2010-09-10 20:03:17Z antoine.pitrou $" +__revision__ = "$Id$" import sys, os, string, re from types import * diff --git a/lib-python/2.7.0/test/test_httplib.py b/lib-python/2.7.0/test/test_httplib.py --- a/lib-python/2.7.0/test/test_httplib.py +++ b/lib-python/2.7.0/test/test_httplib.py @@ -97,6 +97,26 @@ conn.putheader('Content-length',42) self.assertTrue('Content-length: 42' in conn._buffer) + def test_ipv6host_header(self): + # Default host header on IPv6 transaction should wrapped by [] if + # its actual IPv6 address + expected = 'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \ + 'Accept-Encoding: identity\r\n\r\n' + conn = httplib.HTTPConnection('[2001::]:81') + sock = FakeSocket('') + conn.sock = sock + conn.request('GET', '/foo') + self.assertTrue(sock.data.startswith(expected)) + + expected = 'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \ + 'Accept-Encoding: identity\r\n\r\n' + conn = httplib.HTTPConnection('[2001:102A::]') + sock = FakeSocket('') + conn.sock = sock + conn.request('GET', '/foo') + self.assertTrue(sock.data.startswith(expected)) + + class BasicTest(TestCase): def test_status_lines(self): # Test HTTP status lines @@ -115,7 +135,7 @@ def test_bad_status_repr(self): exc = httplib.BadStatusLine('') - self.assertEquals(repr(exc), '''BadStatusLine("\'\'",)''') + self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''') def test_partial_reads(self): # if we have a lenght, the system knows when to close itself @@ -196,13 +216,13 @@ sock = FakeSocket(None) conn.sock = sock conn.send(expected) - self.assertEquals(expected, sock.data) + self.assertEqual(expected, sock.data) sock.data = '' conn.send(array.array('c', expected)) - self.assertEquals(expected, sock.data) + self.assertEqual(expected, sock.data) sock.data = '' conn.send(StringIO.StringIO(expected)) - self.assertEquals(expected, sock.data) + self.assertEqual(expected, sock.data) def test_chunked(self): chunked_start = ( @@ -216,7 +236,7 @@ sock = 
FakeSocket(chunked_start + '0\r\n') resp = httplib.HTTPResponse(sock, method="GET") resp.begin() - self.assertEquals(resp.read(), 'hello world') + self.assertEqual(resp.read(), 'hello world') resp.close() for x in ('', 'foo\r\n'): @@ -226,7 +246,7 @@ try: resp.read() except httplib.IncompleteRead, i: - self.assertEquals(i.partial, 'hello world') + self.assertEqual(i.partial, 'hello world') self.assertEqual(repr(i),'IncompleteRead(11 bytes read)') self.assertEqual(str(i),'IncompleteRead(11 bytes read)') else: @@ -246,9 +266,9 @@ sock = FakeSocket(chunked_start + '0\r\n') resp = httplib.HTTPResponse(sock, method="HEAD") resp.begin() - self.assertEquals(resp.read(), '') - self.assertEquals(resp.status, 200) - self.assertEquals(resp.reason, 'OK') + self.assertEqual(resp.read(), '') + self.assertEqual(resp.status, 200) + self.assertEqual(resp.reason, 'OK') self.assertTrue(resp.isclosed()) def test_negative_content_length(self): @@ -256,7 +276,7 @@ 'Content-Length: -1\r\n\r\nHello\r\n') resp = httplib.HTTPResponse(sock, method="GET") resp.begin() - self.assertEquals(resp.read(), 'Hello\r\n') + self.assertEqual(resp.read(), 'Hello\r\n') resp.close() def test_incomplete_read(self): @@ -266,7 +286,7 @@ try: resp.read() except httplib.IncompleteRead as i: - self.assertEquals(i.partial, 'Hello\r\n') + self.assertEqual(i.partial, 'Hello\r\n') self.assertEqual(repr(i), "IncompleteRead(7 bytes read, 3 more expected)") self.assertEqual(str(i), @@ -301,7 +321,7 @@ class OfflineTest(TestCase): def test_responses(self): - self.assertEquals(httplib.responses[httplib.NOT_FOUND], "Not Found") + self.assertEqual(httplib.responses[httplib.NOT_FOUND], "Not Found") class SourceAddressTest(TestCase): diff --git a/lib-python/2.7.0/test/test_py3kwarn.py b/lib-python/2.7.0/test/test_py3kwarn.py --- a/lib-python/2.7.0/test/test_py3kwarn.py +++ b/lib-python/2.7.0/test/test_py3kwarn.py @@ -397,7 +397,7 @@ reset_module_registry(mod) with check_py3k_warnings() as w: mod.walk("crashers", dumbo, 
None) - self.assertEquals(str(w.message), msg) + self.assertEqual(str(w.message), msg) def test_reduce_move(self): from operator import add diff --git a/lib-python/2.7.0/distutils/dist.py b/lib-python/2.7.0/distutils/dist.py --- a/lib-python/2.7.0/distutils/dist.py +++ b/lib-python/2.7.0/distutils/dist.py @@ -4,7 +4,7 @@ being built/installed/distributed. """ -__revision__ = "$Id: dist.py 77717 2010-01-24 00:33:32Z tarek.ziade $" +__revision__ = "$Id$" import sys, os, re from email import message_from_file @@ -1101,9 +1101,11 @@ def write_pkg_info(self, base_dir): """Write the PKG-INFO file into the release tree. """ - pkg_info = open( os.path.join(base_dir, 'PKG-INFO'), 'w') - self.write_pkg_file(pkg_info) - pkg_info.close() + pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w') + try: + self.write_pkg_file(pkg_info) + finally: + pkg_info.close() def write_pkg_file(self, file): """Write the PKG-INFO format data to a file object. diff --git a/lib-python/2.7.0/distutils/core.py b/lib-python/2.7.0/distutils/core.py --- a/lib-python/2.7.0/distutils/core.py +++ b/lib-python/2.7.0/distutils/core.py @@ -6,7 +6,7 @@ really defined in distutils.dist and distutils.cmd. 
""" -__revision__ = "$Id: core.py 77704 2010-01-23 09:23:15Z tarek.ziade $" +__revision__ = "$Id$" import sys import os @@ -216,7 +216,11 @@ sys.argv[0] = script_name if script_args is not None: sys.argv[1:] = script_args - exec open(script_name, 'r').read() in g, l + f = open(script_name) + try: + exec f.read() in g, l + finally: + f.close() finally: sys.argv = save_argv _setup_stop_after = None diff --git a/lib-python/2.7.0/distutils/command/build_clib.py b/lib-python/2.7.0/distutils/command/build_clib.py --- a/lib-python/2.7.0/distutils/command/build_clib.py +++ b/lib-python/2.7.0/distutils/command/build_clib.py @@ -4,7 +4,7 @@ that is included in the module distribution and needed by an extension module.""" -__revision__ = "$Id: build_clib.py 84610 2010-09-07 22:18:34Z eric.araujo $" +__revision__ = "$Id$" # XXX this module has *lots* of code ripped-off quite transparently from diff --git a/lib-python/2.7.0/distutils/extension.py b/lib-python/2.7.0/distutils/extension.py --- a/lib-python/2.7.0/distutils/extension.py +++ b/lib-python/2.7.0/distutils/extension.py @@ -3,7 +3,7 @@ Provides the Extension class, used to describe C/C++ extension modules in setup scripts.""" -__revision__ = "$Id: extension.py 78666 2010-03-05 00:16:02Z tarek.ziade $" +__revision__ = "$Id$" import os, string, sys from types import * @@ -150,87 +150,96 @@ file = TextFile(filename, strip_comments=1, skip_blanks=1, join_lines=1, lstrip_ws=1, rstrip_ws=1) - extensions = [] + try: + extensions = [] - while 1: - line = file.readline() - if line is None: # eof - break - if _variable_rx.match(line): # VAR=VALUE, handled in first pass - continue - - if line[0] == line[-1] == "*": - file.warn("'%s' lines not handled yet" % line) - continue - - #print "original line: " + line - line = expand_makefile_vars(line, vars) - words = split_quoted(line) - #print "expanded line: " + line - - # NB. 
this parses a slightly different syntax than the old - # makesetup script: here, there must be exactly one extension per - # line, and it must be the first word of the line. I have no idea - # why the old syntax supported multiple extensions per line, as - # they all wind up being the same. - - module = words[0] - ext = Extension(module, []) - append_next_word = None - - for word in words[1:]: - if append_next_word is not None: - append_next_word.append(word) - append_next_word = None + while 1: + line = file.readline() + if line is None: # eof + break + if _variable_rx.match(line): # VAR=VALUE, handled in first pass continue - suffix = os.path.splitext(word)[1] - switch = word[0:2] ; value = word[2:] + if line[0] == line[-1] == "*": + file.warn("'%s' lines not handled yet" % line) + continue - if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"): - # hmm, should we do something about C vs. C++ sources? - # or leave it up to the CCompiler implementation to - # worry about? - ext.sources.append(word) - elif switch == "-I": - ext.include_dirs.append(value) - elif switch == "-D": - equals = string.find(value, "=") - if equals == -1: # bare "-DFOO" -- no value - ext.define_macros.append((value, None)) - else: # "-DFOO=blah" - ext.define_macros.append((value[0:equals], - value[equals+2:])) - elif switch == "-U": - ext.undef_macros.append(value) - elif switch == "-C": # only here 'cause makesetup has it! 
- ext.extra_compile_args.append(word) - elif switch == "-l": - ext.libraries.append(value) - elif switch == "-L": - ext.library_dirs.append(value) - elif switch == "-R": - ext.runtime_library_dirs.append(value) - elif word == "-rpath": - append_next_word = ext.runtime_library_dirs - elif word == "-Xlinker": - append_next_word = ext.extra_link_args - elif word == "-Xcompiler": - append_next_word = ext.extra_compile_args - elif switch == "-u": - ext.extra_link_args.append(word) - if not value: + #print "original line: " + line + line = expand_makefile_vars(line, vars) + words = split_quoted(line) + #print "expanded line: " + line + + # NB. this parses a slightly different syntax than the old + # makesetup script: here, there must be exactly one extension per + # line, and it must be the first word of the line. I have no idea + # why the old syntax supported multiple extensions per line, as + # they all wind up being the same. + + module = words[0] + ext = Extension(module, []) + append_next_word = None + + for word in words[1:]: + if append_next_word is not None: + append_next_word.append(word) + append_next_word = None + continue + + suffix = os.path.splitext(word)[1] + switch = word[0:2] ; value = word[2:] + + if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"): + # hmm, should we do something about C vs. C++ sources? + # or leave it up to the CCompiler implementation to + # worry about? + ext.sources.append(word) + elif switch == "-I": + ext.include_dirs.append(value) + elif switch == "-D": + equals = string.find(value, "=") + if equals == -1: # bare "-DFOO" -- no value + ext.define_macros.append((value, None)) + else: # "-DFOO=blah" + ext.define_macros.append((value[0:equals], + value[equals+2:])) + elif switch == "-U": + ext.undef_macros.append(value) + elif switch == "-C": # only here 'cause makesetup has it! 
+ ext.extra_compile_args.append(word) + elif switch == "-l": + ext.libraries.append(value) + elif switch == "-L": + ext.library_dirs.append(value) + elif switch == "-R": + ext.runtime_library_dirs.append(value) + elif word == "-rpath": + append_next_word = ext.runtime_library_dirs + elif word == "-Xlinker": append_next_word = ext.extra_link_args - elif suffix in (".a", ".so", ".sl", ".o", ".dylib"): - # NB. a really faithful emulation of makesetup would - # append a .o file to extra_objects only if it - # had a slash in it; otherwise, it would s/.o/.c/ - # and append it to sources. Hmmmm. - ext.extra_objects.append(word) - else: - file.warn("unrecognized argument '%s'" % word) + elif word == "-Xcompiler": + append_next_word = ext.extra_compile_args + elif switch == "-u": + ext.extra_link_args.append(word) + if not value: + append_next_word = ext.extra_link_args + elif word == "-Xcompiler": + append_next_word = ext.extra_compile_args + elif switch == "-u": + ext.extra_link_args.append(word) + if not value: + append_next_word = ext.extra_link_args + elif suffix in (".a", ".so", ".sl", ".o", ".dylib"): + # NB. a really faithful emulation of makesetup would + # append a .o file to extra_objects only if it + # had a slash in it; otherwise, it would s/.o/.c/ + # and append it to sources. Hmmmm. + ext.extra_objects.append(word) + else: + file.warn("unrecognized argument '%s'" % word) - extensions.append(ext) + extensions.append(ext) + finally: + file.close() #print "module:", module #print "source files:", source_files diff --git a/lib-python/2.7.0/distutils/errors.py b/lib-python/2.7.0/distutils/errors.py --- a/lib-python/2.7.0/distutils/errors.py +++ b/lib-python/2.7.0/distutils/errors.py @@ -8,7 +8,7 @@ This module is safe to use in "from ... 
import *" mode; it only exports symbols whose names start with "Distutils" and end with "Error".""" -__revision__ = "$Id: errors.py 75901 2009-10-28 06:45:18Z tarek.ziade $" +__revision__ = "$Id$" class DistutilsError(Exception): """The root of all Distutils evil.""" diff --git a/lib-python/2.7.0/test/test_file2k.py b/lib-python/2.7.0/test/test_file2k.py --- a/lib-python/2.7.0/test/test_file2k.py +++ b/lib-python/2.7.0/test/test_file2k.py @@ -29,7 +29,7 @@ # verify weak references p = proxy(self.f) p.write('teststring') - self.assertEquals(self.f.tell(), p.tell()) + self.assertEqual(self.f.tell(), p.tell()) self.f.close() self.f = None self.assertRaises(ReferenceError, getattr, p, 'tell') @@ -58,7 +58,7 @@ a = array('c', 'x'*10) self.f = open(TESTFN, 'rb') n = self.f.readinto(a) - self.assertEquals('12', a.tostring()[:n]) + self.assertEqual('12', a.tostring()[:n]) def testWritelinesUserList(self): # verify writelines with instance sequence @@ -67,7 +67,7 @@ self.f.close() self.f = open(TESTFN, 'rb') buf = self.f.read() - self.assertEquals(buf, '12') + self.assertEqual(buf, '12') def testWritelinesIntegers(self): # verify writelines with integers @@ -94,7 +94,7 @@ self.f.close() self.f = open(TESTFN, 'rb') f = self.f - self.assertEquals(f.name, TESTFN) + self.assertEqual(f.name, TESTFN) self.assertTrue(not f.isatty()) self.assertTrue(not f.closed) @@ -125,12 +125,12 @@ self.assertRaises(ValueError, self.f.writelines, []) # file is closed, __exit__ shouldn't do anything - self.assertEquals(self.f.__exit__(None, None, None), None) + self.assertEqual(self.f.__exit__(None, None, None), None) # it must also return None if an exception was given try: 1 // 0 except: - self.assertEquals(self.f.__exit__(*sys.exc_info()), None) + self.assertEqual(self.f.__exit__(*sys.exc_info()), None) def testReadWhenWriting(self): self.assertRaises(IOError, self.f.read) @@ -261,7 +261,7 @@ f.close() except IOError, msg: self.fail('error setting buffer size %d: %s' % (s, str(msg))) - 
self.assertEquals(d, s) + self.assertEqual(d, s) def testTruncateOnWindows(self): os.unlink(TESTFN) @@ -621,7 +621,7 @@ try: print except RuntimeError as e: - self.assertEquals(str(e), "lost sys.stdout") + self.assertEqual(str(e), "lost sys.stdout") else: self.fail("Expected RuntimeError") finally: diff --git a/lib-python/2.7.0/distutils/command/install_scripts.py b/lib-python/2.7.0/distutils/command/install_scripts.py --- a/lib-python/2.7.0/distutils/command/install_scripts.py +++ b/lib-python/2.7.0/distutils/command/install_scripts.py @@ -5,7 +5,7 @@ # contributed by Bastian Kleineidam -__revision__ = "$Id: install_scripts.py 68943 2009-01-25 22:09:10Z tarek.ziade $" +__revision__ = "$Id$" import os from distutils.core import Command diff --git a/lib-python/2.7.0/bsddb/test/test_distributed_transactions.py b/lib-python/2.7.0/bsddb/test/test_distributed_transactions.py --- a/lib-python/2.7.0/bsddb/test/test_distributed_transactions.py +++ b/lib-python/2.7.0/bsddb/test/test_distributed_transactions.py @@ -88,9 +88,9 @@ # Get "to be recovered" transactions but # let them be garbage collected. recovered_txns=self.dbenv.txn_recover() - self.assertEquals(self.num_txns,len(recovered_txns)) + self.assertEqual(self.num_txns,len(recovered_txns)) for gid,txn in recovered_txns : - self.assert_(gid in txns) + self.assertTrue(gid in txns) del txn del recovered_txns @@ -99,7 +99,7 @@ # Get "to be recovered" transactions. Commit, abort and # discard them. recovered_txns=self.dbenv.txn_recover() - self.assertEquals(self.num_txns,len(recovered_txns)) + self.assertEqual(self.num_txns,len(recovered_txns)) discard_txns=set() committed_txns=set() state=0 @@ -122,7 +122,7 @@ # Verify the discarded transactions are still # around, and dispose them. 
recovered_txns=self.dbenv.txn_recover() - self.assertEquals(len(discard_txns),len(recovered_txns)) + self.assertEqual(len(discard_txns),len(recovered_txns)) for gid,txn in recovered_txns : txn.abort() del txn @@ -133,8 +133,8 @@ # Be sure there are not pending transactions. # Check also database size. recovered_txns=self.dbenv.txn_recover() - self.assert_(len(recovered_txns)==0) - self.assertEquals(len(committed_txns),self.db.stat()["nkeys"]) + self.assertTrue(len(recovered_txns)==0) + self.assertEqual(len(committed_txns),self.db.stat()["nkeys"]) class DBTxn_distributedSYNC(DBTxn_distributed): nosync=False diff --git a/lib-python/2.7.0/test/test_minidom.py b/lib-python/2.7.0/test/test_minidom.py --- a/lib-python/2.7.0/test/test_minidom.py +++ b/lib-python/2.7.0/test/test_minidom.py @@ -748,7 +748,7 @@ def check_clone_pi(self, deep, testName): doc = parseString("") pi = doc.firstChild - self.assertEquals(pi.nodeType, Node.PROCESSING_INSTRUCTION_NODE) + self.assertEqual(pi.nodeType, Node.PROCESSING_INSTRUCTION_NODE) clone = pi.cloneNode(deep) self.confirm(clone.target == pi.target and clone.data == pi.data) @@ -948,7 +948,7 @@ def testBug0777884(self): doc = parseString("text") text = doc.documentElement.childNodes[0] - self.assertEquals(text.nodeType, Node.TEXT_NODE) + self.assertEqual(text.nodeType, Node.TEXT_NODE) # Should run quietly, doing nothing. 
text.normalize() doc.unlink() @@ -1226,7 +1226,7 @@ doc = parseString("a") elem = doc.documentElement text = elem.childNodes[0] - self.assertEquals(text.nodeType, Node.TEXT_NODE) + self.assertEqual(text.nodeType, Node.TEXT_NODE) self.checkWholeText(text, "a") elem.appendChild(doc.createTextNode("b")) @@ -1483,6 +1483,13 @@ doc.appendChild(doc.createComment("foo--bar")) self.assertRaises(ValueError, doc.toxml) + def testEmptyXMLNSValue(self): + doc = parseString("\n" + "\n") + doc2 = parseString(doc.toxml()) + self.confirm(doc2.namespaceURI == xml.dom.EMPTY_NAMESPACE) + + def test_main(): run_unittest(MinidomTest) diff --git a/lib-python/2.7.0/distutils/tests/test_file_util.py b/lib-python/2.7.0/distutils/tests/test_file_util.py --- a/lib-python/2.7.0/distutils/tests/test_file_util.py +++ b/lib-python/2.7.0/distutils/tests/test_file_util.py @@ -31,19 +31,21 @@ def test_move_file_verbosity(self): f = open(self.source, 'w') - f.write('some content') - f.close() + try: + f.write('some content') + finally: + f.close() move_file(self.source, self.target, verbose=0) wanted = [] - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) # back to original state move_file(self.target, self.source, verbose=0) move_file(self.source, self.target, verbose=1) wanted = ['moving %s -> %s' % (self.source, self.target)] - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) # back to original state move_file(self.target, self.source, verbose=0) @@ -53,7 +55,7 @@ os.mkdir(self.target_dir) move_file(self.source, self.target_dir, verbose=1) wanted = ['moving %s -> %s' % (self.source, self.target_dir)] - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) def test_write_file(self): lines = ['a', 'b', 'c'] @@ -61,7 +63,7 @@ foo = os.path.join(dir, 'foo') write_file(foo, lines) content = [line.strip() for line in open(foo).readlines()] - self.assertEquals(content, lines) + self.assertEqual(content, lines) def 
test_copy_file(self): src_dir = self.mkdtemp() diff --git a/lib-python/2.7.0/distutils/tests/test_text_file.py b/lib-python/2.7.0/distutils/tests/test_text_file.py --- a/lib-python/2.7.0/distutils/tests/test_text_file.py +++ b/lib-python/2.7.0/distutils/tests/test_text_file.py @@ -48,7 +48,7 @@ def test_input(count, description, file, expected_result): result = file.readlines() - self.assertEquals(result, expected_result) + self.assertEqual(result, expected_result) tmpdir = self.mkdtemp() filename = os.path.join(tmpdir, "test.txt") @@ -58,28 +58,46 @@ finally: out_file.close() - in_file = TextFile (filename, strip_comments=0, skip_blanks=0, - lstrip_ws=0, rstrip_ws=0) - test_input (1, "no processing", in_file, result1) + in_file = TextFile(filename, strip_comments=0, skip_blanks=0, + lstrip_ws=0, rstrip_ws=0) + try: + test_input(1, "no processing", in_file, result1) + finally: + in_file.close() - in_file = TextFile (filename, strip_comments=1, skip_blanks=0, - lstrip_ws=0, rstrip_ws=0) - test_input (2, "strip comments", in_file, result2) + in_file = TextFile(filename, strip_comments=1, skip_blanks=0, + lstrip_ws=0, rstrip_ws=0) + try: + test_input(2, "strip comments", in_file, result2) + finally: + in_file.close() - in_file = TextFile (filename, strip_comments=0, skip_blanks=1, - lstrip_ws=0, rstrip_ws=0) - test_input (3, "strip blanks", in_file, result3) + in_file = TextFile(filename, strip_comments=0, skip_blanks=1, + lstrip_ws=0, rstrip_ws=0) + try: + test_input(3, "strip blanks", in_file, result3) + finally: + in_file.close() - in_file = TextFile (filename) - test_input (4, "default processing", in_file, result4) + in_file = TextFile(filename) + try: + test_input(4, "default processing", in_file, result4) + finally: + in_file.close() - in_file = TextFile (filename, strip_comments=1, skip_blanks=1, - join_lines=1, rstrip_ws=1) - test_input (5, "join lines without collapsing", in_file, result5) + in_file = TextFile(filename, strip_comments=1, skip_blanks=1, + 
join_lines=1, rstrip_ws=1) + try: + test_input(5, "join lines without collapsing", in_file, result5) + finally: + in_file.close() - in_file = TextFile (filename, strip_comments=1, skip_blanks=1, - join_lines=1, rstrip_ws=1, collapse_join=1) - test_input (6, "join lines with collapsing", in_file, result6) + in_file = TextFile(filename, strip_comments=1, skip_blanks=1, + join_lines=1, rstrip_ws=1, collapse_join=1) + try: + test_input(6, "join lines with collapsing", in_file, result6) + finally: + in_file.close() def test_suite(): return unittest.makeSuite(TextFileTestCase) diff --git a/lib-python/2.7.0/test/test_codeop.py b/lib-python/2.7.0/test/test_codeop.py --- a/lib-python/2.7.0/test/test_codeop.py +++ b/lib-python/2.7.0/test/test_codeop.py @@ -37,14 +37,14 @@ ctx = {'a': 2} d = { 'value': eval(code,ctx) } r = { 'value': eval(str,ctx) } - self.assertEquals(unify_callables(r),unify_callables(d)) + self.assertEqual(unify_callables(r),unify_callables(d)) else: expected = compile(str, "", symbol, PyCF_DONT_IMPLY_DEDENT) - self.assertEquals( compile_command(str, "", symbol), expected) + self.assertEqual(compile_command(str, "", symbol), expected) def assertIncomplete(self, str, symbol='single'): '''succeed iff str is the start of a valid piece of code''' - self.assertEquals( compile_command(str, symbol=symbol), None) + self.assertEqual(compile_command(str, symbol=symbol), None) def assertInvalid(self, str, symbol='single', is_syntax=1): '''succeed iff str is the start of an invalid piece of code''' @@ -61,12 +61,12 @@ # special case if not is_jython: - self.assertEquals(compile_command(""), - compile("pass", "", 'single', - PyCF_DONT_IMPLY_DEDENT)) - self.assertEquals(compile_command("\n"), - compile("pass", "", 'single', - PyCF_DONT_IMPLY_DEDENT)) + self.assertEqual(compile_command(""), + compile("pass", "", 'single', + PyCF_DONT_IMPLY_DEDENT)) + self.assertEqual(compile_command("\n"), + compile("pass", "", 'single', + PyCF_DONT_IMPLY_DEDENT)) else: av("") av("\n") 
@@ -290,10 +290,10 @@ ai("[i for i in range(10)] = (1, 2, 3)") def test_filename(self): - self.assertEquals(compile_command("a = 1\n", "abc").co_filename, - compile("a = 1\n", "abc", 'single').co_filename) - self.assertNotEquals(compile_command("a = 1\n", "abc").co_filename, - compile("a = 1\n", "def", 'single').co_filename) + self.assertEqual(compile_command("a = 1\n", "abc").co_filename, + compile("a = 1\n", "abc", 'single').co_filename) + self.assertNotEqual(compile_command("a = 1\n", "abc").co_filename, + compile("a = 1\n", "def", 'single').co_filename) def test_main(): diff --git a/lib-python/2.7.0/distutils/tests/test_dep_util.py b/lib-python/2.7.0/distutils/tests/test_dep_util.py --- a/lib-python/2.7.0/distutils/tests/test_dep_util.py +++ b/lib-python/2.7.0/distutils/tests/test_dep_util.py @@ -42,8 +42,8 @@ self.write_file(two) self.write_file(four) - self.assertEquals(newer_pairwise([one, two], [three, four]), - ([one],[three])) + self.assertEqual(newer_pairwise([one, two], [three, four]), + ([one],[three])) def test_newer_group(self): tmpdir = self.mkdtemp() diff --git a/lib-python/2.7.0/json/tests/test_encode_basestring_ascii.py b/lib-python/2.7.0/json/tests/test_encode_basestring_ascii.py --- a/lib-python/2.7.0/json/tests/test_encode_basestring_ascii.py +++ b/lib-python/2.7.0/json/tests/test_encode_basestring_ascii.py @@ -36,7 +36,7 @@ fname = encode_basestring_ascii.__name__ for input_string, expect in CASES: result = encode_basestring_ascii(input_string) - self.assertEquals(result, expect, + self.assertEqual(result, expect, '{0!r} != {1!r} for {2}({3!r})'.format( result, expect, fname, input_string)) diff --git a/lib-python/2.7.0/test/test_htmllib.py b/lib-python/2.7.0/test/test_htmllib.py --- a/lib-python/2.7.0/test/test_htmllib.py +++ b/lib-python/2.7.0/test/test_htmllib.py @@ -38,11 +38,11 @@ """) parser.close() - self.assertEquals(parser.get_anchor_info(), - [('http://foo.org/', 'splat', ''), - ('http://www.python.org/', '', ''), - ('', 'frob', 
''), - ]) + self.assertEqual(parser.get_anchor_info(), + [('http://foo.org/', 'splat', ''), + ('http://www.python.org/', '', ''), + ('', 'frob', ''), + ]) def test_decl_collection(self): # See SF patch #545300 @@ -56,10 +56,10 @@ """) parser.close() - self.assertEquals(parser.get_decl_info(), - ["if !supportEmptyParas", - "endif" - ]) + self.assertEqual(parser.get_decl_info(), + ["if !supportEmptyParas", + "endif" + ]) def test_main(): test_support.run_unittest(HTMLParserTestCase) diff --git a/lib-python/2.7.0/distutils/cygwinccompiler.py b/lib-python/2.7.0/distutils/cygwinccompiler.py --- a/lib-python/2.7.0/distutils/cygwinccompiler.py +++ b/lib-python/2.7.0/distutils/cygwinccompiler.py @@ -47,7 +47,7 @@ # This module should be kept compatible with Python 2.1. -__revision__ = "$Id: cygwinccompiler.py 78666 2010-03-05 00:16:02Z tarek.ziade $" +__revision__ = "$Id$" import os,sys,copy from distutils.ccompiler import gen_preprocess_options, gen_lib_options @@ -382,8 +382,10 @@ # It would probably better to read single lines to search. 
# But we do this only once, and it is fast enough f = open(fn) - s = f.read() - f.close() + try: + s = f.read() + finally: + f.close() except IOError, exc: # if we can't read this file, we cannot say it is wrong diff --git a/lib-python/2.7.0/test/test_pow.py b/lib-python/2.7.0/test/test_pow.py --- a/lib-python/2.7.0/test/test_pow.py +++ b/lib-python/2.7.0/test/test_pow.py @@ -5,17 +5,17 @@ def powtest(self, type): if type != float: for i in range(-1000, 1000): - self.assertEquals(pow(type(i), 0), 1) - self.assertEquals(pow(type(i), 1), type(i)) - self.assertEquals(pow(type(0), 1), type(0)) - self.assertEquals(pow(type(1), 1), type(1)) + self.assertEqual(pow(type(i), 0), 1) + self.assertEqual(pow(type(i), 1), type(i)) + self.assertEqual(pow(type(0), 1), type(0)) + self.assertEqual(pow(type(1), 1), type(1)) for i in range(-100, 100): - self.assertEquals(pow(type(i), 3), i*i*i) + self.assertEqual(pow(type(i), 3), i*i*i) pow2 = 1 for i in range(0,31): - self.assertEquals(pow(2, i), pow2) + self.assertEqual(pow(2, i), pow2) if i != 30 : pow2 = pow2*2 for othertype in int, long: @@ -67,30 +67,30 @@ def test_other(self): # Other tests-- not very systematic - self.assertEquals(pow(3,3) % 8, pow(3,3,8)) - self.assertEquals(pow(3,3) % -8, pow(3,3,-8)) - self.assertEquals(pow(3,2) % -2, pow(3,2,-2)) - self.assertEquals(pow(-3,3) % 8, pow(-3,3,8)) - self.assertEquals(pow(-3,3) % -8, pow(-3,3,-8)) - self.assertEquals(pow(5,2) % -8, pow(5,2,-8)) + self.assertEqual(pow(3,3) % 8, pow(3,3,8)) + self.assertEqual(pow(3,3) % -8, pow(3,3,-8)) + self.assertEqual(pow(3,2) % -2, pow(3,2,-2)) + self.assertEqual(pow(-3,3) % 8, pow(-3,3,8)) + self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8)) + self.assertEqual(pow(5,2) % -8, pow(5,2,-8)) - self.assertEquals(pow(3L,3L) % 8, pow(3L,3L,8)) - self.assertEquals(pow(3L,3L) % -8, pow(3L,3L,-8)) - self.assertEquals(pow(3L,2) % -2, pow(3L,2,-2)) - self.assertEquals(pow(-3L,3L) % 8, pow(-3L,3L,8)) - self.assertEquals(pow(-3L,3L) % -8, pow(-3L,3L,-8)) - 
self.assertEquals(pow(5L,2) % -8, pow(5L,2,-8)) + self.assertEqual(pow(3L,3L) % 8, pow(3L,3L,8)) + self.assertEqual(pow(3L,3L) % -8, pow(3L,3L,-8)) + self.assertEqual(pow(3L,2) % -2, pow(3L,2,-2)) + self.assertEqual(pow(-3L,3L) % 8, pow(-3L,3L,8)) + self.assertEqual(pow(-3L,3L) % -8, pow(-3L,3L,-8)) + self.assertEqual(pow(5L,2) % -8, pow(5L,2,-8)) for i in range(-10, 11): for j in range(0, 6): for k in range(-7, 11): if j >= 0 and k != 0: - self.assertEquals( + self.assertEqual( pow(i,j) % k, pow(i,j,k) ) if j >= 0 and k != 0: - self.assertEquals( + self.assertEqual( pow(long(i),j) % k, pow(long(i),j,k) ) @@ -104,7 +104,7 @@ def test_bug705231(self): # -1.0 raised to an integer should never blow up. It did if the # platform pow() was buggy, and Python didn't worm around it. - eq = self.assertEquals + eq = self.assertEqual a = -1.0 # The next two tests can still fail if the platform floor() # function doesn't treat all large inputs as integers diff --git a/lib-python/2.7.0/sha.py b/lib-python/2.7.0/sha.py --- a/lib-python/2.7.0/sha.py +++ b/lib-python/2.7.0/sha.py @@ -1,4 +1,4 @@ -# $Id: sha.py 58064 2007-09-09 20:25:00Z gregory.p.smith $ +# $Id$ # # Copyright (C) 2005 Gregory P. Smith (greg at krypto.org) # Licensed to PSF under a Contributor Agreement. diff --git a/lib-python/2.7.0/lib-tk/Tix.py b/lib-python/2.7.0/lib-tk/Tix.py --- a/lib-python/2.7.0/lib-tk/Tix.py +++ b/lib-python/2.7.0/lib-tk/Tix.py @@ -1,6 +1,6 @@ # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*- # -# $Id: Tix.py 81008 2010-05-08 20:59:42Z benjamin.peterson $ +# $Id$ # # Tix.py -- Tix widget wrappers. 
# diff --git a/lib-python/2.7.0/test/test_tcl.py b/lib-python/2.7.0/test/test_tcl.py --- a/lib-python/2.7.0/test/test_tcl.py +++ b/lib-python/2.7.0/test/test_tcl.py @@ -147,7 +147,7 @@ env.unset("TCL_LIBRARY") f = os.popen('%s -c "import Tkinter; print Tkinter"' % (unc_name,)) - self.assert_('Tkinter.py' in f.read()) + self.assertTrue('Tkinter.py' in f.read()) # exit code must be zero self.assertEqual(f.close(), None) diff --git a/lib-python/2.7.0/distutils/command/register.py b/lib-python/2.7.0/distutils/command/register.py --- a/lib-python/2.7.0/distutils/command/register.py +++ b/lib-python/2.7.0/distutils/command/register.py @@ -5,7 +5,7 @@ # created 2002/10/21, Richard Jones -__revision__ = "$Id: register.py 77717 2010-01-24 00:33:32Z tarek.ziade $" +__revision__ = "$Id$" import urllib2 import getpass diff --git a/lib-python/2.7.0/bsddb/test/test_sequence.py b/lib-python/2.7.0/bsddb/test/test_sequence.py --- a/lib-python/2.7.0/bsddb/test/test_sequence.py +++ b/lib-python/2.7.0/bsddb/test/test_sequence.py @@ -37,53 +37,53 @@ self.seq = db.DBSequence(self.d, flags=0) start_value = 10 * self.int_32_max self.assertEqual(0xA00000000, start_value) - self.assertEquals(None, self.seq.initial_value(start_value)) - self.assertEquals(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE)) - self.assertEquals(start_value, self.seq.get(5)) - self.assertEquals(start_value + 5, self.seq.get()) + self.assertEqual(None, self.seq.initial_value(start_value)) + self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE)) + self.assertEqual(start_value, self.seq.get(5)) + self.assertEqual(start_value + 5, self.seq.get()) def test_remove(self): self.seq = db.DBSequence(self.d, flags=0) - self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) - self.assertEquals(None, self.seq.remove(txn=None, flags=0)) + self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEqual(None, self.seq.remove(txn=None, 
flags=0)) del self.seq def test_get_key(self): self.seq = db.DBSequence(self.d, flags=0) key = 'foo' - self.assertEquals(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE)) - self.assertEquals(key, self.seq.get_key()) + self.assertEqual(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE)) + self.assertEqual(key, self.seq.get_key()) def test_get_dbp(self): self.seq = db.DBSequence(self.d, flags=0) - self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) - self.assertEquals(self.d, self.seq.get_dbp()) + self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEqual(self.d, self.seq.get_dbp()) def test_cachesize(self): self.seq = db.DBSequence(self.d, flags=0) cashe_size = 10 - self.assertEquals(None, self.seq.set_cachesize(cashe_size)) - self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) - self.assertEquals(cashe_size, self.seq.get_cachesize()) + self.assertEqual(None, self.seq.set_cachesize(cashe_size)) + self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEqual(cashe_size, self.seq.get_cachesize()) def test_flags(self): self.seq = db.DBSequence(self.d, flags=0) flag = db.DB_SEQ_WRAP; - self.assertEquals(None, self.seq.set_flags(flag)) - self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) - self.assertEquals(flag, self.seq.get_flags() & flag) + self.assertEqual(None, self.seq.set_flags(flag)) + self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEqual(flag, self.seq.get_flags() & flag) def test_range(self): self.seq = db.DBSequence(self.d, flags=0) seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1) - self.assertEquals(None, self.seq.set_range(seq_range)) + self.assertEqual(None, self.seq.set_range(seq_range)) self.seq.initial_value(seq_range[0]) - self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) - 
self.assertEquals(seq_range, self.seq.get_range()) + self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEqual(seq_range, self.seq.get_range()) def test_stat(self): self.seq = db.DBSequence(self.d, flags=0) - self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) stat = self.seq.stat() for param in ('nowait', 'min', 'max', 'value', 'current', 'flags', 'cache_size', 'last_value', 'wait'): @@ -106,24 +106,24 @@ def test_64bits(self) : # We don't use both extremes because they are problematic value_plus=(1L<<63)-2 - self.assertEquals(9223372036854775806L,value_plus) + self.assertEqual(9223372036854775806L,value_plus) value_minus=(-1L<<63)+1 # Two complement - self.assertEquals(-9223372036854775807L,value_minus) + self.assertEqual(-9223372036854775807L,value_minus) self.seq = db.DBSequence(self.d, flags=0) - self.assertEquals(None, self.seq.initial_value(value_plus-1)) - self.assertEquals(None, self.seq.open(key='id', txn=None, + self.assertEqual(None, self.seq.initial_value(value_plus-1)) + self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE)) - self.assertEquals(value_plus-1, self.seq.get(1)) - self.assertEquals(value_plus, self.seq.get(1)) + self.assertEqual(value_plus-1, self.seq.get(1)) + self.assertEqual(value_plus, self.seq.get(1)) self.seq.remove(txn=None, flags=0) self.seq = db.DBSequence(self.d, flags=0) - self.assertEquals(None, self.seq.initial_value(value_minus)) - self.assertEquals(None, self.seq.open(key='id', txn=None, + self.assertEqual(None, self.seq.initial_value(value_minus)) + self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE)) - self.assertEquals(value_minus, self.seq.get(1)) - self.assertEquals(value_minus+1, self.seq.get(1)) + self.assertEqual(value_minus, self.seq.get(1)) + self.assertEqual(value_minus+1, self.seq.get(1)) def test_multiple_close(self): 
self.seq = db.DBSequence(self.d) diff --git a/lib-python/2.7.0/email/test/test_email.py b/lib-python/2.7.0/email/test/test_email.py --- a/lib-python/2.7.0/email/test/test_email.py +++ b/lib-python/2.7.0/email/test/test_email.py @@ -40,13 +40,13 @@ SPACE = ' ' - + def openfile(filename, mode='r'): path = os.path.join(os.path.dirname(landmark), 'data', filename) return open(path, mode) - + # Base test class class TestEmailBase(unittest.TestCase): def ndiffAssertEqual(self, first, second): @@ -68,7 +68,7 @@ return msg - + # Test various aspects of the Message class's API class TestMessageAPI(TestEmailBase): def test_get_all(self): @@ -543,7 +543,7 @@ self.assertEqual('us-ascii', msg.get_content_charset()) - + # Test the email.Encoders module class TestEncoders(unittest.TestCase): def test_encode_empty_payload(self): @@ -572,7 +572,7 @@ msg = email.MIMEText.MIMEText('\xca\xb8', _charset='euc-jp') eq(msg['content-transfer-encoding'], '7bit') - + # Test long header wrapping class TestLongHeaders(TestEmailBase): def test_split_long_continuation(self): @@ -893,7 +893,7 @@ """) - + # Test mangling of "From " lines in the body of a message class TestFromMangling(unittest.TestCase): def setUp(self): @@ -927,7 +927,7 @@ """) - + # Test the basic MIMEAudio class class TestMIMEAudio(unittest.TestCase): def setUp(self): @@ -976,7 +976,7 @@ header='foobar') is missing) - + # Test the basic MIMEImage class class TestMIMEImage(unittest.TestCase): def setUp(self): @@ -1019,7 +1019,7 @@ header='foobar') is missing) - + # Test the basic MIMEText class class TestMIMEText(unittest.TestCase): def setUp(self): @@ -1071,7 +1071,7 @@ self.assertRaises(UnicodeEncodeError, MIMEText, teststr) - + # Test complicated multipart/* messages class TestMultipart(TestEmailBase): def setUp(self): @@ -1447,10 +1447,10 @@ YXNkZg== --===============0012394164==--""") - self.assertEquals(m.get_payload(0).get_payload(), 'YXNkZg==') - - - + self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==') + + + # 
Test some badly formatted messages class TestNonConformant(TestEmailBase): def test_parse_missing_minor_type(self): @@ -1565,7 +1565,7 @@ - + # Test RFC 2047 header encoding and decoding class TestRFC2047(unittest.TestCase): def test_rfc2047_multiline(self): @@ -1627,7 +1627,7 @@ self.assertEqual(decode_header(s), [(b'andr\xe9=zz', 'iso-8659-1')]) - + # Test the MIMEMessage class class TestMIMEMessage(TestEmailBase): def setUp(self): @@ -1940,7 +1940,7 @@ msg = MIMEMultipart() self.assertTrue(msg.is_multipart()) - + # A general test of parser->model->generator idempotency. IOW, read a message # in, parse it into a message object tree, then without touching the tree, # regenerate the plain text. The original text and the transformed text @@ -1964,7 +1964,7 @@ eq(text, s.getvalue()) def test_parse_text_message(self): - eq = self.assertEquals + eq = self.assertEqual msg, text = self._msgobj('msg_01.txt') eq(msg.get_content_type(), 'text/plain') eq(msg.get_content_maintype(), 'text') @@ -1976,7 +1976,7 @@ self._idempotent(msg, text) def test_parse_untyped_message(self): - eq = self.assertEquals + eq = self.assertEqual msg, text = self._msgobj('msg_03.txt') eq(msg.get_content_type(), 'text/plain') eq(msg.get_params(), None) @@ -2048,7 +2048,7 @@ self._idempotent(msg, text) def test_content_type(self): - eq = self.assertEquals + eq = self.assertEqual unless = self.assertTrue # Get a message object and reset the seek pointer for other tests msg, text = self._msgobj('msg_05.txt') @@ -2080,7 +2080,7 @@ eq(msg4.get_payload(), 'Yadda yadda yadda\n') def test_parser(self): - eq = self.assertEquals + eq = self.assertEqual unless = self.assertTrue msg, text = self._msgobj('msg_06.txt') # Check some of the outer headers @@ -2097,7 +2097,7 @@ eq(msg1.get_payload(), '\n') - + # Test various other bits of the package's functionality class TestMiscellaneous(TestEmailBase): def test_message_from_string(self): @@ -2452,7 +2452,7 @@ """) - + # Test the iterator/generators class 
TestIterators(TestEmailBase): def test_body_line_iterator(self): @@ -2545,7 +2545,7 @@ self.assertTrue(''.join([il for il, n in imt]) == ''.join(om)) - + class TestParsers(TestEmailBase): def test_header_parser(self): eq = self.assertEqual @@ -2708,7 +2708,7 @@ msg = email.message_from_string(m) self.assertTrue(msg.get_payload(0).get_payload().endswith('\r\n')) - + class TestBase64(unittest.TestCase): def test_len(self): eq = self.assertEqual @@ -2780,7 +2780,7 @@ =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""") - + class TestQuopri(unittest.TestCase): def setUp(self): self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \ @@ -2890,7 +2890,7 @@ two line""") - + # Test the Charset class class TestCharset(unittest.TestCase): def tearDown(self): @@ -2951,7 +2951,7 @@ charset = Charset('utf8') self.assertEqual(str(charset), 'utf-8') - + # Test multilingual MIME headers. class TestHeader(TestEmailBase): def test_simple(self): @@ -3114,7 +3114,7 @@ raises(Errors.HeaderParseError, decode_header, s) - + # Test RFC 2231 header parameters (en/de)coding class TestRFC2231(TestEmailBase): def test_get_param(self): @@ -3426,7 +3426,7 @@ eq(s, 'My Document For You') - + # Tests to ensure that signed parts of an email are completely preserved, as # required by RFC1847 section 2.1. Note that these are incomplete, because the # email package does not currently always preserve the body. See issue 1670765. @@ -3462,7 +3462,7 @@ self._signed_parts_eq(original, result) - + def _testclasses(): mod = sys.modules[__name__] return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')] @@ -3480,6 +3480,6 @@ run_unittest(testclass) - + if __name__ == '__main__': unittest.main(defaultTest='suite') diff --git a/lib-python/2.7.0/distutils/spawn.py b/lib-python/2.7.0/distutils/spawn.py --- a/lib-python/2.7.0/distutils/spawn.py +++ b/lib-python/2.7.0/distutils/spawn.py @@ -6,7 +6,7 @@ executable name. 
""" -__revision__ = "$Id: spawn.py 73147 2009-06-02 15:58:43Z tarek.ziade $" +__revision__ = "$Id$" import sys import os diff --git a/lib-python/2.7.0/sysconfig.py b/lib-python/2.7.0/sysconfig.py --- a/lib-python/2.7.0/sysconfig.py +++ b/lib-python/2.7.0/sysconfig.py @@ -635,13 +635,16 @@ # behaviour. pass else: - m = re.search( - r'ProductUserVisibleVersion\s*' + - r'(.*?)', f.read()) - f.close() - if m is not None: - macrelease = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour + try: + m = re.search( + r'ProductUserVisibleVersion\s*' + + r'(.*?)', f.read()) + f.close() + if m is not None: + macrelease = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + finally: + f.close() if not macver: macver = macrelease diff --git a/lib-python/2.7.0/distutils/tests/test_upload.py b/lib-python/2.7.0/distutils/tests/test_upload.py --- a/lib-python/2.7.0/distutils/tests/test_upload.py +++ b/lib-python/2.7.0/distutils/tests/test_upload.py @@ -80,7 +80,7 @@ for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), ('repository', 'http://pypi.python.org/pypi')): - self.assertEquals(getattr(cmd, attr), waited) + self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): # file with no password @@ -90,14 +90,14 @@ dist = Distribution() cmd = upload(dist) cmd.finalize_options() - self.assertEquals(cmd.password, None) + self.assertEqual(cmd.password, None) # make sure we get it as well, if another command # initialized it at the dist level dist.password = 'xxx' cmd = upload(dist) cmd.finalize_options() - self.assertEquals(cmd.password, 'xxx') + self.assertEqual(cmd.password, 'xxx') def test_upload(self): tmp = self.mkdtemp() @@ -116,11 +116,11 @@ # what did we send ? 
self.assertIn('dédé', self.last_open.req.data) headers = dict(self.last_open.req.headers) - self.assertEquals(headers['Content-length'], '2085') + self.assertEqual(headers['Content-length'], '2085') self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) - self.assertEquals(self.last_open.req.get_method(), 'POST') - self.assertEquals(self.last_open.req.get_full_url(), - 'http://pypi.python.org/pypi') + self.assertEqual(self.last_open.req.get_method(), 'POST') + self.assertEqual(self.last_open.req.get_full_url(), + 'http://pypi.python.org/pypi') self.assertTrue('xxx' in self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertFalse('\n' in auth) diff --git a/lib-python/2.7.0/json/tests/test_unicode.py b/lib-python/2.7.0/json/tests/test_unicode.py --- a/lib-python/2.7.0/json/tests/test_unicode.py +++ b/lib-python/2.7.0/json/tests/test_unicode.py @@ -10,50 +10,50 @@ s = u.encode('utf-8') ju = encoder.encode(u) js = encoder.encode(s) - self.assertEquals(ju, js) + self.assertEqual(ju, js) def test_encoding2(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' s = u.encode('utf-8') ju = json.dumps(u, encoding='utf-8') js = json.dumps(s, encoding='utf-8') - self.assertEquals(ju, js) + self.assertEqual(ju, js) def test_encoding3(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' j = json.dumps(u) - self.assertEquals(j, '"\\u03b1\\u03a9"') + self.assertEqual(j, '"\\u03b1\\u03a9"') def test_encoding4(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' j = json.dumps([u]) - self.assertEquals(j, '["\\u03b1\\u03a9"]') + self.assertEqual(j, '["\\u03b1\\u03a9"]') def test_encoding5(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' j = json.dumps(u, ensure_ascii=False) - self.assertEquals(j, u'"{0}"'.format(u)) + self.assertEqual(j, u'"{0}"'.format(u)) def test_encoding6(self): u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' j = 
json.dumps([u], ensure_ascii=False) - self.assertEquals(j, u'["{0}"]'.format(u)) + self.assertEqual(j, u'["{0}"]'.format(u)) def test_big_unicode_encode(self): u = u'\U0001d120' - self.assertEquals(json.dumps(u), '"\\ud834\\udd20"') - self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"') + self.assertEqual(json.dumps(u), '"\\ud834\\udd20"') + self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"') def test_big_unicode_decode(self): u = u'z\U0001d120x' - self.assertEquals(json.loads('"' + u + '"'), u) - self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u) + self.assertEqual(json.loads('"' + u + '"'), u) + self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u) def test_unicode_decode(self): for i in range(0, 0xd7ff): u = unichr(i) s = '"\\u{0:04x}"'.format(i) - self.assertEquals(json.loads(s), u) + self.assertEqual(json.loads(s), u) def test_object_pairs_hook_with_unicode(self): s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' @@ -71,12 +71,12 @@ OrderedDict(p)) def test_default_encoding(self): - self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), + self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), {'a': u'\xe9'}) def test_unicode_preservation(self): - self.assertEquals(type(json.loads(u'""')), unicode) - self.assertEquals(type(json.loads(u'"a"')), unicode) - self.assertEquals(type(json.loads(u'["a"]')[0]), unicode) + self.assertEqual(type(json.loads(u'""')), unicode) + self.assertEqual(type(json.loads(u'"a"')), unicode) + self.assertEqual(type(json.loads(u'["a"]')[0]), unicode) # Issue 10038. 
- self.assertEquals(type(json.loads('"foo"')), unicode) + self.assertEqual(type(json.loads('"foo"')), unicode) diff --git a/lib-python/2.7.0/test/test_coercion.py b/lib-python/2.7.0/test/test_coercion.py --- a/lib-python/2.7.0/test/test_coercion.py +++ b/lib-python/2.7.0/test/test_coercion.py @@ -266,9 +266,9 @@ self.assertRaises(TypeError, eval, 'a %s b' % op, {'a': a, 'b': b}) else: - self.assertEquals(format_result(res), - format_result(eval('a %s b' % op)), - '%s %s %s == %s failed' % (a, op, b, res)) + self.assertEqual(format_result(res), + format_result(eval('a %s b' % op)), + '%s %s %s == %s failed' % (a, op, b, res)) try: z = copy.copy(a) except copy.Error: @@ -282,7 +282,7 @@ self.fail("TypeError not raised") else: exec('z %s= b' % op) - self.assertEquals(ires, z) + self.assertEqual(ires, z) def test_prefix_binops(self): for ia, a in enumerate(candidates): @@ -293,9 +293,9 @@ self.assertRaises(TypeError, eval, '%s(a, b)' % op, {'a': a, 'b': b}) else: - self.assertEquals(format_result(res), - format_result(eval('%s(a, b)' % op)), - '%s(%s, %s) == %s failed' % (op, a, b, res)) + self.assertEqual(format_result(res), + format_result(eval('%s(a, b)' % op)), + '%s(%s, %s) == %s failed' % (op, a, b, res)) def test_cmptypes(self): # Built-in tp_compare slots expect their arguments to have the @@ -303,21 +303,21 @@ # SF #980352 evil_coercer = CoerceTo(42) # Make sure these don't crash any more - self.assertNotEquals(cmp(u'fish', evil_coercer), 0) - self.assertNotEquals(cmp(slice(1), evil_coercer), 0) + self.assertNotEqual(cmp(u'fish', evil_coercer), 0) + self.assertNotEqual(cmp(slice(1), evil_coercer), 0) # ...but that this still works class WackyComparer(object): def __cmp__(slf, other): self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other) return 0 __hash__ = None # Invalid cmp makes this unhashable - self.assertEquals(cmp(WackyComparer(), evil_coercer), 0) + self.assertEqual(cmp(WackyComparer(), evil_coercer), 0) # ...and classic classes too, 
since that code path is a little different class ClassicWackyComparer: def __cmp__(slf, other): self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other) return 0 - self.assertEquals(cmp(ClassicWackyComparer(), evil_coercer), 0) + self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0) def test_infinite_rec_classic_classes(self): # if __coerce__() returns its arguments reversed it causes an infinite diff --git a/lib-python/2.7.0/test/test_file.py b/lib-python/2.7.0/test/test_file.py --- a/lib-python/2.7.0/test/test_file.py +++ b/lib-python/2.7.0/test/test_file.py @@ -30,7 +30,7 @@ # verify weak references p = proxy(self.f) p.write(b'teststring') - self.assertEquals(self.f.tell(), p.tell()) + self.assertEqual(self.f.tell(), p.tell()) self.f.close() self.f = None self.assertRaises(ReferenceError, getattr, p, 'tell') @@ -49,7 +49,7 @@ a = array('b', b'x'*10) self.f = self.open(TESTFN, 'rb') n = self.f.readinto(a) - self.assertEquals(b'12', a.tostring()[:n]) + self.assertEqual(b'12', a.tostring()[:n]) def testReadinto_text(self): # verify readinto refuses text files @@ -66,7 +66,7 @@ self.f.close() self.f = self.open(TESTFN, 'rb') buf = self.f.read() - self.assertEquals(buf, b'12') + self.assertEqual(buf, b'12') def testWritelinesIntegers(self): # verify writelines with integers @@ -87,7 +87,7 @@ def testErrors(self): f = self.f - self.assertEquals(f.name, TESTFN) + self.assertEqual(f.name, TESTFN) self.assertTrue(not f.isatty()) self.assertTrue(not f.closed) @@ -124,12 +124,12 @@ self.assertRaises(ValueError, method, *args) # file is closed, __exit__ shouldn't do anything - self.assertEquals(self.f.__exit__(None, None, None), None) + self.assertEqual(self.f.__exit__(None, None, None), None) # it must also return None if an exception was given try: 1 // 0 except: - self.assertEquals(self.f.__exit__(*sys.exc_info()), None) + self.assertEqual(self.f.__exit__(*sys.exc_info()), None) def testReadWhenWriting(self): self.assertRaises(IOError, self.f.read) 
@@ -195,7 +195,7 @@ f.close() except IOError as msg: self.fail('error setting buffer size %d: %s' % (s, str(msg))) - self.assertEquals(d, s) + self.assertEqual(d, s) def testTruncateOnWindows(self): # SF bug diff --git a/lib-python/2.7.0/distutils/tests/test_archive_util.py b/lib-python/2.7.0/distutils/tests/test_archive_util.py --- a/lib-python/2.7.0/distutils/tests/test_archive_util.py +++ b/lib-python/2.7.0/distutils/tests/test_archive_util.py @@ -1,5 +1,5 @@ """Tests for distutils.archive_util.""" -__revision__ = "$Id: test_archive_util.py 75659 2009-10-24 13:29:44Z tarek.ziade $" +__revision__ = "$Id$" import unittest import os @@ -129,7 +129,7 @@ self.assertTrue(os.path.exists(tarball2)) # let's compare both tarballs - self.assertEquals(self._tarinfo(tarball), self._tarinfo(tarball2)) + self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2)) # trying an uncompressed one base_name = os.path.join(tmpdir2, 'archive') @@ -169,7 +169,7 @@ os.chdir(old_dir) tarball = base_name + '.tar.Z' self.assertTrue(os.path.exists(tarball)) - self.assertEquals(len(w.warnings), 1) + self.assertEqual(len(w.warnings), 1) # same test with dry_run os.remove(tarball) @@ -183,7 +183,7 @@ finally: os.chdir(old_dir) self.assertTrue(not os.path.exists(tarball)) - self.assertEquals(len(w.warnings), 1) + self.assertEqual(len(w.warnings), 1) @unittest.skipUnless(zlib, "Requires zlib") @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run') @@ -201,9 +201,9 @@ tarball = base_name + '.zip' def test_check_archive_formats(self): - self.assertEquals(check_archive_formats(['gztar', 'xxx', 'zip']), - 'xxx') - self.assertEquals(check_archive_formats(['gztar', 'zip']), None) + self.assertEqual(check_archive_formats(['gztar', 'xxx', 'zip']), + 'xxx') + self.assertEqual(check_archive_formats(['gztar', 'zip']), None) def test_make_archive(self): tmpdir = self.mkdtemp() @@ -258,8 +258,8 @@ archive = tarfile.open(archive_name) try: for member in archive.getmembers(): - 
self.assertEquals(member.uid, 0) - self.assertEquals(member.gid, 0) + self.assertEqual(member.uid, 0) + self.assertEqual(member.gid, 0) finally: archive.close() @@ -273,7 +273,7 @@ make_archive('xxx', 'xxx', root_dir=self.mkdtemp()) except: pass - self.assertEquals(os.getcwd(), current_dir) + self.assertEqual(os.getcwd(), current_dir) finally: del ARCHIVE_FORMATS['xxx'] diff --git a/lib-python/2.7.0/test/test_inspect.py b/lib-python/2.7.0/test/test_inspect.py --- a/lib-python/2.7.0/test/test_inspect.py +++ b/lib-python/2.7.0/test/test_inspect.py @@ -384,8 +384,8 @@ self.assertRaises(IOError, inspect.findsource, co) self.assertRaises(IOError, inspect.getsource, co) linecache.cache[co.co_filename] = (1, None, lines, co.co_filename) - self.assertEquals(inspect.findsource(co), (lines,0)) - self.assertEquals(inspect.getsource(co), lines[0]) + self.assertEqual(inspect.findsource(co), (lines,0)) + self.assertEqual(inspect.getsource(co), lines[0]) # Helper for testing classify_class_attrs. def attrs_wo_objs(cls): diff --git a/lib-python/2.7.0/distutils/ccompiler.py b/lib-python/2.7.0/distutils/ccompiler.py --- a/lib-python/2.7.0/distutils/ccompiler.py +++ b/lib-python/2.7.0/distutils/ccompiler.py @@ -3,7 +3,7 @@ Contains CCompiler, an abstract base class that defines the interface for the Distutils compiler abstraction model.""" -__revision__ = "$Id: ccompiler.py 77704 2010-01-23 09:23:15Z tarek.ziade $" +__revision__ = "$Id$" import sys import os @@ -794,14 +794,16 @@ library_dirs = [] fd, fname = tempfile.mkstemp(".c", funcname, text=True) f = os.fdopen(fd, "w") - for incl in includes: - f.write("""#include "%s"\n""" % incl) - f.write("""\ + try: + for incl in includes: + f.write("""#include "%s"\n""" % incl) + f.write("""\ main (int argc, char **argv) { %s(); } """ % funcname) - f.close() + finally: + f.close() try: objects = self.compile([fname], include_dirs=include_dirs) except CompileError: diff --git a/lib-python/2.7.0/test/test_multiprocessing.py 
b/lib-python/2.7.0/test/test_multiprocessing.py --- a/lib-python/2.7.0/test/test_multiprocessing.py +++ b/lib-python/2.7.0/test/test_multiprocessing.py @@ -183,30 +183,30 @@ current = self.current_process() if self.TYPE != 'threads': - self.assertEquals(p.authkey, current.authkey) - self.assertEquals(p.is_alive(), False) - self.assertEquals(p.daemon, True) + self.assertEqual(p.authkey, current.authkey) + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() - self.assertEquals(p.exitcode, None) - self.assertEquals(p.is_alive(), True) + self.assertEqual(p.exitcode, None) + self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) - self.assertEquals(q.get(), args[1:]) - self.assertEquals(q.get(), kwargs) - self.assertEquals(q.get(), p.name) + self.assertEqual(q.get(), args[1:]) + self.assertEqual(q.get(), kwargs) + self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': - self.assertEquals(q.get(), current.authkey) - self.assertEquals(q.get(), p.pid) + self.assertEqual(q.get(), current.authkey) + self.assertEqual(q.get(), p.pid) p.join() - self.assertEquals(p.exitcode, 0) - self.assertEquals(p.is_alive(), False) + self.assertEqual(p.exitcode, 0) + self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) @classmethod @@ -812,8 +812,6 @@ # # - at unittest.skipUnless(HAS_SHAREDCTYPES, - "requires multiprocessing.sharedctypes") class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) @@ -825,6 +823,10 @@ ('c', latin('x'), latin('y')) ] + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocessing.sharedctypes") + @classmethod def _test(cls, values): for sv, cv in zip(values, cls.codes_values): @@ -1614,12 +1616,14 @@ ('y', c_double) ] - at unittest.skipUnless(HAS_SHAREDCTYPES, - "requires multiprocessing.sharedctypes") class 
_TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocessing.sharedctypes") + @classmethod def _double(cls, x, y, foo, arr, string): x.value *= 2 diff --git a/lib-python/2.7.0/test/test_unicodedata.py b/lib-python/2.7.0/test/test_unicodedata.py --- a/lib-python/2.7.0/test/test_unicodedata.py +++ b/lib-python/2.7.0/test/test_unicodedata.py @@ -252,7 +252,7 @@ self.assertTrue(count >= 10) # should have tested at least the ASCII digits def test_bug_1704793(self): - self.assertEquals(self.db.lookup("GOTHIC LETTER FAIHU"), u'\U00010346') + self.assertEqual(self.db.lookup("GOTHIC LETTER FAIHU"), u'\U00010346') def test_ucd_510(self): import unicodedata diff --git a/lib-python/2.7.0/ctypes/test/test_keeprefs.py b/lib-python/2.7.0/ctypes/test/test_keeprefs.py --- a/lib-python/2.7.0/ctypes/test/test_keeprefs.py +++ b/lib-python/2.7.0/ctypes/test/test_keeprefs.py @@ -4,19 +4,19 @@ class SimpleTestCase(unittest.TestCase): def test_cint(self): x = c_int() - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) x.value = 42 - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) x = c_int(99) - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) def test_ccharp(self): x = c_char_p() - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) x.value = "abc" - self.assertEquals(x._objects, "abc") + self.assertEqual(x._objects, "abc") x = c_char_p("spam") - self.assertEquals(x._objects, "spam") + self.assertEqual(x._objects, "spam") class StructureTestCase(unittest.TestCase): def test_cint_struct(self): @@ -25,21 +25,21 @@ ("b", c_int)] x = X() - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) x.a = 42 x.b = 99 - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) def test_ccharp_struct(self): class X(Structure): _fields_ = [("a", c_char_p), ("b", c_char_p)] x = X() - 
self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) x.a = "spam" x.b = "foo" - self.assertEquals(x._objects, {"0": "spam", "1": "foo"}) + self.assertEqual(x._objects, {"0": "spam", "1": "foo"}) def test_struct_struct(self): class POINT(Structure): @@ -52,28 +52,28 @@ r.ul.y = 1 r.lr.x = 2 r.lr.y = 3 - self.assertEquals(r._objects, None) + self.assertEqual(r._objects, None) r = RECT() pt = POINT(1, 2) r.ul = pt - self.assertEquals(r._objects, {'0': {}}) + self.assertEqual(r._objects, {'0': {}}) r.ul.x = 22 r.ul.y = 44 - self.assertEquals(r._objects, {'0': {}}) + self.assertEqual(r._objects, {'0': {}}) r.lr = POINT() - self.assertEquals(r._objects, {'0': {}, '1': {}}) + self.assertEqual(r._objects, {'0': {}, '1': {}}) class ArrayTestCase(unittest.TestCase): def test_cint_array(self): INTARR = c_int * 3 ia = INTARR() - self.assertEquals(ia._objects, None) + self.assertEqual(ia._objects, None) ia[0] = 1 ia[1] = 2 ia[2] = 3 - self.assertEquals(ia._objects, None) + self.assertEqual(ia._objects, None) class X(Structure): _fields_ = [("x", c_int), @@ -83,9 +83,9 @@ x.x = 1000 x.a[0] = 42 x.a[1] = 96 - self.assertEquals(x._objects, None) + self.assertEqual(x._objects, None) x.a = ia - self.assertEquals(x._objects, {'1': {}}) + self.assertEqual(x._objects, {'1': {}}) class PointerTestCase(unittest.TestCase): def test_p_cint(self): diff --git a/lib-python/2.7.0/distutils/tests/test_dist.py b/lib-python/2.7.0/distutils/tests/test_dist.py --- a/lib-python/2.7.0/distutils/tests/test_dist.py +++ b/lib-python/2.7.0/distutils/tests/test_dist.py @@ -70,13 +70,13 @@ with captured_stdout() as stdout: self.create_distribution(files) stdout.seek(0) - self.assertEquals(stdout.read(), '') + self.assertEqual(stdout.read(), '') distutils.dist.DEBUG = True try: with captured_stdout() as stdout: self.create_distribution(files) stdout.seek(0) - self.assertEquals(stdout.read(), '') + self.assertEqual(stdout.read(), '') finally: distutils.dist.DEBUG = False @@ -102,29 
+102,29 @@ def test_command_packages_configfile(self): sys.argv.append("build") + self.addCleanup(os.unlink, TESTFN) f = open(TESTFN, "w") try: print >>f, "[global]" print >>f, "command_packages = foo.bar, splat" + finally: f.close() - d = self.create_distribution([TESTFN]) - self.assertEqual(d.get_command_packages(), - ["distutils.command", "foo.bar", "splat"]) - # ensure command line overrides config: - sys.argv[1:] = ["--command-packages", "spork", "build"] - d = self.create_distribution([TESTFN]) - self.assertEqual(d.get_command_packages(), - ["distutils.command", "spork"]) + d = self.create_distribution([TESTFN]) + self.assertEqual(d.get_command_packages(), + ["distutils.command", "foo.bar", "splat"]) - # Setting --command-packages to '' should cause the default to - # be used even if a config file specified something else: - sys.argv[1:] = ["--command-packages", "", "build"] - d = self.create_distribution([TESTFN]) - self.assertEqual(d.get_command_packages(), ["distutils.command"]) + # ensure command line overrides config: + sys.argv[1:] = ["--command-packages", "spork", "build"] + d = self.create_distribution([TESTFN]) + self.assertEqual(d.get_command_packages(), + ["distutils.command", "spork"]) - finally: - os.unlink(TESTFN) + # Setting --command-packages to '' should cause the default to + # be used even if a config file specified something else: + sys.argv[1:] = ["--command-packages", "", "build"] + d = self.create_distribution([TESTFN]) + self.assertEqual(d.get_command_packages(), ["distutils.command"]) def test_write_pkg_file(self): # Check DistributionMetadata handling of Unicode fields @@ -175,7 +175,7 @@ finally: warnings.warn = old_warn - self.assertEquals(len(warns), 0) + self.assertEqual(len(warns), 0) def test_finalize_options(self): @@ -186,20 +186,20 @@ dist.finalize_options() # finalize_option splits platforms and keywords - self.assertEquals(dist.metadata.platforms, ['one', 'two']) - self.assertEquals(dist.metadata.keywords, ['one', 'two']) 
+ self.assertEqual(dist.metadata.platforms, ['one', 'two']) + self.assertEqual(dist.metadata.keywords, ['one', 'two']) def test_get_command_packages(self): dist = Distribution() - self.assertEquals(dist.command_packages, None) + self.assertEqual(dist.command_packages, None) cmds = dist.get_command_packages() - self.assertEquals(cmds, ['distutils.command']) - self.assertEquals(dist.command_packages, - ['distutils.command']) + self.assertEqual(cmds, ['distutils.command']) + self.assertEqual(dist.command_packages, + ['distutils.command']) dist.command_packages = 'one,two' cmds = dist.get_command_packages() - self.assertEquals(cmds, ['distutils.command', 'one', 'two']) + self.assertEqual(cmds, ['distutils.command', 'one', 'two']) def test_announce(self): @@ -236,7 +236,7 @@ os.path.expanduser = old_expander # make sure --no-user-cfg disables the user cfg file - self.assertEquals(len(all_files)-1, len(files)) + self.assertEqual(len(all_files)-1, len(files)) class MetadataTestCase(support.TempdirManager, support.EnvironGuard, @@ -341,8 +341,10 @@ temp_dir = self.mkdtemp() user_filename = os.path.join(temp_dir, user_filename) f = open(user_filename, 'w') - f.write('.') - f.close() + try: + f.write('.') + finally: + f.close() try: dist = Distribution() @@ -366,8 +368,8 @@ def test_fix_help_options(self): help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)] fancy_options = fix_help_options(help_tuples) - self.assertEquals(fancy_options[0], ('a', 'b', 'c')) - self.assertEquals(fancy_options[1], (1, 2, 3)) + self.assertEqual(fancy_options[0], ('a', 'b', 'c')) + self.assertEqual(fancy_options[1], (1, 2, 3)) def test_show_help(self): # smoke test, just makes sure some help is displayed @@ -415,14 +417,14 @@ PKG_INFO.seek(0) metadata.read_pkg_file(PKG_INFO) - self.assertEquals(metadata.name, "package") - self.assertEquals(metadata.version, "1.0") - self.assertEquals(metadata.description, "xxx") - self.assertEquals(metadata.download_url, 'http://example.com') - 
self.assertEquals(metadata.keywords, ['one', 'two']) - self.assertEquals(metadata.platforms, ['UNKNOWN']) - self.assertEquals(metadata.obsoletes, None) - self.assertEquals(metadata.requires, ['foo']) + self.assertEqual(metadata.name, "package") + self.assertEqual(metadata.version, "1.0") + self.assertEqual(metadata.description, "xxx") + self.assertEqual(metadata.download_url, 'http://example.com') + self.assertEqual(metadata.keywords, ['one', 'two']) + self.assertEqual(metadata.platforms, ['UNKNOWN']) + self.assertEqual(metadata.obsoletes, None) + self.assertEqual(metadata.requires, ['foo']) def test_suite(): suite = unittest.TestSuite() diff --git a/lib-python/2.7.0/distutils/command/install_lib.py b/lib-python/2.7.0/distutils/command/install_lib.py --- a/lib-python/2.7.0/distutils/command/install_lib.py +++ b/lib-python/2.7.0/distutils/command/install_lib.py @@ -3,7 +3,7 @@ Implements the Distutils 'install_lib' command (install all Python modules).""" -__revision__ = "$Id: install_lib.py 75671 2009-10-24 15:51:30Z tarek.ziade $" +__revision__ = "$Id$" import os import sys diff --git a/lib-python/2.7.0/sqlite3/test/dbapi.py b/lib-python/2.7.0/sqlite3/test/dbapi.py --- a/lib-python/2.7.0/sqlite3/test/dbapi.py +++ b/lib-python/2.7.0/sqlite3/test/dbapi.py @@ -44,8 +44,8 @@ sqlite.paramstyle) def CheckWarning(self): - self.assert_(issubclass(sqlite.Warning, StandardError), - "Warning is not a subclass of StandardError") + self.assertTrue(issubclass(sqlite.Warning, StandardError), + "Warning is not a subclass of StandardError") def CheckError(self): self.assertTrue(issubclass(sqlite.Error, StandardError), diff --git a/lib-python/2.7.0/test/test_linuxaudiodev.py b/lib-python/2.7.0/test/test_linuxaudiodev.py --- a/lib-python/2.7.0/test/test_linuxaudiodev.py +++ b/lib-python/2.7.0/test/test_linuxaudiodev.py @@ -61,29 +61,29 @@ try: self.dev.setparameters(-1, size, nchannels, fmt) except ValueError, err: - self.assertEquals(err.args[0], "expected rate >= 0, not -1") + 
self.assertEqual(err.args[0], "expected rate >= 0, not -1") try: self.dev.setparameters(rate, -2, nchannels, fmt) except ValueError, err: - self.assertEquals(err.args[0], "expected sample size >= 0, not -2") + self.assertEqual(err.args[0], "expected sample size >= 0, not -2") try: self.dev.setparameters(rate, size, 3, fmt) except ValueError, err: - self.assertEquals(err.args[0], "nchannels must be 1 or 2, not 3") + self.assertEqual(err.args[0], "nchannels must be 1 or 2, not 3") try: self.dev.setparameters(rate, size, nchannels, 177) except ValueError, err: - self.assertEquals(err.args[0], "unknown audio encoding: 177") + self.assertEqual(err.args[0], "unknown audio encoding: 177") try: self.dev.setparameters(rate, size, nchannels, linuxaudiodev.AFMT_U16_LE) except ValueError, err: - self.assertEquals(err.args[0], "for linear unsigned 16-bit little-endian " - "audio, expected sample size 16, not 8") + self.assertEqual(err.args[0], "for linear unsigned 16-bit little-endian " + "audio, expected sample size 16, not 8") try: self.dev.setparameters(rate, 16, nchannels, fmt) except ValueError, err: - self.assertEquals(err.args[0], "for linear unsigned 8-bit audio, expected " - "sample size 8, not 16") + self.assertEqual(err.args[0], "for linear unsigned 8-bit audio, expected " + "sample size 8, not 16") def test_main(): try: diff --git a/lib-python/2.7.0/test/test_socket.py b/lib-python/2.7.0/test/test_socket.py --- a/lib-python/2.7.0/test/test_socket.py +++ b/lib-python/2.7.0/test/test_socket.py @@ -425,8 +425,8 @@ return # No inet_aton, nothing to check # Test that issue1008086 and issue767150 are fixed. # It must return 4 bytes. 
- self.assertEquals('\x00'*4, socket.inet_aton('0.0.0.0')) - self.assertEquals('\xff'*4, socket.inet_aton('255.255.255.255')) + self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0')) + self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255')) def testIPv4toString(self): if not hasattr(socket, 'inet_pton'): @@ -434,16 +434,16 @@ from socket import inet_aton as f, inet_pton, AF_INET g = lambda a: inet_pton(AF_INET, a) - self.assertEquals('\x00\x00\x00\x00', f('0.0.0.0')) - self.assertEquals('\xff\x00\xff\x00', f('255.0.255.0')) - self.assertEquals('\xaa\xaa\xaa\xaa', f('170.170.170.170')) - self.assertEquals('\x01\x02\x03\x04', f('1.2.3.4')) - self.assertEquals('\xff\xff\xff\xff', f('255.255.255.255')) + self.assertEqual('\x00\x00\x00\x00', f('0.0.0.0')) + self.assertEqual('\xff\x00\xff\x00', f('255.0.255.0')) + self.assertEqual('\xaa\xaa\xaa\xaa', f('170.170.170.170')) + self.assertEqual('\x01\x02\x03\x04', f('1.2.3.4')) + self.assertEqual('\xff\xff\xff\xff', f('255.255.255.255')) - self.assertEquals('\x00\x00\x00\x00', g('0.0.0.0')) - self.assertEquals('\xff\x00\xff\x00', g('255.0.255.0')) - self.assertEquals('\xaa\xaa\xaa\xaa', g('170.170.170.170')) - self.assertEquals('\xff\xff\xff\xff', g('255.255.255.255')) + self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0')) + self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0')) + self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170')) + self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255')) def testIPv6toString(self): if not hasattr(socket, 'inet_pton'): @@ -456,10 +456,10 @@ return f = lambda a: inet_pton(AF_INET6, a) - self.assertEquals('\x00' * 16, f('::')) - self.assertEquals('\x00' * 16, f('0::0')) - self.assertEquals('\x00\x01' + '\x00' * 14, f('1::')) - self.assertEquals( + self.assertEqual('\x00' * 16, f('::')) + self.assertEqual('\x00' * 16, f('0::0')) + self.assertEqual('\x00\x01' + '\x00' * 14, f('1::')) + self.assertEqual( '\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae', 
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae') ) @@ -470,14 +470,14 @@ from socket import inet_ntoa as f, inet_ntop, AF_INET g = lambda a: inet_ntop(AF_INET, a) - self.assertEquals('1.0.1.0', f('\x01\x00\x01\x00')) - self.assertEquals('170.85.170.85', f('\xaa\x55\xaa\x55')) - self.assertEquals('255.255.255.255', f('\xff\xff\xff\xff')) - self.assertEquals('1.2.3.4', f('\x01\x02\x03\x04')) + self.assertEqual('1.0.1.0', f('\x01\x00\x01\x00')) + self.assertEqual('170.85.170.85', f('\xaa\x55\xaa\x55')) + self.assertEqual('255.255.255.255', f('\xff\xff\xff\xff')) + self.assertEqual('1.2.3.4', f('\x01\x02\x03\x04')) - self.assertEquals('1.0.1.0', g('\x01\x00\x01\x00')) - self.assertEquals('170.85.170.85', g('\xaa\x55\xaa\x55')) - self.assertEquals('255.255.255.255', g('\xff\xff\xff\xff')) + self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00')) + self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55')) + self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff')) def testStringToIPv6(self): if not hasattr(socket, 'inet_ntop'): @@ -490,9 +490,9 @@ return f = lambda a: inet_ntop(AF_INET6, a) - self.assertEquals('::', f('\x00' * 16)) - self.assertEquals('::1', f('\x00' * 15 + '\x01')) - self.assertEquals( + self.assertEqual('::', f('\x00' * 16)) + self.assertEqual('::1', f('\x00' * 15 + '\x01')) + self.assertEqual( 'aef:b01:506:1001:ffff:9997:55:170', f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70') ) @@ -523,7 +523,11 @@ # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate # it reasonable to get the host's addr in addition to 0.0.0.0. # At least for eCos. This is required for the S/390 to pass. 
- my_ip_addr = socket.gethostbyname(socket.gethostname()) + try: + my_ip_addr = socket.gethostbyname(socket.gethostname()) + except socket.error: + # Probably name lookup wasn't set up right; skip this test + return self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0]) self.assertEqual(name[1], port) @@ -1016,8 +1020,8 @@ lambda : "", ]) fo = socket._fileobject(mock_sock, **kwargs) - self.assertEquals(fo.readline(size), "This is the first line\n") - self.assertEquals(fo.readline(size), "And the second line is here\n") + self.assertEqual(fo.readline(size), "This is the first line\n") + self.assertEqual(fo.readline(size), "And the second line is here\n") def _test_read(self, size=-1, **kwargs): mock_sock = self.MockSocket(recv_funcs=[ @@ -1027,7 +1031,7 @@ lambda : "", ]) fo = socket._fileobject(mock_sock, **kwargs) - self.assertEquals(fo.read(size), "This is the first line\n" + self.assertEqual(fo.read(size), "This is the first line\n" "And the second line is here\n") def test_default(self): @@ -1052,8 +1056,8 @@ lambda : "", ]) fo = socket._fileobject(mock_sock, bufsize=0) - self.assertEquals(fo.readline(size), "aa\n") - self.assertEquals(fo.readline(size), "BBbb") + self.assertEqual(fo.readline(size), "aa\n") + self.assertEqual(fo.readline(size), "BBbb") def test_no_buffer(self): self._test_readline_no_buffer() @@ -1192,7 +1196,7 @@ self.addCleanup(self.cli.close) finally: socket.setdefaulttimeout(None) - self.assertEquals(self.cli.gettimeout(), 42) + self.assertEqual(self.cli.gettimeout(), 42) testTimeoutNone = _justAccept def _testTimeoutNone(self): diff --git a/lib-python/2.7.0/sqlite3/test/types.py b/lib-python/2.7.0/sqlite3/test/types.py --- a/lib-python/2.7.0/sqlite3/test/types.py +++ b/lib-python/2.7.0/sqlite3/test/types.py @@ -306,7 +306,7 @@ no row returned. 
""" self.cur.execute("select * from test where 0 = 1") - self.assert_(self.cur.description[0][0] == "x") + self.assertEqual(self.cur.description[0][0], "x") class ObjectAdaptationTests(unittest.TestCase): def cast(obj): diff --git a/lib-python/2.7.0/distutils/debug.py b/lib-python/2.7.0/distutils/debug.py --- a/lib-python/2.7.0/distutils/debug.py +++ b/lib-python/2.7.0/distutils/debug.py @@ -1,6 +1,6 @@ import os -__revision__ = "$Id: debug.py 68943 2009-01-25 22:09:10Z tarek.ziade $" +__revision__ = "$Id$" # If DISTUTILS_DEBUG is anything other than the empty string, we run in # debug mode. diff --git a/lib-python/2.7.0/test/test_fractions.py b/lib-python/2.7.0/test/test_fractions.py --- a/lib-python/2.7.0/test/test_fractions.py +++ b/lib-python/2.7.0/test/test_fractions.py @@ -91,16 +91,16 @@ class GcdTest(unittest.TestCase): def testMisc(self): - self.assertEquals(0, gcd(0, 0)) - self.assertEquals(1, gcd(1, 0)) - self.assertEquals(-1, gcd(-1, 0)) - self.assertEquals(1, gcd(0, 1)) - self.assertEquals(-1, gcd(0, -1)) - self.assertEquals(1, gcd(7, 1)) - self.assertEquals(-1, gcd(7, -1)) - self.assertEquals(1, gcd(-23, 15)) - self.assertEquals(12, gcd(120, 84)) - self.assertEquals(-12, gcd(84, -120)) + self.assertEqual(0, gcd(0, 0)) + self.assertEqual(1, gcd(1, 0)) + self.assertEqual(-1, gcd(-1, 0)) + self.assertEqual(1, gcd(0, 1)) + self.assertEqual(-1, gcd(0, -1)) + self.assertEqual(1, gcd(7, 1)) + self.assertEqual(-1, gcd(7, -1)) + self.assertEqual(1, gcd(-23, 15)) + self.assertEqual(12, gcd(120, 84)) + self.assertEqual(-12, gcd(84, -120)) def _components(r): @@ -111,8 +111,8 @@ def assertTypedEquals(self, expected, actual): """Asserts that both the types and values are the same.""" - self.assertEquals(type(expected), type(actual)) - self.assertEquals(expected, actual) + self.assertEqual(type(expected), type(actual)) + self.assertEqual(expected, actual) def assertRaisesMessage(self, exc_type, message, callable, *args, **kwargs): @@ -120,25 +120,25 @@ try: 
callable(*args, **kwargs) except exc_type, e: - self.assertEquals(message, str(e)) + self.assertEqual(message, str(e)) else: self.fail("%s not raised" % exc_type.__name__) def testInit(self): - self.assertEquals((0, 1), _components(F())) - self.assertEquals((7, 1), _components(F(7))) - self.assertEquals((7, 3), _components(F(F(7, 3)))) + self.assertEqual((0, 1), _components(F())) + self.assertEqual((7, 1), _components(F(7))) + self.assertEqual((7, 3), _components(F(F(7, 3)))) - self.assertEquals((-1, 1), _components(F(-1, 1))) - self.assertEquals((-1, 1), _components(F(1, -1))) - self.assertEquals((1, 1), _components(F(-2, -2))) - self.assertEquals((1, 2), _components(F(5, 10))) - self.assertEquals((7, 15), _components(F(7, 15))) - self.assertEquals((10**23, 1), _components(F(10**23))) + self.assertEqual((-1, 1), _components(F(-1, 1))) + self.assertEqual((-1, 1), _components(F(1, -1))) + self.assertEqual((1, 1), _components(F(-2, -2))) + self.assertEqual((1, 2), _components(F(5, 10))) + self.assertEqual((7, 15), _components(F(7, 15))) + self.assertEqual((10**23, 1), _components(F(10**23))) - self.assertEquals((3, 77), _components(F(F(3, 7), 11))) - self.assertEquals((-9, 5), _components(F(2, F(-10, 9)))) - self.assertEquals((2486, 2485), _components(F(F(22, 7), F(355, 113)))) + self.assertEqual((3, 77), _components(F(F(3, 7), 11))) + self.assertEqual((-9, 5), _components(F(2, F(-10, 9)))) + self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113)))) self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)", F, 12, 0) @@ -150,43 +150,43 @@ @requires_IEEE_754 def testInitFromFloat(self): - self.assertEquals((5, 2), _components(F(2.5))) - self.assertEquals((0, 1), _components(F(-0.0))) - self.assertEquals((3602879701896397, 36028797018963968), - _components(F(0.1))) + self.assertEqual((5, 2), _components(F(2.5))) + self.assertEqual((0, 1), _components(F(-0.0))) + self.assertEqual((3602879701896397, 36028797018963968), + _components(F(0.1))) 
self.assertRaises(TypeError, F, float('nan')) self.assertRaises(TypeError, F, float('inf')) self.assertRaises(TypeError, F, float('-inf')) def testInitFromDecimal(self): - self.assertEquals((11, 10), - _components(F(Decimal('1.1')))) - self.assertEquals((7, 200), - _components(F(Decimal('3.5e-2')))) - self.assertEquals((0, 1), - _components(F(Decimal('.000e20')))) + self.assertEqual((11, 10), + _components(F(Decimal('1.1')))) + self.assertEqual((7, 200), + _components(F(Decimal('3.5e-2')))) + self.assertEqual((0, 1), + _components(F(Decimal('.000e20')))) self.assertRaises(TypeError, F, Decimal('nan')) self.assertRaises(TypeError, F, Decimal('snan')) self.assertRaises(TypeError, F, Decimal('inf')) self.assertRaises(TypeError, F, Decimal('-inf')) def testFromString(self): - self.assertEquals((5, 1), _components(F("5"))) - self.assertEquals((3, 2), _components(F("3/2"))) - self.assertEquals((3, 2), _components(F(" \n +3/2"))) - self.assertEquals((-3, 2), _components(F("-3/2 "))) - self.assertEquals((13, 2), _components(F(" 013/02 \n "))) - self.assertEquals((13, 2), _components(F(u" 013/02 \n "))) + self.assertEqual((5, 1), _components(F("5"))) + self.assertEqual((3, 2), _components(F("3/2"))) + self.assertEqual((3, 2), _components(F(" \n +3/2"))) + self.assertEqual((-3, 2), _components(F("-3/2 "))) + self.assertEqual((13, 2), _components(F(" 013/02 \n "))) + self.assertEqual((13, 2), _components(F(u" 013/02 \n "))) - self.assertEquals((16, 5), _components(F(" 3.2 "))) - self.assertEquals((-16, 5), _components(F(u" -3.2 "))) - self.assertEquals((-3, 1), _components(F(u" -3. 
"))) - self.assertEquals((3, 5), _components(F(u" .6 "))) - self.assertEquals((1, 3125), _components(F("32.e-5"))) - self.assertEquals((1000000, 1), _components(F("1E+06"))) - self.assertEquals((-12300, 1), _components(F("-1.23e4"))) - self.assertEquals((0, 1), _components(F(" .0e+0\t"))) - self.assertEquals((0, 1), _components(F("-0.000e0"))) + self.assertEqual((16, 5), _components(F(" 3.2 "))) + self.assertEqual((-16, 5), _components(F(u" -3.2 "))) + self.assertEqual((-3, 1), _components(F(u" -3. "))) + self.assertEqual((3, 5), _components(F(u" .6 "))) + self.assertEqual((1, 3125), _components(F("32.e-5"))) + self.assertEqual((1000000, 1), _components(F("1E+06"))) + self.assertEqual((-12300, 1), _components(F("-1.23e4"))) + self.assertEqual((0, 1), _components(F(" .0e+0\t"))) + self.assertEqual((0, 1), _components(F("-0.000e0"))) self.assertRaisesMessage( @@ -229,33 +229,33 @@ def testImmutable(self): r = F(7, 3) r.__init__(2, 15) - self.assertEquals((7, 3), _components(r)) + self.assertEqual((7, 3), _components(r)) self.assertRaises(AttributeError, setattr, r, 'numerator', 12) self.assertRaises(AttributeError, setattr, r, 'denominator', 6) - self.assertEquals((7, 3), _components(r)) + self.assertEqual((7, 3), _components(r)) # But if you _really_ need to: r._numerator = 4 r._denominator = 2 - self.assertEquals((4, 2), _components(r)) + self.assertEqual((4, 2), _components(r)) # Which breaks some important operations: - self.assertNotEquals(F(4, 2), r) + self.assertNotEqual(F(4, 2), r) def testFromFloat(self): self.assertRaises(TypeError, F.from_float, 3+4j) - self.assertEquals((10, 1), _components(F.from_float(10))) + self.assertEqual((10, 1), _components(F.from_float(10))) bigint = 1234567890123456789 - self.assertEquals((bigint, 1), _components(F.from_float(bigint))) - self.assertEquals((0, 1), _components(F.from_float(-0.0))) - self.assertEquals((10, 1), _components(F.from_float(10.0))) - self.assertEquals((-5, 2), _components(F.from_float(-2.5))) - 
self.assertEquals((99999999999999991611392, 1), - _components(F.from_float(1e23))) - self.assertEquals(float(10**23), float(F.from_float(1e23))) - self.assertEquals((3602879701896397, 1125899906842624), - _components(F.from_float(3.2))) - self.assertEquals(3.2, float(F.from_float(3.2))) + self.assertEqual((bigint, 1), _components(F.from_float(bigint))) + self.assertEqual((0, 1), _components(F.from_float(-0.0))) + self.assertEqual((10, 1), _components(F.from_float(10.0))) + self.assertEqual((-5, 2), _components(F.from_float(-2.5))) + self.assertEqual((99999999999999991611392, 1), + _components(F.from_float(1e23))) + self.assertEqual(float(10**23), float(F.from_float(1e23))) + self.assertEqual((3602879701896397, 1125899906842624), + _components(F.from_float(3.2))) + self.assertEqual(3.2, float(F.from_float(3.2))) inf = 1e1000 nan = inf - inf @@ -271,13 +271,13 @@ def testFromDecimal(self): self.assertRaises(TypeError, F.from_decimal, 3+4j) - self.assertEquals(F(10, 1), F.from_decimal(10)) - self.assertEquals(F(0), F.from_decimal(Decimal("-0"))) - self.assertEquals(F(5, 10), F.from_decimal(Decimal("0.5"))) - self.assertEquals(F(5, 1000), F.from_decimal(Decimal("5e-3"))) - self.assertEquals(F(5000), F.from_decimal(Decimal("5e3"))) - self.assertEquals(1 - F(1, 10**30), - F.from_decimal(Decimal("0." + "9" * 30))) + self.assertEqual(F(10, 1), F.from_decimal(10)) + self.assertEqual(F(0), F.from_decimal(Decimal("-0"))) + self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5"))) + self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3"))) + self.assertEqual(F(5000), F.from_decimal(Decimal("5e3"))) + self.assertEqual(1 - F(1, 10**30), + F.from_decimal(Decimal("0." 
+ "9" * 30))) self.assertRaisesMessage( TypeError, "Cannot convert Infinity to Fraction.", @@ -306,29 +306,29 @@ self.assertTypedEquals(-1, math.trunc(F(-11, 10))) self.assertTypedEquals(-1, int(F(-11, 10))) - self.assertEquals(False, bool(F(0, 1))) - self.assertEquals(True, bool(F(3, 2))) + self.assertEqual(False, bool(F(0, 1))) + self.assertEqual(True, bool(F(3, 2))) self.assertTypedEquals(0.1, float(F(1, 10))) # Check that __float__ isn't implemented by converting the # numerator and denominator to float before dividing. self.assertRaises(OverflowError, float, long('2'*400+'7')) - self.assertAlmostEquals(2.0/3, + self.assertAlmostEqual(2.0/3, float(F(long('2'*400+'7'), long('3'*400+'1')))) self.assertTypedEquals(0.1+0j, complex(F(1,10))) def testArithmetic(self): - self.assertEquals(F(1, 2), F(1, 10) + F(2, 5)) - self.assertEquals(F(-3, 10), F(1, 10) - F(2, 5)) - self.assertEquals(F(1, 25), F(1, 10) * F(2, 5)) - self.assertEquals(F(1, 4), F(1, 10) / F(2, 5)) + self.assertEqual(F(1, 2), F(1, 10) + F(2, 5)) + self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5)) + self.assertEqual(F(1, 25), F(1, 10) * F(2, 5)) + self.assertEqual(F(1, 4), F(1, 10) / F(2, 5)) self.assertTypedEquals(2, F(9, 10) // F(2, 5)) self.assertTypedEquals(10**23, F(10**23, 1) // F(1)) - self.assertEquals(F(2, 3), F(-7, 3) % F(3, 2)) - self.assertEquals(F(8, 27), F(2, 3) ** F(3)) - self.assertEquals(F(27, 8), F(2, 3) ** F(-3)) + self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2)) + self.assertEqual(F(8, 27), F(2, 3) ** F(3)) + self.assertEqual(F(27, 8), F(2, 3) ** F(-3)) self.assertTypedEquals(2.0, F(4) ** F(1, 2)) # Will return 1j in 3.0: self.assertRaises(ValueError, pow, F(-1), F(1, 2)) @@ -394,7 +394,7 @@ TypeError, "unsupported operand type(s) for +: 'Fraction' and 'Decimal'", operator.add, F(3,11), Decimal('3.1415926')) - self.assertNotEquals(F(5, 2), Decimal('2.5')) + self.assertNotEqual(F(5, 2), Decimal('2.5')) def testComparisons(self): self.assertTrue(F(1, 2) < F(2, 3)) @@ -529,18 +529,18 @@ 
self.assertFalse(float('-inf') == F(2, 5)) def testStringification(self): - self.assertEquals("Fraction(7, 3)", repr(F(7, 3))) - self.assertEquals("Fraction(6283185307, 2000000000)", - repr(F('3.1415926535'))) - self.assertEquals("Fraction(-1, 100000000000000000000)", - repr(F(1, -10**20))) - self.assertEquals("7/3", str(F(7, 3))) - self.assertEquals("7", str(F(7, 1))) + self.assertEqual("Fraction(7, 3)", repr(F(7, 3))) + self.assertEqual("Fraction(6283185307, 2000000000)", + repr(F('3.1415926535'))) + self.assertEqual("Fraction(-1, 100000000000000000000)", + repr(F(1, -10**20))) + self.assertEqual("7/3", str(F(7, 3))) + self.assertEqual("7", str(F(7, 1))) def testHash(self): - self.assertEquals(hash(2.5), hash(F(5, 2))) - self.assertEquals(hash(10**50), hash(F(10**50))) - self.assertNotEquals(hash(float(10**23)), hash(F(10**23))) + self.assertEqual(hash(2.5), hash(F(5, 2))) + self.assertEqual(hash(10**50), hash(F(10**50))) + self.assertNotEqual(hash(float(10**23)), hash(F(10**23))) def testApproximatePi(self): # Algorithm borrowed from @@ -553,7 +553,7 @@ d, da = d+da, da+32 t = (t * n) / d s += t - self.assertAlmostEquals(math.pi, s) + self.assertAlmostEqual(math.pi, s) def testApproximateCos1(self): # Algorithm borrowed from @@ -567,7 +567,7 @@ num *= x * x sign *= -1 s += num / fact * sign - self.assertAlmostEquals(math.cos(1), s) + self.assertAlmostEqual(math.cos(1), s) def test_copy_deepcopy_pickle(self): r = F(13, 7) diff --git a/lib-python/2.7.0/test/test_glob.py b/lib-python/2.7.0/test/test_glob.py --- a/lib-python/2.7.0/test/test_glob.py +++ b/lib-python/2.7.0/test/test_glob.py @@ -59,8 +59,8 @@ if set(type(x) for x in tmp) == uniset: u1 = glob.glob(u'*') u2 = glob.glob(u'./*') - self.assertEquals(set(type(r) for r in u1), uniset) - self.assertEquals(set(type(r) for r in u2), uniset) + self.assertEqual(set(type(r) for r in u1), uniset) + self.assertEqual(set(type(r) for r in u2), uniset) def test_glob_one_directory(self): eq = 
self.assertSequencesEqual_noorder diff --git a/lib-python/2.7.0/test/test_epoll.py b/lib-python/2.7.0/test/test_epoll.py --- a/lib-python/2.7.0/test/test_epoll.py +++ b/lib-python/2.7.0/test/test_epoll.py @@ -56,7 +56,7 @@ try: client.connect(('127.0.0.1', self.serverSocket.getsockname()[1])) except socket.error, e: - self.assertEquals(e.args[0], errno.EINPROGRESS) + self.assertEqual(e.args[0], errno.EINPROGRESS) else: raise AssertionError("Connect should have raised EINPROGRESS") server, addr = self.serverSocket.accept() @@ -162,7 +162,7 @@ (server.fileno(), select.EPOLLOUT)] expected.sort() - self.assertEquals(events, expected) + self.assertEqual(events, expected) self.assertFalse(then - now > 0.01, then - now) now = time.time() @@ -183,7 +183,7 @@ (server.fileno(), select.EPOLLIN | select.EPOLLOUT)] expected.sort() - self.assertEquals(events, expected) + self.assertEqual(events, expected) ep.unregister(client.fileno()) ep.modify(server.fileno(), select.EPOLLOUT) @@ -193,7 +193,7 @@ self.assertFalse(then - now > 0.01) expected = [(server.fileno(), select.EPOLLOUT)] - self.assertEquals(events, expected) + self.assertEqual(events, expected) def test_errors(self): self.assertRaises(ValueError, select.epoll, -2) diff --git a/lib-python/2.7.0/test/test_queue.py b/lib-python/2.7.0/test/test_queue.py --- a/lib-python/2.7.0/test/test_queue.py +++ b/lib-python/2.7.0/test/test_queue.py @@ -95,8 +95,8 @@ LifoQueue = [222, 333, 111], PriorityQueue = [111, 222, 333]) actual_order = [q.get(), q.get(), q.get()] - self.assertEquals(actual_order, target_order[q.__class__.__name__], - "Didn't seem to queue the correct data!") + self.assertEqual(actual_order, target_order[q.__class__.__name__], + "Didn't seem to queue the correct data!") for i in range(QUEUE_SIZE-1): q.put(i) self.assertTrue(not q.empty(), "Queue should not be empty") @@ -154,8 +154,8 @@ for i in xrange(100): q.put(i) q.join() - self.assertEquals(self.cum, sum(range(100)), - "q.join() did not block until all tasks 
were done") + self.assertEqual(self.cum, sum(range(100)), + "q.join() did not block until all tasks were done") for i in (0,1): q.put(None) # instruct the threads to close q.join() # verify that you can join twice diff --git a/lib-python/2.7.0/distutils/command/sdist.py b/lib-python/2.7.0/distutils/command/sdist.py --- a/lib-python/2.7.0/distutils/command/sdist.py +++ b/lib-python/2.7.0/distutils/command/sdist.py @@ -2,7 +2,7 @@ Implements the Distutils 'sdist' command (create a source distribution).""" -__revision__ = "$Id: sdist.py 84713 2010-09-11 15:31:13Z eric.araujo $" +__revision__ = "$Id$" import os import string diff --git a/lib-python/2.7.0/distutils/filelist.py b/lib-python/2.7.0/distutils/filelist.py --- a/lib-python/2.7.0/distutils/filelist.py +++ b/lib-python/2.7.0/distutils/filelist.py @@ -4,7 +4,7 @@ and building lists of files. """ -__revision__ = "$Id: filelist.py 75196 2009-10-03 00:07:35Z tarek.ziade $" +__revision__ = "$Id$" import os, re import fnmatch diff --git a/lib-python/2.7.0/distutils/file_util.py b/lib-python/2.7.0/distutils/file_util.py --- a/lib-python/2.7.0/distutils/file_util.py +++ b/lib-python/2.7.0/distutils/file_util.py @@ -3,7 +3,7 @@ Utility functions for operating on single files. """ -__revision__ = "$Id: file_util.py 80804 2010-05-05 19:09:31Z ronald.oussoren $" +__revision__ = "$Id$" import os from distutils.errors import DistutilsFileError @@ -224,6 +224,8 @@ sequence of strings without line terminators) to it. 
""" f = open(filename, "w") - for line in contents: - f.write(line + "\n") - f.close() + try: + for line in contents: + f.write(line + "\n") + finally: + f.close() diff --git a/lib-python/2.7.0/test/buffer_tests.py b/lib-python/2.7.0/test/buffer_tests.py --- a/lib-python/2.7.0/test/buffer_tests.py +++ b/lib-python/2.7.0/test/buffer_tests.py @@ -15,32 +15,32 @@ def test_islower(self): self.assertFalse(self.marshal(b'').islower()) - self.assert_(self.marshal(b'a').islower()) + self.assertTrue(self.marshal(b'a').islower()) self.assertFalse(self.marshal(b'A').islower()) self.assertFalse(self.marshal(b'\n').islower()) - self.assert_(self.marshal(b'abc').islower()) + self.assertTrue(self.marshal(b'abc').islower()) self.assertFalse(self.marshal(b'aBc').islower()) - self.assert_(self.marshal(b'abc\n').islower()) + self.assertTrue(self.marshal(b'abc\n').islower()) self.assertRaises(TypeError, self.marshal(b'abc').islower, 42) def test_isupper(self): self.assertFalse(self.marshal(b'').isupper()) self.assertFalse(self.marshal(b'a').isupper()) - self.assert_(self.marshal(b'A').isupper()) + self.assertTrue(self.marshal(b'A').isupper()) self.assertFalse(self.marshal(b'\n').isupper()) - self.assert_(self.marshal(b'ABC').isupper()) + self.assertTrue(self.marshal(b'ABC').isupper()) self.assertFalse(self.marshal(b'AbC').isupper()) - self.assert_(self.marshal(b'ABC\n').isupper()) + self.assertTrue(self.marshal(b'ABC\n').isupper()) self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42) def test_istitle(self): self.assertFalse(self.marshal(b'').istitle()) self.assertFalse(self.marshal(b'a').istitle()) - self.assert_(self.marshal(b'A').istitle()) + self.assertTrue(self.marshal(b'A').istitle()) self.assertFalse(self.marshal(b'\n').istitle()) - self.assert_(self.marshal(b'A Titlecased Line').istitle()) - self.assert_(self.marshal(b'A\nTitlecased Line').istitle()) - self.assert_(self.marshal(b'A Titlecased, Line').istitle()) + self.assertTrue(self.marshal(b'A Titlecased 
Line').istitle()) + self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle()) + self.assertTrue(self.marshal(b'A Titlecased, Line').istitle()) self.assertFalse(self.marshal(b'Not a capitalized String').istitle()) self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle()) self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle()) @@ -50,31 +50,31 @@ def test_isspace(self): self.assertFalse(self.marshal(b'').isspace()) self.assertFalse(self.marshal(b'a').isspace()) - self.assert_(self.marshal(b' ').isspace()) - self.assert_(self.marshal(b'\t').isspace()) - self.assert_(self.marshal(b'\r').isspace()) - self.assert_(self.marshal(b'\n').isspace()) - self.assert_(self.marshal(b' \t\r\n').isspace()) + self.assertTrue(self.marshal(b' ').isspace()) + self.assertTrue(self.marshal(b'\t').isspace()) + self.assertTrue(self.marshal(b'\r').isspace()) + self.assertTrue(self.marshal(b'\n').isspace()) + self.assertTrue(self.marshal(b' \t\r\n').isspace()) self.assertFalse(self.marshal(b' \t\r\na').isspace()) self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42) def test_isalpha(self): self.assertFalse(self.marshal(b'').isalpha()) - self.assert_(self.marshal(b'a').isalpha()) - self.assert_(self.marshal(b'A').isalpha()) + self.assertTrue(self.marshal(b'a').isalpha()) + self.assertTrue(self.marshal(b'A').isalpha()) self.assertFalse(self.marshal(b'\n').isalpha()) - self.assert_(self.marshal(b'abc').isalpha()) + self.assertTrue(self.marshal(b'abc').isalpha()) self.assertFalse(self.marshal(b'aBc123').isalpha()) self.assertFalse(self.marshal(b'abc\n').isalpha()) self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42) def test_isalnum(self): self.assertFalse(self.marshal(b'').isalnum()) - self.assert_(self.marshal(b'a').isalnum()) - self.assert_(self.marshal(b'A').isalnum()) + self.assertTrue(self.marshal(b'a').isalnum()) + self.assertTrue(self.marshal(b'A').isalnum()) self.assertFalse(self.marshal(b'\n').isalnum()) - 
self.assert_(self.marshal(b'123abc456').isalnum()) - self.assert_(self.marshal(b'a1b3c').isalnum()) + self.assertTrue(self.marshal(b'123abc456').isalnum()) + self.assertTrue(self.marshal(b'a1b3c').isalnum()) self.assertFalse(self.marshal(b'aBc000 ').isalnum()) self.assertFalse(self.marshal(b'abc\n').isalnum()) self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42) @@ -82,8 +82,8 @@ def test_isdigit(self): self.assertFalse(self.marshal(b'').isdigit()) self.assertFalse(self.marshal(b'a').isdigit()) - self.assert_(self.marshal(b'0').isdigit()) - self.assert_(self.marshal(b'0123456789').isdigit()) + self.assertTrue(self.marshal(b'0').isdigit()) + self.assertTrue(self.marshal(b'0123456789').isdigit()) self.assertFalse(self.marshal(b'0123456789a').isdigit()) self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42) diff --git a/lib-python/2.7.0/curses/__init__.py b/lib-python/2.7.0/curses/__init__.py --- a/lib-python/2.7.0/curses/__init__.py +++ b/lib-python/2.7.0/curses/__init__.py @@ -10,7 +10,7 @@ """ -__revision__ = "$Id: __init__.py 61064 2008-02-25 16:29:58Z andrew.kuchling $" +__revision__ = "$Id$" from _curses import * from curses.wrapper import wrapper diff --git a/lib-python/2.7.0/test/test_opcodes.py b/lib-python/2.7.0/test/test_opcodes.py --- a/lib-python/2.7.0/test/test_opcodes.py +++ b/lib-python/2.7.0/test/test_opcodes.py @@ -72,35 +72,35 @@ f = eval('lambda: None') g = eval('lambda: None') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) f = eval('lambda a: a') g = eval('lambda a: a') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) f = eval('lambda a=1: a') g = eval('lambda a=1: a') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) f = eval('lambda: 0') g = eval('lambda: 1') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) f = eval('lambda: None') g = eval('lambda a: None') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) f = eval('lambda a: None') g = eval('lambda b: None') - self.assertNotEquals(f, g) 
+ self.assertNotEqual(f, g) f = eval('lambda a: None') g = eval('lambda a=None: None') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) f = eval('lambda a=0: None') g = eval('lambda a=1: None') - self.assertNotEquals(f, g) + self.assertNotEqual(f, g) def test_modulo_of_string_subclasses(self): class MyString(str): diff --git a/lib-python/2.7.0/distutils/tests/test_build.py b/lib-python/2.7.0/distutils/tests/test_build.py --- a/lib-python/2.7.0/distutils/tests/test_build.py +++ b/lib-python/2.7.0/distutils/tests/test_build.py @@ -17,11 +17,11 @@ cmd.finalize_options() # if not specified, plat_name gets the current platform - self.assertEquals(cmd.plat_name, get_platform()) + self.assertEqual(cmd.plat_name, get_platform()) # build_purelib is build + lib wanted = os.path.join(cmd.build_base, 'lib') - self.assertEquals(cmd.build_purelib, wanted) + self.assertEqual(cmd.build_purelib, wanted) # build_platlib is 'build/lib.platform-x.x[-pydebug]' # examples: @@ -31,21 +31,21 @@ self.assertTrue(cmd.build_platlib.endswith('-pydebug')) plat_spec += '-pydebug' wanted = os.path.join(cmd.build_base, 'lib' + plat_spec) - self.assertEquals(cmd.build_platlib, wanted) + self.assertEqual(cmd.build_platlib, wanted) # by default, build_lib = build_purelib - self.assertEquals(cmd.build_lib, cmd.build_purelib) + self.assertEqual(cmd.build_lib, cmd.build_purelib) # build_temp is build/temp. 
wanted = os.path.join(cmd.build_base, 'temp' + plat_spec) - self.assertEquals(cmd.build_temp, wanted) + self.assertEqual(cmd.build_temp, wanted) # build_scripts is build/scripts-x.x wanted = os.path.join(cmd.build_base, 'scripts-' + sys.version[0:3]) - self.assertEquals(cmd.build_scripts, wanted) + self.assertEqual(cmd.build_scripts, wanted) # executable is os.path.normpath(sys.executable) - self.assertEquals(cmd.executable, os.path.normpath(sys.executable)) + self.assertEqual(cmd.executable, os.path.normpath(sys.executable)) def test_suite(): return unittest.makeSuite(BuildTestCase) diff --git a/lib-python/2.7.0/test/test_gdb.py b/lib-python/2.7.0/test/test_gdb.py --- a/lib-python/2.7.0/test/test_gdb.py +++ b/lib-python/2.7.0/test/test_gdb.py @@ -127,7 +127,7 @@ '') # Ensure no unexpected error messages: - self.assertEquals(err, '') + self.assertEqual(err, '') return out @@ -155,13 +155,12 @@ def assertEndsWith(self, actual, exp_end): '''Ensure that the given "actual" string ends with "exp_end"''' - self.assert_(actual.endswith(exp_end), - msg='%r did not end with %r' % (actual, exp_end)) + self.assertTrue(actual.endswith(exp_end), + msg='%r did not end with %r' % (actual, exp_end)) def assertMultilineMatches(self, actual, pattern): m = re.match(pattern, actual, re.DOTALL) - self.assert_(m, - msg='%r did not match %r' % (actual, pattern)) + self.assertTrue(m, msg='%r did not match %r' % (actual, pattern)) def get_sample_script(self): return findfile('gdb_sample.py') @@ -176,7 +175,7 @@ # matches repr(value) in this process: gdb_repr, gdb_output = self.get_gdb_repr('print ' + repr(val), cmds_after_breakpoint) - self.assertEquals(gdb_repr, repr(val), gdb_output) + self.assertEqual(gdb_repr, repr(val), gdb_output) def test_int(self): 'Verify the pretty-printing of various "int" values' @@ -258,7 +257,7 @@ gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b']) s.pop() print s''') - self.assertEquals(gdb_repr, "set(['b'])") + self.assertEqual(gdb_repr, 
"set(['b'])") def test_frozensets(self): 'Verify the pretty-printing of frozensets' @@ -274,8 +273,8 @@ except RuntimeError, e: print e ''') - self.assertEquals(gdb_repr, - "exceptions.RuntimeError('I am an error',)") + self.assertEqual(gdb_repr, + "exceptions.RuntimeError('I am an error',)") # Test division by zero: @@ -285,8 +284,8 @@ except ZeroDivisionError, e: print e ''') - self.assertEquals(gdb_repr, - "exceptions.ZeroDivisionError('integer division or modulo by zero',)") + self.assertEqual(gdb_repr, + "exceptions.ZeroDivisionError('integer division or modulo by zero',)") def test_classic_class(self): 'Verify the pretty-printing of classic class instances' @@ -380,7 +379,7 @@ 'backtrace']) ) - self.assertEquals(gdb_repr, '0x0') + self.assertEqual(gdb_repr, '0x0') def test_NULL_ob_type(self): 'Ensure that a PyObject* with NULL ob_type is handled gracefully' @@ -432,12 +431,12 @@ gdb_repr, gdb_output = \ self.get_gdb_repr("a = [3, 4, 5] ; a.append(a) ; print a") - self.assertEquals(gdb_repr, '[3, 4, 5, [...]]') + self.assertEqual(gdb_repr, '[3, 4, 5, [...]]') gdb_repr, gdb_output = \ self.get_gdb_repr("a = [3, 4, 5] ; b = [a] ; a.append(b) ; print a") - self.assertEquals(gdb_repr, '[3, 4, 5, [[...]]]') + self.assertEqual(gdb_repr, '[3, 4, 5, [[...]]]') def test_selfreferential_dict(self): '''Ensure that a reference loop involving a dict doesn't lead proxyval @@ -445,7 +444,7 @@ gdb_repr, gdb_output = \ self.get_gdb_repr("a = {} ; b = {'bar':a} ; a['foo'] = b ; print a") - self.assertEquals(gdb_repr, "{'foo': {'bar': {...}}}") + self.assertEqual(gdb_repr, "{'foo': {'bar': {...}}}") def test_selfreferential_old_style_instance(self): gdb_repr, gdb_output = \ @@ -490,34 +489,34 @@ def test_truncation(self): 'Verify that very long output is truncated' gdb_repr, gdb_output = self.get_gdb_repr('print range(1000)') - self.assertEquals(gdb_repr, - "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, " - "14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, " - "27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, " - "40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, " - "53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, " - "66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, " - "79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, " - "92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, " - "104, 105, 106, 107, 108, 109, 110, 111, 112, 113, " - "114, 115, 116, 117, 118, 119, 120, 121, 122, 123, " - "124, 125, 126, 127, 128, 129, 130, 131, 132, 133, " - "134, 135, 136, 137, 138, 139, 140, 141, 142, 143, " - "144, 145, 146, 147, 148, 149, 150, 151, 152, 153, " - "154, 155, 156, 157, 158, 159, 160, 161, 162, 163, " - "164, 165, 166, 167, 168, 169, 170, 171, 172, 173, " - "174, 175, 176, 177, 178, 179, 180, 181, 182, 183, " - "184, 185, 186, 187, 188, 189, 190, 191, 192, 193, " - "194, 195, 196, 197, 198, 199, 200, 201, 202, 203, " - "204, 205, 206, 207, 208, 209, 210, 211, 212, 213, " - "214, 215, 216, 217, 218, 219, 220, 221, 222, 223, " - "224, 225, 226...(truncated)") - self.assertEquals(len(gdb_repr), - 1024 + len('...(truncated)')) + self.assertEqual(gdb_repr, + "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, " + "14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, " + "27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, " + "40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, " + "53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, " + "66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, " + "79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, " + "92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, " + "104, 105, 106, 107, 108, 109, 110, 111, 112, 113, " + "114, 115, 116, 117, 118, 119, 120, 121, 122, 123, " + "124, 125, 126, 127, 128, 129, 130, 131, 132, 133, " + "134, 135, 136, 137, 138, 139, 140, 141, 142, 143, " + "144, 145, 146, 147, 148, 149, 150, 151, 152, 153, " + "154, 155, 156, 157, 158, 159, 160, 161, 162, 163, " + "164, 165, 166, 167, 168, 169, 170, 171, 172, 173, " + "174, 175, 176, 177, 178, 179, 180, 181, 182, 
183, " + "184, 185, 186, 187, 188, 189, 190, 191, 192, 193, " + "194, 195, 196, 197, 198, 199, 200, 201, 202, 203, " + "204, 205, 206, 207, 208, 209, 210, 211, 212, 213, " + "214, 215, 216, 217, 218, 219, 220, 221, 222, 223, " + "224, 225, 226...(truncated)") + self.assertEqual(len(gdb_repr), + 1024 + len('...(truncated)')) def test_builtin_function(self): gdb_repr, gdb_output = self.get_gdb_repr('print len') - self.assertEquals(gdb_repr, '') + self.assertEqual(gdb_repr, '') def test_builtin_method(self): gdb_repr, gdb_output = self.get_gdb_repr('import sys; print sys.stdout.readlines') diff --git a/lib-python/2.7.0/distutils/tests/test_filelist.py b/lib-python/2.7.0/distutils/tests/test_filelist.py --- a/lib-python/2.7.0/distutils/tests/test_filelist.py +++ b/lib-python/2.7.0/distutils/tests/test_filelist.py @@ -24,15 +24,15 @@ def test_glob_to_re(self): # simple cases - self.assertEquals(glob_to_re('foo*'), 'foo[^/]*\\Z(?ms)') - self.assertEquals(glob_to_re('foo?'), 'foo[^/]\\Z(?ms)') - self.assertEquals(glob_to_re('foo??'), 'foo[^/][^/]\\Z(?ms)') + self.assertEqual(glob_to_re('foo*'), 'foo[^/]*\\Z(?ms)') + self.assertEqual(glob_to_re('foo?'), 'foo[^/]\\Z(?ms)') + self.assertEqual(glob_to_re('foo??'), 'foo[^/][^/]\\Z(?ms)') # special cases - self.assertEquals(glob_to_re(r'foo\\*'), r'foo\\\\[^/]*\Z(?ms)') - self.assertEquals(glob_to_re(r'foo\\\*'), r'foo\\\\\\[^/]*\Z(?ms)') - self.assertEquals(glob_to_re('foo????'), r'foo[^/][^/][^/][^/]\Z(?ms)') - self.assertEquals(glob_to_re(r'foo\\??'), r'foo\\\\[^/][^/]\Z(?ms)') + self.assertEqual(glob_to_re(r'foo\\*'), r'foo\\\\[^/]*\Z(?ms)') + self.assertEqual(glob_to_re(r'foo\\\*'), r'foo\\\\\\[^/]*\Z(?ms)') + self.assertEqual(glob_to_re('foo????'), r'foo[^/][^/][^/][^/]\Z(?ms)') + self.assertEqual(glob_to_re(r'foo\\??'), r'foo\\\\[^/][^/]\Z(?ms)') def test_process_template_line(self): # testing all MANIFEST.in template patterns @@ -60,21 +60,21 @@ join('global', 'two.txt'), join('f', 'o', 'f.oo'), join('dir', 
'graft-one'), join('dir', 'dir2', 'graft2')] - self.assertEquals(file_list.files, wanted) + self.assertEqual(file_list.files, wanted) def test_debug_print(self): file_list = FileList() with captured_stdout() as stdout: file_list.debug_print('xxx') stdout.seek(0) - self.assertEquals(stdout.read(), '') + self.assertEqual(stdout.read(), '') debug.DEBUG = True try: with captured_stdout() as stdout: file_list.debug_print('xxx') stdout.seek(0) - self.assertEquals(stdout.read(), 'xxx\n') + self.assertEqual(stdout.read(), 'xxx\n') finally: debug.DEBUG = False diff --git a/lib-python/2.7.0/distutils/msvc9compiler.py b/lib-python/2.7.0/distutils/msvc9compiler.py --- a/lib-python/2.7.0/distutils/msvc9compiler.py +++ b/lib-python/2.7.0/distutils/msvc9compiler.py @@ -12,7 +12,7 @@ # finding DevStudio (through the registry) # ported to VS2005 and VS 2008 by Christian Heimes -__revision__ = "$Id: msvc9compiler.py 82130 2010-06-21 15:27:46Z benjamin.peterson $" +__revision__ = "$Id$" import os import subprocess @@ -273,23 +273,27 @@ popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + try: + stdout, stderr = popen.communicate() + if popen.wait() != 0: + raise DistutilsPlatformError(stderr.decode("mbcs")) - stdout, stderr = popen.communicate() - if popen.wait() != 0: - raise DistutilsPlatformError(stderr.decode("mbcs")) + stdout = stdout.decode("mbcs") + for line in stdout.split("\n"): + line = Reg.convert_mbcs(line) + if '=' not in line: + continue + line = line.strip() + key, value = line.split('=', 1) + key = key.lower() + if key in interesting: + if value.endswith(os.pathsep): + value = value[:-1] + result[key] = removeDuplicates(value) - stdout = stdout.decode("mbcs") - for line in stdout.split("\n"): - line = Reg.convert_mbcs(line) - if '=' not in line: - continue - line = line.strip() - key, value = line.split('=', 1) - key = key.lower() - if key in interesting: - if value.endswith(os.pathsep): - value = value[:-1] - 
result[key] = removeDuplicates(value) + finally: + popen.stdout.close() + popen.stderr.close() if len(result) != len(interesting): raise ValueError(str(list(result.keys()))) diff --git a/lib-python/2.7.0/test/test_kqueue.py b/lib-python/2.7.0/test/test_kqueue.py --- a/lib-python/2.7.0/test/test_kqueue.py +++ b/lib-python/2.7.0/test/test_kqueue.py @@ -90,7 +90,7 @@ try: client.connect(('127.0.0.1', serverSocket.getsockname()[1])) except socket.error, e: - self.assertEquals(e.args[0], errno.EINPROGRESS) + self.assertEqual(e.args[0], errno.EINPROGRESS) else: #raise AssertionError("Connect should have raised EINPROGRESS") pass # FreeBSD doesn't raise an exception here @@ -124,7 +124,7 @@ events = kq.control(None, 4, 1) events = [(e.ident, e.filter, e.flags) for e in events] events.sort() - self.assertEquals(events, [ + self.assertEqual(events, [ (client.fileno(), select.KQ_FILTER_WRITE, flags), (server.fileno(), select.KQ_FILTER_WRITE, flags)]) @@ -143,7 +143,7 @@ events = [(e.ident, e.filter, e.flags) for e in events] events.sort() - self.assertEquals(events, [ + self.assertEqual(events, [ (client.fileno(), select.KQ_FILTER_WRITE, flags), (client.fileno(), select.KQ_FILTER_READ, flags), (server.fileno(), select.KQ_FILTER_WRITE, flags), @@ -166,7 +166,7 @@ events = kq.control([], 4, 0.99) events = [(e.ident, e.filter, e.flags) for e in events] events.sort() - self.assertEquals(events, [ + self.assertEqual(events, [ (server.fileno(), select.KQ_FILTER_WRITE, flags)]) client.close() @@ -183,7 +183,7 @@ r = kq.control([event1, event2], 1, 1) self.assertTrue(r) self.assertFalse(r[0].flags & select.KQ_EV_ERROR) - self.assertEquals(b.recv(r[0].data), b'foo') + self.assertEqual(b.recv(r[0].data), b'foo') a.close() b.close() diff --git a/lib-python/2.7.0/test/test_sysconfig.py b/lib-python/2.7.0/test/test_sysconfig.py --- a/lib-python/2.7.0/test/test_sysconfig.py +++ b/lib-python/2.7.0/test/test_sysconfig.py @@ -87,7 +87,7 @@ shutil.rmtree(path) def test_get_path_names(self): - 
self.assertEquals(get_path_names(), sysconfig._SCHEME_KEYS) + self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS) def test_get_paths(self): scheme = get_paths() @@ -97,7 +97,7 @@ wanted.sort() scheme = scheme.items() scheme.sort() - self.assertEquals(scheme, wanted) + self.assertEqual(scheme, wanted) def test_get_path(self): # xxx make real tests here @@ -116,21 +116,21 @@ sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Intel)]') sys.platform = 'win32' - self.assertEquals(get_platform(), 'win32') + self.assertEqual(get_platform(), 'win32') # windows XP, amd64 os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Amd64)]') sys.platform = 'win32' - self.assertEquals(get_platform(), 'win-amd64') + self.assertEqual(get_platform(), 'win-amd64') # windows XP, itanium os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Itanium)]') sys.platform = 'win32' - self.assertEquals(get_platform(), 'win-ia64') + self.assertEqual(get_platform(), 'win-ia64') # macbook os.name = 'posix' @@ -149,9 +149,9 @@ maxint = sys.maxint try: sys.maxint = 2147483647 - self.assertEquals(get_platform(), 'macosx-10.3-ppc') + self.assertEqual(get_platform(), 'macosx-10.3-ppc') sys.maxint = 9223372036854775807 - self.assertEquals(get_platform(), 'macosx-10.3-ppc64') + self.assertEqual(get_platform(), 'macosx-10.3-ppc64') finally: sys.maxint = maxint @@ -169,9 +169,9 @@ maxint = sys.maxint try: sys.maxint = 2147483647 - self.assertEquals(get_platform(), 'macosx-10.3-i386') + self.assertEqual(get_platform(), 'macosx-10.3-i386') sys.maxint = 9223372036854775807 - self.assertEquals(get_platform(), 'macosx-10.3-x86_64') + self.assertEqual(get_platform(), 'macosx-10.3-x86_64') finally: sys.maxint = maxint @@ -182,33 +182,33 @@ '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') - self.assertEquals(get_platform(), 'macosx-10.4-fat') + self.assertEqual(get_platform(), 'macosx-10.4-fat') 
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') - self.assertEquals(get_platform(), 'macosx-10.4-intel') + self.assertEqual(get_platform(), 'macosx-10.4-intel') get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') - self.assertEquals(get_platform(), 'macosx-10.4-fat3') + self.assertEqual(get_platform(), 'macosx-10.4-fat3') get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') - self.assertEquals(get_platform(), 'macosx-10.4-universal') + self.assertEqual(get_platform(), 'macosx-10.4-universal') get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') - self.assertEquals(get_platform(), 'macosx-10.4-fat64') + self.assertEqual(get_platform(), 'macosx-10.4-fat64') for arch in ('ppc', 'i386', 'x86_64', 'ppc64'): get_config_vars()['CFLAGS'] = ('-arch %s -isysroot ' @@ -216,7 +216,7 @@ '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3'%(arch,)) - self.assertEquals(get_platform(), 'macosx-10.4-%s'%(arch,)) + self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,)) # linux debian sarge os.name = 'posix' @@ -226,7 +226,7 @@ self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7', '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686')) - self.assertEquals(get_platform(), 'linux-i686') + self.assertEqual(get_platform(), 'linux-i686') # XXX more platforms to tests here @@ -237,7 +237,7 @@ def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', 'posix_home', 'posix_prefix', 'posix_user') - self.assertEquals(get_scheme_names(), wanted) + 
self.assertEqual(get_scheme_names(), wanted) def test_symlink(self): # Issue 7880 @@ -263,7 +263,7 @@ for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'): global_path = get_path(name, 'posix_prefix') user_path = get_path(name, 'posix_user') - self.assertEquals(user_path, global_path.replace(base, user)) + self.assertEqual(user_path, global_path.replace(base, user)) def test_main(): run_unittest(TestSysConfig) diff --git a/lib-python/2.7.0/test/test_textwrap.py b/lib-python/2.7.0/test/test_textwrap.py --- a/lib-python/2.7.0/test/test_textwrap.py +++ b/lib-python/2.7.0/test/test_textwrap.py @@ -5,7 +5,7 @@ # Converted to PyUnit by Peter Hansen . # Currently maintained by Greg Ward. # -# $Id: test_textwrap.py 77727 2010-01-24 16:58:36Z ezio.melotti $ +# $Id$ # import unittest @@ -29,7 +29,7 @@ def check(self, result, expect): - self.assertEquals(result, expect, + self.assertEqual(result, expect, 'expected:\n%s\nbut got:\n%s' % ( self.show(expect), self.show(result))) @@ -39,9 +39,9 @@ def check_split(self, text, expect): result = self.wrapper._split(text) - self.assertEquals(result, expect, - "\nexpected %r\n" - "but got %r" % (expect, result)) + self.assertEqual(result, expect, + "\nexpected %r\n" + "but got %r" % (expect, result)) class WrapTestCase(BaseTestCase): @@ -504,7 +504,7 @@ def assertUnchanged(self, text): """assert that dedent() has no effect on 'text'""" - self.assertEquals(text, dedent(text)) + self.assertEqual(text, dedent(text)) def test_dedent_nomargin(self): # No lines indented. @@ -527,17 +527,17 @@ # All lines indented by two spaces. text = " Hello there.\n How are ya?\n Oh good." expect = "Hello there.\nHow are ya?\nOh good." - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) # Same, with blank lines. 
text = " Hello there.\n\n How are ya?\n Oh good.\n" expect = "Hello there.\n\nHow are ya?\nOh good.\n" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) # Now indent one of the blank lines. text = " Hello there.\n \n How are ya?\n Oh good.\n" expect = "Hello there.\n\nHow are ya?\nOh good.\n" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) def test_dedent_uneven(self): # Lines indented unevenly. @@ -551,27 +551,27 @@ while 1: return foo ''' - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) # Uneven indentation with a blank line. text = " Foo\n Bar\n\n Baz\n" expect = "Foo\n Bar\n\n Baz\n" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) # Uneven indentation with a whitespace-only line. text = " Foo\n Bar\n \n Baz\n" expect = "Foo\n Bar\n\n Baz\n" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) # dedent() should not mangle internal tabs def test_dedent_preserve_internal_tabs(self): text = " hello\tthere\n how are\tyou?" expect = "hello\tthere\nhow are\tyou?" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) # make sure that it preserves tabs when it's not making any # changes at all - self.assertEquals(expect, dedent(expect)) + self.assertEqual(expect, dedent(expect)) # dedent() should not mangle tabs in the margin (i.e. # tabs and spaces both count as margin, but are *not* @@ -587,17 +587,17 @@ # dedent() only removes whitespace that can be uniformly removed! text = "\thello there\n\thow are you?" expect = "hello there\nhow are you?" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) text = " \thello there\n \thow are you?" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) text = " \t hello there\n \t how are you?" 
- self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) text = " \thello there\n \t how are you?" expect = "hello there\n how are you?" - self.assertEquals(expect, dedent(text)) + self.assertEqual(expect, dedent(text)) def test_main(): diff --git a/lib-python/2.7.0/distutils/tests/test_cmd.py b/lib-python/2.7.0/distutils/tests/test_cmd.py --- a/lib-python/2.7.0/distutils/tests/test_cmd.py +++ b/lib-python/2.7.0/distutils/tests/test_cmd.py @@ -44,7 +44,7 @@ # making sure execute gets called properly def _execute(func, args, exec_msg, level): - self.assertEquals(exec_msg, 'generating out from in') + self.assertEqual(exec_msg, 'generating out from in') cmd.force = True cmd.execute = _execute cmd.make_file(infiles='in', outfile='out', func='func', args=()) @@ -63,7 +63,7 @@ wanted = ["command options for 'MyCmd':", ' option1 = 1', ' option2 = 1'] - self.assertEquals(msgs, wanted) + self.assertEqual(msgs, wanted) def test_ensure_string(self): cmd = self.cmd @@ -81,7 +81,7 @@ cmd = self.cmd cmd.option1 = 'ok,dok' cmd.ensure_string_list('option1') - self.assertEquals(cmd.option1, ['ok', 'dok']) + self.assertEqual(cmd.option1, ['ok', 'dok']) cmd.option2 = ['xxx', 'www'] cmd.ensure_string_list('option2') @@ -109,14 +109,14 @@ with captured_stdout() as stdout: cmd.debug_print('xxx') stdout.seek(0) - self.assertEquals(stdout.read(), '') + self.assertEqual(stdout.read(), '') debug.DEBUG = True try: with captured_stdout() as stdout: cmd.debug_print('xxx') stdout.seek(0) - self.assertEquals(stdout.read(), 'xxx\n') + self.assertEqual(stdout.read(), 'xxx\n') finally: debug.DEBUG = False diff --git a/lib-python/2.7.0/json/tests/test_pass2.py b/lib-python/2.7.0/json/tests/test_pass2.py --- a/lib-python/2.7.0/json/tests/test_pass2.py +++ b/lib-python/2.7.0/json/tests/test_pass2.py @@ -11,4 +11,4 @@ # test in/out equivalence and parsing res = json.loads(JSON) out = json.dumps(res) - self.assertEquals(res, json.loads(out)) + self.assertEqual(res, 
json.loads(out)) diff --git a/lib-python/2.7.0/distutils/command/check.py b/lib-python/2.7.0/distutils/command/check.py --- a/lib-python/2.7.0/distutils/command/check.py +++ b/lib-python/2.7.0/distutils/command/check.py @@ -2,7 +2,7 @@ Implements the Distutils 'check' command. """ -__revision__ = "$Id: check.py 75266 2009-10-05 22:32:48Z andrew.kuchling $" +__revision__ = "$Id$" from distutils.core import Command from distutils.errors import DistutilsSetupError diff --git a/lib-python/2.7.0/distutils/tests/test_config.py b/lib-python/2.7.0/distutils/tests/test_config.py --- a/lib-python/2.7.0/distutils/tests/test_config.py +++ b/lib-python/2.7.0/distutils/tests/test_config.py @@ -90,7 +90,7 @@ waited = [('password', 'secret'), ('realm', 'pypi'), ('repository', 'http://pypi.python.org/pypi'), ('server', 'server1'), ('username', 'me')] - self.assertEquals(config, waited) + self.assertEqual(config, waited) # old format self.write_file(self.rc, PYPIRC_OLD) @@ -100,7 +100,7 @@ waited = [('password', 'secret'), ('realm', 'pypi'), ('repository', 'http://pypi.python.org/pypi'), ('server', 'server-login'), ('username', 'tarek')] - self.assertEquals(config, waited) + self.assertEqual(config, waited) def test_server_empty_registration(self): cmd = self._cmd(self.dist) @@ -108,8 +108,12 @@ self.assertTrue(not os.path.exists(rc)) cmd._store_pypirc('tarek', 'xxx') self.assertTrue(os.path.exists(rc)) - content = open(rc).read() - self.assertEquals(content, WANTED) + f = open(rc) + try: + content = f.read() + self.assertEqual(content, WANTED) + finally: + f.close() def test_suite(): return unittest.makeSuite(PyPIRCCommandTestCase) diff --git a/lib-python/2.7.0/test/test_deque.py b/lib-python/2.7.0/test/test_deque.py --- a/lib-python/2.7.0/test/test_deque.py +++ b/lib-python/2.7.0/test/test_deque.py @@ -234,7 +234,7 @@ d = deque(data[:i]) r = d.reverse() self.assertEqual(list(d), list(reversed(data[:i]))) - self.assert_(r is None) + self.assertIs(r, None) d.reverse() 
self.assertEqual(list(d), data[:i]) self.assertRaises(TypeError, d.reverse, 1) # Arity is zero diff --git a/lib-python/2.7.0/pydoc.py b/lib-python/2.7.0/pydoc.py --- a/lib-python/2.7.0/pydoc.py +++ b/lib-python/2.7.0/pydoc.py @@ -37,7 +37,7 @@ __author__ = "Ka-Ping Yee " __date__ = "26 February 2001" -__version__ = "$Revision: 84174 $" +__version__ = "$Revision$" __credits__ = """Guido van Rossum, for an excellent programming language. Tommy Burnette, the original creator of manpy. Paul Prescod, for all his work on onlinehelp. diff --git a/lib-python/2.7.0/test/test__locale.py b/lib-python/2.7.0/test/test__locale.py --- a/lib-python/2.7.0/test/test__locale.py +++ b/lib-python/2.7.0/test/test__locale.py @@ -59,7 +59,7 @@ known_value = known_numerics.get(used_locale, ('', ''))[data_type == 'thousands_sep'] if known_value and calc_value: - self.assertEquals(calc_value, known_value, + self.assertEqual(calc_value, known_value, self.lc_numeric_err_msg % ( calc_value, known_value, calc_type, data_type, set_locale, @@ -103,7 +103,7 @@ set_locale = setlocale(LC_NUMERIC) except Error: set_locale = "" - self.assertEquals(nl_radixchar, li_radixchar, + self.assertEqual(nl_radixchar, li_radixchar, "%s (nl_langinfo) != %s (localeconv) " "(set to %s, using %s)" % ( nl_radixchar, li_radixchar, @@ -122,9 +122,9 @@ if loc == 'eu_ES' and localeconv()['decimal_point'] == "' ": continue - self.assertEquals(int(eval('3.14') * 100), 314, + self.assertEqual(int(eval('3.14') * 100), 314, "using eval('3.14') failed for %s" % loc) - self.assertEquals(int(float('3.14') * 100), 314, + self.assertEqual(int(float('3.14') * 100), 314, "using float('3.14') failed for %s" % loc) if localeconv()['decimal_point'] != '.': self.assertRaises(ValueError, float, diff --git a/lib-python/2.7.0/distutils/command/bdist_dumb.py b/lib-python/2.7.0/distutils/command/bdist_dumb.py --- a/lib-python/2.7.0/distutils/command/bdist_dumb.py +++ b/lib-python/2.7.0/distutils/command/bdist_dumb.py @@ -4,7 +4,7 @@ 
distribution -- i.e., just an archive to be unpacked under $prefix or $exec_prefix).""" -__revision__ = "$Id: bdist_dumb.py 77761 2010-01-26 22:46:15Z tarek.ziade $" +__revision__ = "$Id$" import os diff --git a/lib-python/2.7.0/tarfile.py b/lib-python/2.7.0/tarfile.py --- a/lib-python/2.7.0/tarfile.py +++ b/lib-python/2.7.0/tarfile.py @@ -30,13 +30,13 @@ """Read from and write to tar format archives. """ -__version__ = "$Revision: 85213 $" +__version__ = "$Revision$" # $Source$ version = "0.9.0" __author__ = "Lars Gustäbel (lars at gustaebel.de)" -__date__ = "$Date: 2010-10-04 17:37:53 +0200 (Mon, 04 Oct 2010) $" -__cvsid__ = "$Id: tarfile.py 85213 2010-10-04 15:37:53Z lars.gustaebel $" +__date__ = "$Date$" +__cvsid__ = "$Id$" __credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend." #--------- diff --git a/lib-python/2.7.0/json/tests/test_recursion.py b/lib-python/2.7.0/json/tests/test_recursion.py --- a/lib-python/2.7.0/json/tests/test_recursion.py +++ b/lib-python/2.7.0/json/tests/test_recursion.py @@ -57,7 +57,7 @@ def test_defaultrecursion(self): enc = RecursiveJSONEncoder() - self.assertEquals(enc.encode(JSONTestObject), '"JSONTestObject"') + self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') enc.recurse = True try: enc.encode(JSONTestObject) diff --git a/lib-python/2.7.0/test/test_codecs.py b/lib-python/2.7.0/test/test_codecs.py --- a/lib-python/2.7.0/test/test_codecs.py +++ b/lib-python/2.7.0/test/test_codecs.py @@ -267,7 +267,7 @@ # try to read it back s = StringIO.StringIO(d) f = reader(s) - self.assertEquals(f.read(), u"spamspam") + self.assertEqual(f.read(), u"spamspam") def test_badbom(self): s = StringIO.StringIO(4*"\xff") @@ -425,7 +425,7 @@ # try to read it back s = StringIO.StringIO(d) f = reader(s) - self.assertEquals(f.read(), u"spamspam") + self.assertEqual(f.read(), u"spamspam") def test_badbom(self): s = StringIO.StringIO("\xff\xff") @@ -673,7 +673,7 @@ class EscapeDecodeTest(unittest.TestCase): def test_empty(self): -
self.assertEquals(codecs.escape_decode(""), ("", 0)) + self.assertEqual(codecs.escape_decode(""), ("", 0)) class RecodingTest(unittest.TestCase): def test_recoding(self): @@ -800,11 +800,11 @@ # code produces only lower case. Converting just puny to # lower is also insufficient, since some of the input characters # are upper case. - self.assertEquals(uni.encode("punycode").lower(), puny.lower()) + self.assertEqual(uni.encode("punycode").lower(), puny.lower()) def test_decode(self): for uni, puny in punycode_testcases: - self.assertEquals(uni, puny.decode("punycode")) + self.assertEqual(uni, puny.decode("punycode")) class UnicodeInternalTest(unittest.TestCase): def test_bug1251300(self): @@ -826,7 +826,7 @@ for internal, uni in ok: if sys.byteorder == "little": internal = "".join(reversed(internal)) - self.assertEquals(uni, internal.decode("unicode_internal")) + self.assertEqual(uni, internal.decode("unicode_internal")) for internal in not_ok: if sys.byteorder == "little": internal = "".join(reversed(internal)) @@ -838,10 +838,10 @@ try: "\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal") except UnicodeDecodeError, ex: - self.assertEquals("unicode_internal", ex.encoding) - self.assertEquals("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object) - self.assertEquals(4, ex.start) - self.assertEquals(8, ex.end) + self.assertEqual("unicode_internal", ex.encoding) + self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object) + self.assertEqual(4, ex.start) + self.assertEqual(8, ex.end) else: self.fail() @@ -852,16 +852,16 @@ ab = u"ab".encode("unicode_internal") ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]), "UnicodeInternalTest") - self.assertEquals((u"ab", 12), ignored) + self.assertEqual((u"ab", 12), ignored) def test_encode_length(self): # Issue 3739 encoder = codecs.getencoder("unicode_internal") - self.assertEquals(encoder(u"a")[1], 1) - self.assertEquals(encoder(u"\xe9\u0142")[1], 2) + self.assertEqual(encoder(u"a")[1], 1) + 
self.assertEqual(encoder(u"\xe9\u0142")[1], 2) encoder = codecs.getencoder("string-escape") - self.assertEquals(encoder(r'\x00')[1], 4) + self.assertEqual(encoder(r'\x00')[1], 4) # From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html nameprep_tests = [ @@ -1032,102 +1032,102 @@ else: prepped = unicode(prepped, "utf-8") try: - self.assertEquals(nameprep(orig), prepped) + self.assertEqual(nameprep(orig), prepped) except Exception,e: raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e))) class IDNACodecTest(unittest.TestCase): def test_builtin_decode(self): - self.assertEquals(unicode("python.org", "idna"), u"python.org") - self.assertEquals(unicode("python.org.", "idna"), u"python.org.") - self.assertEquals(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org") - self.assertEquals(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.") + self.assertEqual(unicode("python.org", "idna"), u"python.org") + self.assertEqual(unicode("python.org.", "idna"), u"python.org.") + self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org") + self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.") def test_builtin_encode(self): - self.assertEquals(u"python.org".encode("idna"), "python.org") - self.assertEquals("python.org.".encode("idna"), "python.org.") - self.assertEquals(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org") - self.assertEquals(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.") + self.assertEqual(u"python.org".encode("idna"), "python.org") + self.assertEqual("python.org.".encode("idna"), "python.org.") + self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org") + self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.") def test_stream(self): import StringIO r = codecs.getreader("idna")(StringIO.StringIO("abc")) r.read(3) - self.assertEquals(r.read(), u"") + self.assertEqual(r.read(), u"") def test_incremental_decode(self): - self.assertEquals( + 
self.assertEqual( "".join(codecs.iterdecode("python.org", "idna")), u"python.org" ) - self.assertEquals( + self.assertEqual( "".join(codecs.iterdecode("python.org.", "idna")), u"python.org." ) - self.assertEquals( + self.assertEqual( "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")), u"pyth\xf6n.org." ) - self.assertEquals( + self.assertEqual( "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")), u"pyth\xf6n.org." ) decoder = codecs.getincrementaldecoder("idna")() - self.assertEquals(decoder.decode("xn--xam", ), u"") - self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.") - self.assertEquals(decoder.decode(u"rg"), u"") - self.assertEquals(decoder.decode(u"", True), u"org") + self.assertEqual(decoder.decode("xn--xam", ), u"") + self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.") + self.assertEqual(decoder.decode(u"rg"), u"") + self.assertEqual(decoder.decode(u"", True), u"org") decoder.reset() - self.assertEquals(decoder.decode("xn--xam", ), u"") - self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.") - self.assertEquals(decoder.decode("rg."), u"org.") - self.assertEquals(decoder.decode("", True), u"") + self.assertEqual(decoder.decode("xn--xam", ), u"") + self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.") + self.assertEqual(decoder.decode("rg."), u"org.") + self.assertEqual(decoder.decode("", True), u"") def test_incremental_encode(self): - self.assertEquals( + self.assertEqual( "".join(codecs.iterencode(u"python.org", "idna")), "python.org" ) - self.assertEquals( + self.assertEqual( "".join(codecs.iterencode(u"python.org.", "idna")), "python.org." ) - self.assertEquals( + self.assertEqual( "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")), "xn--pythn-mua.org." ) - self.assertEquals( + self.assertEqual( "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")), "xn--pythn-mua.org." 
) encoder = codecs.getincrementalencoder("idna")() - self.assertEquals(encoder.encode(u"\xe4x"), "") - self.assertEquals(encoder.encode(u"ample.org"), "xn--xample-9ta.") - self.assertEquals(encoder.encode(u"", True), "org") + self.assertEqual(encoder.encode(u"\xe4x"), "") + self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.") + self.assertEqual(encoder.encode(u"", True), "org") encoder.reset() - self.assertEquals(encoder.encode(u"\xe4x"), "") - self.assertEquals(encoder.encode(u"ample.org."), "xn--xample-9ta.org.") - self.assertEquals(encoder.encode(u"", True), "") + self.assertEqual(encoder.encode(u"\xe4x"), "") + self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.") + self.assertEqual(encoder.encode(u"", True), "") class CodecsModuleTest(unittest.TestCase): def test_decode(self): - self.assertEquals(codecs.decode('\xe4\xf6\xfc', 'latin-1'), + self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'), u'\xe4\xf6\xfc') self.assertRaises(TypeError, codecs.decode) - self.assertEquals(codecs.decode('abc'), u'abc') + self.assertEqual(codecs.decode('abc'), u'abc') self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii') def test_encode(self): - self.assertEquals(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'), + self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'), '\xe4\xf6\xfc') self.assertRaises(TypeError, codecs.encode) self.assertRaises(LookupError, codecs.encode, "foo", "__spam__") - self.assertEquals(codecs.encode(u'abc'), 'abc') + self.assertEqual(codecs.encode(u'abc'), 'abc') self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii') def test_register(self): @@ -1163,19 +1163,19 @@ def test_readlines(self): f = self.reader(self.stream) - self.assertEquals(f.readlines(), [u'\ud55c\n', u'\uae00']) + self.assertEqual(f.readlines(), [u'\ud55c\n', u'\uae00']) class EncodedFileTest(unittest.TestCase): def test_basic(self): f = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80') ef = codecs.EncodedFile(f, 
'utf-16-le', 'utf-8') - self.assertEquals(ef.read(), '\\\xd5\n\x00\x00\xae') + self.assertEqual(ef.read(), '\\\xd5\n\x00\x00\xae') f = StringIO.StringIO() ef = codecs.EncodedFile(f, 'utf-8', 'latin1') ef.write('\xc3\xbc') - self.assertEquals(f.getvalue(), '\xfc') + self.assertEqual(f.getvalue(), '\xfc') class Str2StrTest(unittest.TestCase): @@ -1478,33 +1478,33 @@ class CharmapTest(unittest.TestCase): def test_decode_with_string_map(self): - self.assertEquals( + self.assertEqual( codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"), (u"abc", 3) ) - self.assertEquals( + self.assertEqual( codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"), (u"ab\ufffd", 3) ) - self.assertEquals( + self.assertEqual( codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"), (u"ab\ufffd", 3) ) - self.assertEquals( + self.assertEqual( codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"), (u"ab", 3) ) - self.assertEquals( + self.assertEqual( codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"), (u"ab", 3) ) allbytes = "".join(chr(i) for i in xrange(256)) - self.assertEquals( + self.assertEqual( codecs.charmap_decode(allbytes, "ignore", u""), (u"", len(allbytes)) ) @@ -1513,14 +1513,14 @@ def test_encodedfile(self): f = StringIO.StringIO("\xc3\xbc") with codecs.EncodedFile(f, "latin-1", "utf-8") as ef: - self.assertEquals(ef.read(), "\xfc") + self.assertEqual(ef.read(), "\xfc") def test_streamreaderwriter(self): f = StringIO.StringIO("\xc3\xbc") info = codecs.lookup("utf-8") with codecs.StreamReaderWriter(f, info.streamreader, info.streamwriter, 'strict') as srw: - self.assertEquals(srw.read(), u"\xfc") + self.assertEqual(srw.read(), u"\xfc") class BomTest(unittest.TestCase): @@ -1538,27 +1538,27 @@ f.write(data) f.write(data) f.seek(0) - self.assertEquals(f.read(), data * 2) + self.assertEqual(f.read(), data * 2) f.seek(0) - self.assertEquals(f.read(), data * 2) + self.assertEqual(f.read(), data * 2) # Check that the BOM is written after a seek(0) with 
codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f: f.write(data[0]) - self.assertNotEquals(f.tell(), 0) + self.assertNotEqual(f.tell(), 0) f.seek(0) f.write(data) f.seek(0) - self.assertEquals(f.read(), data) + self.assertEqual(f.read(), data) # (StreamWriter) Check that the BOM is written after a seek(0) with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f: f.writer.write(data[0]) - self.assertNotEquals(f.writer.tell(), 0) + self.assertNotEqual(f.writer.tell(), 0) f.writer.seek(0) f.writer.write(data) f.seek(0) - self.assertEquals(f.read(), data) + self.assertEqual(f.read(), data) # Check that the BOM is not written after a seek() at a position # different than the start @@ -1567,7 +1567,7 @@ f.seek(f.tell()) f.write(data) f.seek(0) - self.assertEquals(f.read(), data * 2) + self.assertEqual(f.read(), data * 2) # (StreamWriter) Check that the BOM is not written after a seek() # at a position different than the start @@ -1576,7 +1576,7 @@ f.writer.seek(f.writer.tell()) f.writer.write(data) f.seek(0) - self.assertEquals(f.read(), data * 2) + self.assertEqual(f.read(), data * 2) def test_main(): diff --git a/lib-python/2.7.0/test/test_shutil.py b/lib-python/2.7.0/test/test_shutil.py --- a/lib-python/2.7.0/test/test_shutil.py +++ b/lib-python/2.7.0/test/test_shutil.py @@ -431,7 +431,7 @@ self.assertTrue(os.path.exists(tarball2)) # let's compare both tarballs - self.assertEquals(self._tarinfo(tarball), self._tarinfo(tarball2)) + self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2)) # trying an uncompressed one base_name = os.path.join(tmpdir2, 'archive') @@ -469,6 +469,7 @@ # check if the compressed tarball was created tarball = base_name + '.zip' + self.assertTrue(os.path.exists(tarball)) def test_make_archive(self): @@ -524,8 +525,8 @@ archive = tarfile.open(archive_name) try: for member in archive.getmembers(): - self.assertEquals(member.uid, 0) - self.assertEquals(member.gid, 0) + self.assertEqual(member.uid, 0) + 
self.assertEqual(member.gid, 0) finally: archive.close() @@ -540,7 +541,7 @@ make_archive('xxx', 'xxx', root_dir=self.mkdtemp()) except Exception: pass - self.assertEquals(os.getcwd(), current_dir) + self.assertEqual(os.getcwd(), current_dir) finally: unregister_archive_format('xxx') diff --git a/lib-python/2.7.0/bsddb/dbtables.py b/lib-python/2.7.0/bsddb/dbtables.py --- a/lib-python/2.7.0/bsddb/dbtables.py +++ b/lib-python/2.7.0/bsddb/dbtables.py @@ -15,7 +15,7 @@ # This provides a simple database table interface built on top of # the Python Berkeley DB 3 interface. # -_cvsid = '$Id: dbtables.py 79285 2010-03-22 14:22:26Z jesus.cea $' +_cvsid = '$Id$' import re import sys diff --git a/lib-python/2.7.0/distutils/emxccompiler.py b/lib-python/2.7.0/distutils/emxccompiler.py --- a/lib-python/2.7.0/distutils/emxccompiler.py +++ b/lib-python/2.7.0/distutils/emxccompiler.py @@ -19,7 +19,7 @@ # # * EMX gcc 2.81/EMX 0.9d fix03 -__revision__ = "$Id: emxccompiler.py 78666 2010-03-05 00:16:02Z tarek.ziade $" +__revision__ = "$Id$" import os,sys,copy from distutils.ccompiler import gen_preprocess_options, gen_lib_options @@ -272,8 +272,10 @@ # It would probably better to read single lines to search. 
# But we do this only once, and it is fast enough f = open(fn) - s = f.read() - f.close() + try: + s = f.read() + finally: + f.close() except IOError, exc: # if we can't read this file, we cannot say it is wrong @@ -300,8 +302,10 @@ gcc_exe = find_executable('gcc') if gcc_exe: out = os.popen(gcc_exe + ' -dumpversion','r') - out_string = out.read() - out.close() + try: + out_string = out.read() + finally: + out.close() result = re.search('(\d+\.\d+\.\d+)',out_string) if result: gcc_version = StrictVersion(result.group(1)) diff --git a/lib-python/2.7.0/distutils/archive_util.py b/lib-python/2.7.0/distutils/archive_util.py --- a/lib-python/2.7.0/distutils/archive_util.py +++ b/lib-python/2.7.0/distutils/archive_util.py @@ -3,7 +3,7 @@ Utility functions for creating archive files (tarballs, zip files, that sort of thing).""" -__revision__ = "$Id: archive_util.py 75659 2009-10-24 13:29:44Z tarek.ziade $" +__revision__ = "$Id$" import os from warnings import warn diff --git a/lib-python/2.7.0/distutils/bcppcompiler.py b/lib-python/2.7.0/distutils/bcppcompiler.py --- a/lib-python/2.7.0/distutils/bcppcompiler.py +++ b/lib-python/2.7.0/distutils/bcppcompiler.py @@ -11,7 +11,7 @@ # someone should sit down and factor out the common code as # WindowsCCompiler! 
--GPW -__revision__ = "$Id: bcppcompiler.py 76956 2009-12-21 01:22:46Z tarek.ziade $" +__revision__ = "$Id$" import os diff --git a/lib-python/2.7.0/test/test_signal.py b/lib-python/2.7.0/test/test_signal.py --- a/lib-python/2.7.0/test/test_signal.py +++ b/lib-python/2.7.0/test/test_signal.py @@ -203,10 +203,10 @@ def test_getsignal(self): hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler) - self.assertEquals(signal.getsignal(signal.SIGHUP), - self.trivial_signal_handler) + self.assertEqual(signal.getsignal(signal.SIGHUP), + self.trivial_signal_handler) signal.signal(signal.SIGHUP, hup) - self.assertEquals(signal.getsignal(signal.SIGHUP), hup) + self.assertEqual(signal.getsignal(signal.SIGHUP), hup) @unittest.skipUnless(sys.platform == "win32", "Windows specific") @@ -456,9 +456,9 @@ "high") # virtual itimer should be (0.0, 0.0) now - self.assertEquals(signal.getitimer(self.itimer), (0.0, 0.0)) + self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0)) # and the handler should have been called - self.assertEquals(self.hndl_called, True) + self.assertEqual(self.hndl_called, True) # Issue 3864. Unknown if this affects earlier versions of freebsd also. 
@unittest.skipIf(sys.platform=='freebsd6', @@ -479,7 +479,7 @@ "high") # profiling itimer should be (0.0, 0.0) now - self.assertEquals(signal.getitimer(self.itimer), (0.0, 0.0)) + self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0)) # and the handler should have been called self.assertEqual(self.hndl_called, True) diff --git a/lib-python/2.7.0/idlelib/idle.bat b/lib-python/2.7.0/idlelib/idle.bat --- a/lib-python/2.7.0/idlelib/idle.bat +++ b/lib-python/2.7.0/idlelib/idle.bat @@ -1,3 +1,4 @@ @echo off -rem Working IDLE bat for Windows - uses start instead of absolute pathname -start idle.pyw %1 %2 %3 %4 %5 %6 %7 %8 %9 +rem Start IDLE using the appropriate Python interpreter +set CURRDIR=%~dp0 +start "%CURRDIR%..\..\pythonw.exe" "%CURRDIR%idle.pyw" %1 %2 %3 %4 %5 %6 %7 %8 %9 diff --git a/lib-python/2.7.0/idlelib/idlever.py b/lib-python/2.7.0/idlelib/idlever.py --- a/lib-python/2.7.0/idlelib/idlever.py +++ b/lib-python/2.7.0/idlelib/idlever.py @@ -1,1 +1,1 @@ -IDLE_VERSION = "2.7.1a0" +IDLE_VERSION = "2.7.1" diff --git a/lib-python/2.7.0/bsddb/test/test_cursor_pget_bug.py b/lib-python/2.7.0/bsddb/test/test_cursor_pget_bug.py --- a/lib-python/2.7.0/bsddb/test/test_cursor_pget_bug.py +++ b/lib-python/2.7.0/bsddb/test/test_cursor_pget_bug.py @@ -37,12 +37,12 @@ def test_pget(self): cursor = self.secondary_db.cursor() - self.assertEquals(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET)) - self.assertEquals(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP)) - self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP)) + self.assertEqual(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET)) + self.assertEqual(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP)) + self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP)) - self.assertEquals(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', flags=db.DB_SET)) - self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP)) + self.assertEqual(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', 
flags=db.DB_SET)) + self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP)) cursor.close() diff --git a/lib-python/2.7.0/distutils/tests/test_install_lib.py b/lib-python/2.7.0/distutils/tests/test_install_lib.py --- a/lib-python/2.7.0/distutils/tests/test_install_lib.py +++ b/lib-python/2.7.0/distutils/tests/test_install_lib.py @@ -18,8 +18,8 @@ cmd = install_lib(dist) cmd.finalize_options() - self.assertEquals(cmd.compile, 1) - self.assertEquals(cmd.optimize, 0) + self.assertEqual(cmd.compile, 1) + self.assertEqual(cmd.optimize, 0) # optimize must be 0, 1, or 2 cmd.optimize = 'foo' @@ -29,7 +29,7 @@ cmd.optimize = '2' cmd.finalize_options() - self.assertEquals(cmd.optimize, 2) + self.assertEqual(cmd.optimize, 2) def _setup_byte_compile(self): pkg_dir, dist = self.create_dist() @@ -81,7 +81,7 @@ cmd.distribution.script_name = 'setup.py' # get_input should return 2 elements - self.assertEquals(len(cmd.get_inputs()), 2) + self.assertEqual(len(cmd.get_inputs()), 2) def test_dont_write_bytecode(self): # makes sure byte_compile is not used diff --git a/lib-python/2.7.0/test/test_array.py b/lib-python/2.7.0/test/test_array.py --- a/lib-python/2.7.0/test/test_array.py +++ b/lib-python/2.7.0/test/test_array.py @@ -628,11 +628,11 @@ data.reverse() L[start:stop:step] = data a[start:stop:step] = array.array(self.typecode, data) - self.assertEquals(a, array.array(self.typecode, L)) + self.assertEqual(a, array.array(self.typecode, L)) del L[start:stop:step] del a[start:stop:step] - self.assertEquals(a, array.array(self.typecode, L)) + self.assertEqual(a, array.array(self.typecode, L)) def test_index(self): example = 2*self.example diff --git a/lib-python/2.7.0/distutils/tests/test_bdist_dumb.py b/lib-python/2.7.0/distutils/tests/test_bdist_dumb.py --- a/lib-python/2.7.0/distutils/tests/test_bdist_dumb.py +++ b/lib-python/2.7.0/distutils/tests/test_bdist_dumb.py @@ -78,7 +78,7 @@ base = base.replace(':', '-') wanted = ['%s.zip' % base] - self.assertEquals(dist_created, wanted) + 
self.assertEqual(dist_created, wanted) # now let's check what we have in the zip file # XXX to be done @@ -87,16 +87,16 @@ pkg_dir, dist = self.create_dist() os.chdir(pkg_dir) cmd = bdist_dumb(dist) - self.assertEquals(cmd.bdist_dir, None) + self.assertEqual(cmd.bdist_dir, None) cmd.finalize_options() # bdist_dir is initialized to bdist_base/dumb if not set base = cmd.get_finalized_command('bdist').bdist_base - self.assertEquals(cmd.bdist_dir, os.path.join(base, 'dumb')) + self.assertEqual(cmd.bdist_dir, os.path.join(base, 'dumb')) # the format is set to a default value depending on the os.name default = cmd.default_format[os.name] - self.assertEquals(cmd.format, default) + self.assertEqual(cmd.format, default) def test_suite(): return unittest.makeSuite(BuildDumbTestCase) diff --git a/lib-python/2.7.0/distutils/tests/test_build_py.py b/lib-python/2.7.0/distutils/tests/test_build_py.py --- a/lib-python/2.7.0/distutils/tests/test_build_py.py +++ b/lib-python/2.7.0/distutils/tests/test_build_py.py @@ -19,11 +19,15 @@ def _setup_package_data(self): sources = self.mkdtemp() f = open(os.path.join(sources, "__init__.py"), "w") - f.write("# Pretend this is a package.") - f.close() + try: + f.write("# Pretend this is a package.") + finally: + f.close() f = open(os.path.join(sources, "README.txt"), "w") - f.write("Info about this package") - f.close() + try: + f.write("Info about this package") + finally: + f.close() destination = self.mkdtemp() diff --git a/lib-python/2.7.0/distutils/tests/test_check.py b/lib-python/2.7.0/distutils/tests/test_check.py --- a/lib-python/2.7.0/distutils/tests/test_check.py +++ b/lib-python/2.7.0/distutils/tests/test_check.py @@ -26,7 +26,7 @@ # by default, check is checking the metadata # should have some warnings cmd = self._run() - self.assertEquals(cmd._warnings, 2) + self.assertEqual(cmd._warnings, 2) # now let's add the required fields # and run it again, to make sure we don't get @@ -35,7 +35,7 @@ 'author_email': 'xxx', 'name': 'xxx', 
'version': 'xxx'} cmd = self._run(metadata) - self.assertEquals(cmd._warnings, 0) + self.assertEqual(cmd._warnings, 0) # now with the strict mode, we should # get an error if there are missing metadata @@ -43,7 +43,7 @@ # and of course, no error when all metadata are present cmd = self._run(metadata, strict=1) - self.assertEquals(cmd._warnings, 0) + self.assertEqual(cmd._warnings, 0) def test_check_document(self): if not HAS_DOCUTILS: # won't test without docutils @@ -54,12 +54,12 @@ # let's see if it detects broken rest broken_rest = 'title\n===\n\ntest' msgs = cmd._check_rst_data(broken_rest) - self.assertEquals(len(msgs), 1) + self.assertEqual(len(msgs), 1) # and non-broken rest rest = 'title\n=====\n\ntest' msgs = cmd._check_rst_data(rest) - self.assertEquals(len(msgs), 0) + self.assertEqual(len(msgs), 0) def test_check_restructuredtext(self): if not HAS_DOCUTILS: # won't test without docutils @@ -69,7 +69,7 @@ pkg_info, dist = self.create_dist(long_description=broken_rest) cmd = check(dist) cmd.check_restructuredtext() - self.assertEquals(cmd._warnings, 1) + self.assertEqual(cmd._warnings, 1) # let's see if we have an error with strict=1 metadata = {'url': 'xxx', 'author': 'xxx', @@ -82,7 +82,7 @@ # and non-broken rest metadata['long_description'] = 'title\n=====\n\ntest' cmd = self._run(metadata, strict=1, restructuredtext=1) - self.assertEquals(cmd._warnings, 0) + self.assertEqual(cmd._warnings, 0) def test_check_all(self): diff --git a/lib-python/2.7.0/bsddb/test/test_compat.py b/lib-python/2.7.0/bsddb/test/test_compat.py --- a/lib-python/2.7.0/bsddb/test/test_compat.py +++ b/lib-python/2.7.0/bsddb/test/test_compat.py @@ -119,7 +119,7 @@ if verbose: print rec - self.assert_(f.has_key('f'), 'Error, missing key!') + self.assertTrue(f.has_key('f'), 'Error, missing key!') # test that set_location() returns the next nearest key, value # on btree databases and raises KeyError on others. 
diff --git a/lib-python/2.7.0/distutils/command/install_data.py b/lib-python/2.7.0/distutils/command/install_data.py --- a/lib-python/2.7.0/distutils/command/install_data.py +++ b/lib-python/2.7.0/distutils/command/install_data.py @@ -5,7 +5,7 @@ # contributed by Bastian Kleineidam -__revision__ = "$Id: install_data.py 76849 2009-12-15 06:29:19Z tarek.ziade $" +__revision__ = "$Id$" import os from distutils.core import Command diff --git a/lib-python/2.7.0/test/test_imaplib.py b/lib-python/2.7.0/test/test_imaplib.py --- a/lib-python/2.7.0/test/test_imaplib.py +++ b/lib-python/2.7.0/test/test_imaplib.py @@ -10,7 +10,7 @@ import SocketServer import time -from test_support import reap_threads, verbose +from test_support import reap_threads, verbose, transient_internet import unittest try: @@ -112,7 +112,7 @@ if verbose: print "creating server" server = MyServer(addr, hdlr) - self.assertEquals(server.server_address, server.socket.getsockname()) + self.assertEqual(server.server_address, server.socket.getsockname()) if verbose: print "server created" @@ -178,8 +178,46 @@ imap_class = IMAP4_SSL +class RemoteIMAPTest(unittest.TestCase): + host = 'cyrus.andrew.cmu.edu' + port = 143 + username = 'anonymous' + password = 'pass' + imap_class = imaplib.IMAP4 + + def setUp(self): + with transient_internet(self.host): + self.server = self.imap_class(self.host, self.port) + + def tearDown(self): + if self.server is not None: + self.server.logout() + + def test_logincapa(self): + self.assertTrue('LOGINDISABLED' in self.server.capabilities) + + def test_anonlogin(self): + self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities) + rs = self.server.login(self.username, self.password) + self.assertEqual(rs[0], 'OK') + + def test_logout(self): + rs = self.server.logout() + self.server = None + self.assertEqual(rs[0], 'BYE') + + + at unittest.skipUnless(ssl, "SSL not available") +class RemoteIMAP_SSLTest(RemoteIMAPTest): + port = 993 + imap_class = IMAP4_SSL + + def 
test_logincapa(self): + self.assertFalse('LOGINDISABLED' in self.server.capabilities) + self.assertTrue('AUTH=PLAIN' in self.server.capabilities) + + def test_main(): - tests = [TestImaplib] if support.is_resource_enabled('network'): @@ -189,7 +227,10 @@ "keycert.pem") if not os.path.exists(CERTFILE): raise support.TestFailed("Can't read certificate files!") - tests.extend([ThreadedNetworkedTests, ThreadedNetworkedTestsSSL]) + tests.extend([ + ThreadedNetworkedTests, ThreadedNetworkedTestsSSL, + RemoteIMAPTest, RemoteIMAP_SSLTest, + ]) support.run_unittest(*tests) diff --git a/lib-python/2.7.0/distutils/util.py b/lib-python/2.7.0/distutils/util.py --- a/lib-python/2.7.0/distutils/util.py +++ b/lib-python/2.7.0/distutils/util.py @@ -4,7 +4,7 @@ one of the other *util.py modules. """ -__revision__ = "$Id: util.py 82791 2010-07-11 08:52:52Z ronald.oussoren $" +__revision__ = "$Id$" import sys, os, string, re from distutils.errors import DistutilsPlatformError @@ -116,13 +116,15 @@ # behaviour. 
pass else: - m = re.search( - r'ProductUserVisibleVersion\s*' + - r'(.*?)', f.read()) - f.close() - if m is not None: - macrelease = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour + try: + m = re.search( + r'ProductUserVisibleVersion\s*' + + r'(.*?)', f.read()) + if m is not None: + macrelease = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + finally: + f.close() if not macver: macver = macrelease diff --git a/lib-python/2.7.0/bsddb/test/test_get_none.py b/lib-python/2.7.0/bsddb/test/test_get_none.py --- a/lib-python/2.7.0/bsddb/test/test_get_none.py +++ b/lib-python/2.7.0/bsddb/test/test_get_none.py @@ -76,7 +76,7 @@ break self.assertNotEqual(rec, None) - self.assert_(exceptionHappened) + self.assertTrue(exceptionHappened) self.assertEqual(count, len(string.letters)) c.close() diff --git a/lib-python/2.7.0/json/tests/test_indent.py b/lib-python/2.7.0/json/tests/test_indent.py --- a/lib-python/2.7.0/json/tests/test_indent.py +++ b/lib-python/2.7.0/json/tests/test_indent.py @@ -36,6 +36,6 @@ h1 = json.loads(d1) h2 = json.loads(d2) - self.assertEquals(h1, h) - self.assertEquals(h2, h) - self.assertEquals(d2, expect) + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(d2, expect) diff --git a/lib-python/2.7.0/distutils/command/__init__.py b/lib-python/2.7.0/distutils/command/__init__.py --- a/lib-python/2.7.0/distutils/command/__init__.py +++ b/lib-python/2.7.0/distutils/command/__init__.py @@ -3,7 +3,7 @@ Package containing implementation of all the standard Distutils commands.""" -__revision__ = "$Id: __init__.py 71473 2009-04-11 14:55:07Z tarek.ziade $" +__revision__ = "$Id$" __all__ = ['build', 'build_py', diff --git a/lib-python/2.7.0/test/test_macos.py b/lib-python/2.7.0/test/test_macos.py --- a/lib-python/2.7.0/test/test_macos.py +++ b/lib-python/2.7.0/test/test_macos.py @@ -23,8 +23,8 @@ test_support.TESTFN]) cr, tp = MacOS.GetCreatorAndType(test_support.TESTFN) - 
self.assertEquals(tp, 'ABCD') - self.assertEquals(cr, 'EFGH') + self.assertEqual(tp, 'ABCD') + self.assertEqual(cr, 'EFGH') finally: os.unlink(test_support.TESTFN) @@ -42,8 +42,8 @@ 'ABCD', 'EFGH') cr, tp = MacOS.GetCreatorAndType(test_support.TESTFN) - self.assertEquals(cr, 'ABCD') - self.assertEquals(tp, 'EFGH') + self.assertEqual(cr, 'ABCD') + self.assertEqual(tp, 'EFGH') data = subprocess.Popen(["/Developer/Tools/GetFileInfo", test_support.TESTFN], stdout=subprocess.PIPE).communicate()[0] @@ -56,8 +56,8 @@ if ln.startswith('creator:'): cr = ln.split()[-1][1:-1] - self.assertEquals(cr, 'ABCD') - self.assertEquals(tp, 'EFGH') + self.assertEqual(cr, 'ABCD') + self.assertEqual(tp, 'EFGH') finally: os.unlink(test_support.TESTFN) @@ -77,14 +77,14 @@ fp = open(test_support.TESTFN, 'r') data = fp.read() fp.close() - self.assertEquals(data, 'hello world\n') + self.assertEqual(data, 'hello world\n') rfp = MacOS.openrf(test_support.TESTFN, '*rb') data = rfp.read(100) data2 = rfp.read(100) rfp.close() - self.assertEquals(data, 'goodbye world\n') - self.assertEquals(data2, '') + self.assertEqual(data, 'goodbye world\n') + self.assertEqual(data2, '') finally: diff --git a/lib-python/2.7.0/json/tests/test_decode.py b/lib-python/2.7.0/json/tests/test_decode.py --- a/lib-python/2.7.0/json/tests/test_decode.py +++ b/lib-python/2.7.0/json/tests/test_decode.py @@ -9,19 +9,19 @@ def test_decimal(self): rval = json.loads('1.1', parse_float=decimal.Decimal) self.assertTrue(isinstance(rval, decimal.Decimal)) - self.assertEquals(rval, decimal.Decimal('1.1')) + self.assertEqual(rval, decimal.Decimal('1.1')) def test_float(self): rval = json.loads('1', parse_int=float) self.assertTrue(isinstance(rval, float)) - self.assertEquals(rval, 1.0) + self.assertEqual(rval, 1.0) def test_decoder_optimizations(self): # Several optimizations were made that skip over calls to # the whitespace regex, so this test is designed to try and # exercise the uncommon cases. 
The array cases are already covered. rval = json.loads('{ "key" : "value" , "k":"v" }') - self.assertEquals(rval, {"key":"value", "k":"v"}) + self.assertEqual(rval, {"key":"value", "k":"v"}) def test_object_pairs_hook(self): s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' diff --git a/lib-python/2.7.0/distutils/command/install_headers.py b/lib-python/2.7.0/distutils/command/install_headers.py --- a/lib-python/2.7.0/distutils/command/install_headers.py +++ b/lib-python/2.7.0/distutils/command/install_headers.py @@ -3,7 +3,7 @@ Implements the Distutils 'install_headers' command, to install C/C++ header files to the Python include directory.""" -__revision__ = "$Id: install_headers.py 70891 2009-03-31 20:55:21Z tarek.ziade $" +__revision__ = "$Id$" from distutils.core import Command diff --git a/lib-python/2.7.0/bsddb/test/test_misc.py b/lib-python/2.7.0/bsddb/test/test_misc.py --- a/lib-python/2.7.0/bsddb/test/test_misc.py +++ b/lib-python/2.7.0/bsddb/test/test_misc.py @@ -32,7 +32,7 @@ def test02_db_home(self): env = db.DBEnv() # check for crash fixed when db_home is used before open() - self.assert_(env.db_home is None) + self.assertTrue(env.db_home is None) env.open(self.homeDir, db.DB_CREATE) if sys.version_info[0] < 3 : self.assertEqual(self.homeDir, env.db_home) @@ -43,7 +43,7 @@ db = hashopen(self.filename) db.close() rp = repr(db) - self.assertEquals(rp, "{}") + self.assertEqual(rp, "{}") def test04_repr_db(self) : db = hashopen(self.filename) @@ -54,7 +54,7 @@ db.close() db = hashopen(self.filename) rp = repr(db) - self.assertEquals(rp, repr(d)) + self.assertEqual(rp, repr(d)) db.close() # http://sourceforge.net/tracker/index.php?func=detail&aid=1708868&group_id=13900&atid=313900 diff --git a/lib-python/2.7.0/distutils/tests/test_install_headers.py b/lib-python/2.7.0/distutils/tests/test_install_headers.py --- a/lib-python/2.7.0/distutils/tests/test_install_headers.py +++ b/lib-python/2.7.0/distutils/tests/test_install_headers.py @@ 
-23,7 +23,7 @@ pkg_dir, dist = self.create_dist(headers=headers) cmd = install_headers(dist) - self.assertEquals(cmd.get_inputs(), headers) + self.assertEqual(cmd.get_inputs(), headers) # let's run the command cmd.install_dir = os.path.join(pkg_dir, 'inst') @@ -31,7 +31,7 @@ cmd.run() # let's check the results - self.assertEquals(len(cmd.get_outputs()), 2) + self.assertEqual(len(cmd.get_outputs()), 2) def test_suite(): return unittest.makeSuite(InstallHeadersTestCase) diff --git a/lib-python/2.7.0/test/test_telnetlib.py b/lib-python/2.7.0/test/test_telnetlib.py --- a/lib-python/2.7.0/test/test_telnetlib.py +++ b/lib-python/2.7.0/test/test_telnetlib.py @@ -38,6 +38,7 @@ pass finally: serv.close() + conn.close() evt.set() class GeneralTests(TestCase): diff --git a/lib-python/2.7.0/test/test_logging.py b/lib-python/2.7.0/test/test_logging.py --- a/lib-python/2.7.0/test/test_logging.py +++ b/lib-python/2.7.0/test/test_logging.py @@ -120,13 +120,13 @@ except AttributeError: # StringIO.StringIO lacks a reset() method. 
actual_lines = stream.getvalue().splitlines() - self.assertEquals(len(actual_lines), len(expected_values)) + self.assertEqual(len(actual_lines), len(expected_values)) for actual, expected in zip(actual_lines, expected_values): match = pat.search(actual) if not match: self.fail("Log line does not match expected pattern:\n" + actual) - self.assertEquals(tuple(match.groups()), expected) + self.assertEqual(tuple(match.groups()), expected) s = stream.read() if s: self.fail("Remaining output at end of log stream:\n" + s) @@ -692,7 +692,7 @@ except RuntimeError: logging.exception("just testing") sys.stdout.seek(0) - self.assertEquals(output.getvalue(), + self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output is empty self.assert_log_lines([]) @@ -811,7 +811,7 @@ logger = logging.getLogger("tcp") logger.error("spam") logger.debug("eggs") - self.assertEquals(self.get_output(), "spam\neggs\n") + self.assertEqual(self.get_output(), "spam\neggs\n") class MemoryTest(BaseTest): @@ -1527,7 +1527,7 @@ except RuntimeError: logging.exception("just testing") sys.stdout.seek(0) - self.assertEquals(output.getvalue(), + self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output is empty self.assert_log_lines([]) @@ -1542,7 +1542,7 @@ except RuntimeError: logging.exception("just testing") sys.stdout.seek(0) - self.assertEquals(output.getvalue(), + self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output is empty self.assert_log_lines([]) diff --git a/lib-python/2.7.0/distutils/__init__.py b/lib-python/2.7.0/distutils/__init__.py --- a/lib-python/2.7.0/distutils/__init__.py +++ b/lib-python/2.7.0/distutils/__init__.py @@ -8,12 +8,12 @@ setup (...) 
""" -__revision__ = "$Id: __init__.py 82506 2010-07-03 14:51:25Z benjamin.peterson $" +__revision__ = "$Id$" # Distutils version # # Updated automatically by the Python release process. # #--start constants-- -__version__ = "2.7.1a0" +__version__ = "2.7.1" #--end constants-- diff --git a/lib-python/2.7.0/distutils/tests/test_sysconfig.py b/lib-python/2.7.0/distutils/tests/test_sysconfig.py --- a/lib-python/2.7.0/distutils/tests/test_sysconfig.py +++ b/lib-python/2.7.0/distutils/tests/test_sysconfig.py @@ -36,7 +36,7 @@ sysconfig.get_python_lib(prefix=TESTFN)) _sysconfig = __import__('sysconfig') res = sysconfig.get_python_lib(True, True) - self.assertEquals(_sysconfig.get_path('platstdlib'), res) + self.assertEqual(_sysconfig.get_path('platstdlib'), res) def test_get_python_inc(self): inc_dir = sysconfig.get_python_inc() @@ -50,22 +50,26 @@ def test_parse_makefile_base(self): self.makefile = test.test_support.TESTFN fd = open(self.makefile, 'w') - fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=LIB'" '\n') - fd.write('VAR=$OTHER\nOTHER=foo') - fd.close() + try: + fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=LIB'" '\n') + fd.write('VAR=$OTHER\nOTHER=foo') + finally: + fd.close() d = sysconfig.parse_makefile(self.makefile) - self.assertEquals(d, {'CONFIG_ARGS': "'--arg1=optarg1' 'ENV=LIB'", - 'OTHER': 'foo'}) + self.assertEqual(d, {'CONFIG_ARGS': "'--arg1=optarg1' 'ENV=LIB'", + 'OTHER': 'foo'}) def test_parse_makefile_literal_dollar(self): self.makefile = test.test_support.TESTFN fd = open(self.makefile, 'w') - fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=\$$LIB'" '\n') - fd.write('VAR=$OTHER\nOTHER=foo') - fd.close() + try: + fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=\$$LIB'" '\n') + fd.write('VAR=$OTHER\nOTHER=foo') + finally: + fd.close() d = sysconfig.parse_makefile(self.makefile) - self.assertEquals(d, {'CONFIG_ARGS': r"'--arg1=optarg1' 'ENV=\$LIB'", - 'OTHER': 'foo'}) + self.assertEqual(d, {'CONFIG_ARGS': r"'--arg1=optarg1' 'ENV=\$LIB'", + 'OTHER': 'foo'}) 
def test_suite(): diff --git a/lib-python/2.7.0/test/test_code.py b/lib-python/2.7.0/test/test_code.py --- a/lib-python/2.7.0/test/test_code.py +++ b/lib-python/2.7.0/test/test_code.py @@ -106,9 +106,9 @@ def test_newempty(self): co = _testcapi.code_newempty("filename", "funcname", 15) - self.assertEquals(co.co_filename, "filename") - self.assertEquals(co.co_name, "funcname") - self.assertEquals(co.co_firstlineno, 15) + self.assertEqual(co.co_filename, "filename") + self.assertEqual(co.co_name, "funcname") + self.assertEqual(co.co_firstlineno, 15) class CodeWeakRefTest(unittest.TestCase): diff --git a/lib-python/2.7.0/test/test_compiler.py b/lib-python/2.7.0/test/test_compiler.py --- a/lib-python/2.7.0/test/test_compiler.py +++ b/lib-python/2.7.0/test/test_compiler.py @@ -81,8 +81,8 @@ "", "exec") dct = {} exec c in dct - self.assertEquals(dct.get('e'), 1) - self.assertEquals(dct.get('f'), 1) + self.assertEqual(dct.get('e'), 1) + self.assertEqual(dct.get('f'), 1) def testDefaultArgs(self): self.assertRaises(SyntaxError, compiler.parse, "def foo(a=1, b): pass") @@ -93,7 +93,7 @@ c = compiler.compile('def f():\n "doc"', '', 'exec') g = {} exec c in g - self.assertEquals(g['f'].__doc__, "doc") + self.assertEqual(g['f'].__doc__, "doc") def testLineNo(self): # Test that all nodes except Module have a correct lineno attribute. 
@@ -120,8 +120,8 @@ self.check_lineno(child) def testFlatten(self): - self.assertEquals(flatten([1, [2]]), [1, 2]) - self.assertEquals(flatten((1, (2,))), [1, 2]) + self.assertEqual(flatten([1, [2]]), [1, 2]) + self.assertEqual(flatten((1, (2,))), [1, 2]) def testNestedScope(self): c = compiler.compile('def g():\n' @@ -133,44 +133,44 @@ 'exec') dct = {} exec c in dct - self.assertEquals(dct.get('result'), 3) + self.assertEqual(dct.get('result'), 3) def testGenExp(self): c = compiler.compile('list((i,j) for i in range(3) if i < 3' ' for j in range(4) if j > 2)', '', 'eval') - self.assertEquals(eval(c), [(0, 3), (1, 3), (2, 3)]) + self.assertEqual(eval(c), [(0, 3), (1, 3), (2, 3)]) def testSetLiteral(self): c = compiler.compile('{1, 2, 3}', '', 'eval') - self.assertEquals(eval(c), {1,2,3}) + self.assertEqual(eval(c), {1,2,3}) c = compiler.compile('{1, 2, 3,}', '', 'eval') - self.assertEquals(eval(c), {1,2,3}) + self.assertEqual(eval(c), {1,2,3}) def testDictLiteral(self): c = compiler.compile('{1:2, 2:3, 3:4}', '', 'eval') - self.assertEquals(eval(c), {1:2, 2:3, 3:4}) + self.assertEqual(eval(c), {1:2, 2:3, 3:4}) c = compiler.compile('{1:2, 2:3, 3:4,}', '', 'eval') - self.assertEquals(eval(c), {1:2, 2:3, 3:4}) + self.assertEqual(eval(c), {1:2, 2:3, 3:4}) def testSetComp(self): c = compiler.compile('{x for x in range(1, 4)}', '', 'eval') - self.assertEquals(eval(c), {1, 2, 3}) + self.assertEqual(eval(c), {1, 2, 3}) c = compiler.compile('{x * y for x in range(3) if x != 0' ' for y in range(4) if y != 0}', '', 'eval') - self.assertEquals(eval(c), {1, 2, 3, 4, 6}) + self.assertEqual(eval(c), {1, 2, 3, 4, 6}) def testDictComp(self): c = compiler.compile('{x:x+1 for x in range(1, 4)}', '', 'eval') - self.assertEquals(eval(c), {1:2, 2:3, 3:4}) + self.assertEqual(eval(c), {1:2, 2:3, 3:4}) c = compiler.compile('{(x, y) : y for x in range(2) if x != 0' ' for y in range(3) if y != 0}', '', 'eval') - self.assertEquals(eval(c), {(1, 2): 2, (1, 1): 1}) + self.assertEqual(eval(c), 
{(1, 2): 2, (1, 1): 1}) def testWith(self): # SF bug 1638243 @@ -183,7 +183,7 @@ 'exec' ) dct = {'TrivialContext': TrivialContext} exec c in dct - self.assertEquals(dct.get('result'), 1) + self.assertEqual(dct.get('result'), 1) def testWithAss(self): c = compiler.compile('from __future__ import with_statement\n' @@ -195,7 +195,7 @@ 'exec' ) dct = {'TrivialContext': TrivialContext} exec c in dct - self.assertEquals(dct.get('result'), 1) + self.assertEqual(dct.get('result'), 1) def testWithMult(self): events = [] @@ -215,8 +215,8 @@ 'exec' ) dct = {'Ctx': Ctx} exec c in dct - self.assertEquals(dct.get('result'), 1) - self.assertEquals(events, [1, 2]) + self.assertEqual(dct.get('result'), 1) + self.assertEqual(events, [1, 2]) def testGlobal(self): code = compiler.compile('global x\nx=1', '', 'exec') @@ -224,7 +224,7 @@ d2 = {} exec code in d1, d2 # x should be in the globals dict - self.assertEquals(d1.get('x'), 1) + self.assertEqual(d1.get('x'), 1) def testPrintFunction(self): c = compiler.compile('from __future__ import print_function\n' @@ -234,14 +234,14 @@ 'exec' ) dct = {'output': StringIO()} exec c in dct - self.assertEquals(dct['output'].getvalue(), 'a**b++') + self.assertEqual(dct['output'].getvalue(), 'a**b++') def _testErrEnc(self, src, text, offset): try: compile(src, "", "exec") except SyntaxError, e: - self.assertEquals(e.offset, offset) - self.assertEquals(e.text, text) + self.assertEqual(e.offset, offset) + self.assertEqual(e.text, text) def testSourceCodeEncodingsError(self): # Test SyntaxError with encoding definition diff --git a/lib-python/2.7.0/test/test_profile.py b/lib-python/2.7.0/test/test_profile.py --- a/lib-python/2.7.0/test/test_profile.py +++ b/lib-python/2.7.0/test/test_profile.py @@ -93,7 +93,7 @@ # Don't remove this comment. Everything below it is auto-generated. 
#--cut-------------------------------------------------------------------------- ProfileTest.expected_output['print_stats'] = """\ - 127 function calls (107 primitive calls) in 999.749 CPU seconds + 127 function calls (107 primitive calls) in 999.749 seconds Ordered by: standard name diff --git a/lib-python/2.7.0/test/test_threadedtempfile.py b/lib-python/2.7.0/test/test_threadedtempfile.py --- a/lib-python/2.7.0/test/test_threadedtempfile.py +++ b/lib-python/2.7.0/test/test_threadedtempfile.py @@ -68,8 +68,8 @@ msg = "Errors: errors %d ok %d\n%s" % (len(errors), ok, '\n'.join(errors)) - self.assertEquals(errors, [], msg) - self.assertEquals(ok, NUM_THREADS * FILES_PER_THREAD) + self.assertEqual(errors, [], msg) + self.assertEqual(ok, NUM_THREADS * FILES_PER_THREAD) def test_main(): run_unittest(ThreadedTempFileTest) diff --git a/lib-python/2.7.0/test/test_argparse.py b/lib-python/2.7.0/test/test_argparse.py --- a/lib-python/2.7.0/test/test_argparse.py +++ b/lib-python/2.7.0/test/test_argparse.py @@ -2709,18 +2709,18 @@ def test_empty(self): ns = argparse.Namespace() - self.assertEquals('' in ns, False) - self.assertEquals('' not in ns, True) - self.assertEquals('x' in ns, False) + self.assertEqual('' in ns, False) + self.assertEqual('' not in ns, True) + self.assertEqual('x' in ns, False) def test_non_empty(self): ns = argparse.Namespace(x=1, y=2) - self.assertEquals('x' in ns, True) - self.assertEquals('x' not in ns, False) - self.assertEquals('y' in ns, True) - self.assertEquals('' in ns, False) - self.assertEquals('xx' in ns, False) - self.assertEquals('z' in ns, False) + self.assertEqual('x' in ns, True) + self.assertEqual('x' not in ns, False) + self.assertEqual('y' in ns, True) + self.assertEqual('' in ns, False) + self.assertEqual('xx' in ns, False) + self.assertEqual('z' in ns, False) # ===================== # Help formatting tests diff --git a/lib-python/2.7.0/test/test_marshal.py b/lib-python/2.7.0/test/test_marshal.py --- 
a/lib-python/2.7.0/test/test_marshal.py +++ b/lib-python/2.7.0/test/test_marshal.py @@ -210,8 +210,8 @@ def test_version_argument(self): # Python 2.4.0 crashes for any call to marshal.dumps(x, y) - self.assertEquals(marshal.loads(marshal.dumps(5, 0)), 5) - self.assertEquals(marshal.loads(marshal.dumps(5, 1)), 5) + self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5) + self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5) def test_fuzz(self): # simple test that it's at least not *totally* trivial to diff --git a/lib-python/2.7.0/test/test_charmapcodec.py b/lib-python/2.7.0/test/test_charmapcodec.py --- a/lib-python/2.7.0/test/test_charmapcodec.py +++ b/lib-python/2.7.0/test/test_charmapcodec.py @@ -27,24 +27,24 @@ class CharmapCodecTest(unittest.TestCase): def test_constructorx(self): - self.assertEquals(unicode('abc', codecname), u'abc') - self.assertEquals(unicode('xdef', codecname), u'abcdef') - self.assertEquals(unicode('defx', codecname), u'defabc') - self.assertEquals(unicode('dxf', codecname), u'dabcf') - self.assertEquals(unicode('dxfx', codecname), u'dabcfabc') + self.assertEqual(unicode('abc', codecname), u'abc') + self.assertEqual(unicode('xdef', codecname), u'abcdef') + self.assertEqual(unicode('defx', codecname), u'defabc') + self.assertEqual(unicode('dxf', codecname), u'dabcf') + self.assertEqual(unicode('dxfx', codecname), u'dabcfabc') def test_encodex(self): - self.assertEquals(u'abc'.encode(codecname), 'abc') - self.assertEquals(u'xdef'.encode(codecname), 'abcdef') - self.assertEquals(u'defx'.encode(codecname), 'defabc') - self.assertEquals(u'dxf'.encode(codecname), 'dabcf') - self.assertEquals(u'dxfx'.encode(codecname), 'dabcfabc') + self.assertEqual(u'abc'.encode(codecname), 'abc') + self.assertEqual(u'xdef'.encode(codecname), 'abcdef') + self.assertEqual(u'defx'.encode(codecname), 'defabc') + self.assertEqual(u'dxf'.encode(codecname), 'dabcf') + self.assertEqual(u'dxfx'.encode(codecname), 'dabcfabc') def test_constructory(self): - 
self.assertEquals(unicode('ydef', codecname), u'def') - self.assertEquals(unicode('defy', codecname), u'def') - self.assertEquals(unicode('dyf', codecname), u'df') - self.assertEquals(unicode('dyfy', codecname), u'df') + self.assertEqual(unicode('ydef', codecname), u'def') + self.assertEqual(unicode('defy', codecname), u'def') + self.assertEqual(unicode('dyf', codecname), u'df') + self.assertEqual(unicode('dyfy', codecname), u'df') def test_maptoundefined(self): self.assertRaises(UnicodeError, unicode, 'abc\001', codecname) diff --git a/lib-python/2.7.0/json/tests/test_speedups.py b/lib-python/2.7.0/json/tests/test_speedups.py --- a/lib-python/2.7.0/json/tests/test_speedups.py +++ b/lib-python/2.7.0/json/tests/test_speedups.py @@ -5,11 +5,11 @@ class TestSpeedups(TestCase): def test_scanstring(self): - self.assertEquals(decoder.scanstring.__module__, "_json") + self.assertEqual(decoder.scanstring.__module__, "_json") self.assertTrue(decoder.scanstring is decoder.c_scanstring) def test_encode_basestring_ascii(self): - self.assertEquals(encoder.encode_basestring_ascii.__module__, "_json") + self.assertEqual(encoder.encode_basestring_ascii.__module__, "_json") self.assertTrue(encoder.encode_basestring_ascii is encoder.c_encode_basestring_ascii) diff --git a/lib-python/2.7.0/lib-tk/Tkinter.py b/lib-python/2.7.0/lib-tk/Tkinter.py --- a/lib-python/2.7.0/lib-tk/Tkinter.py +++ b/lib-python/2.7.0/lib-tk/Tkinter.py @@ -30,7 +30,7 @@ tk.mainloop() """ -__version__ = "$Revision: 81008 $" +__version__ = "$Revision$" import sys if sys.platform == "win32": diff --git a/lib-python/2.7.0/test/test_augassign.py b/lib-python/2.7.0/test/test_augassign.py --- a/lib-python/2.7.0/test/test_augassign.py +++ b/lib-python/2.7.0/test/test_augassign.py @@ -19,10 +19,10 @@ x /= 2 if 1/2 == 0: # classic division - self.assertEquals(x, 3) + self.assertEqual(x, 3) else: # new-style division (with -Qnew) - self.assertEquals(x, 3.0) + self.assertEqual(x, 3.0) def test_with_unpacking(self): 
self.assertRaises(SyntaxError, compile, "x, b += 3", "", "exec") @@ -40,9 +40,9 @@ x[0] ^= 1 x[0] /= 2 if 1/2 == 0: - self.assertEquals(x[0], 3) + self.assertEqual(x[0], 3) else: - self.assertEquals(x[0], 3.0) + self.assertEqual(x[0], 3.0) def testInDict(self): x = {0: 2} @@ -57,23 +57,23 @@ x[0] ^= 1 x[0] /= 2 if 1/2 == 0: - self.assertEquals(x[0], 3) + self.assertEqual(x[0], 3) else: - self.assertEquals(x[0], 3.0) + self.assertEqual(x[0], 3.0) def testSequences(self): x = [1,2] x += [3,4] x *= 2 - self.assertEquals(x, [1, 2, 3, 4, 1, 2, 3, 4]) + self.assertEqual(x, [1, 2, 3, 4, 1, 2, 3, 4]) x = [1, 2, 3] y = x x[1:2] *= 2 y[1:2] += [1] - self.assertEquals(x, [1, 2, 1, 2, 3]) + self.assertEqual(x, [1, 2, 1, 2, 3]) self.assertTrue(x is y) def testCustomMethods1(self): @@ -101,14 +101,14 @@ self.assertIsInstance(x, aug_test) self.assertTrue(y is not x) - self.assertEquals(x.val, 11) + self.assertEqual(x.val, 11) x = aug_test2(2) y = x x += 10 self.assertTrue(y is x) - self.assertEquals(x.val, 12) + self.assertEqual(x.val, 12) x = aug_test3(3) y = x @@ -116,7 +116,7 @@ self.assertIsInstance(x, aug_test3) self.assertTrue(y is not x) - self.assertEquals(x.val, 13) + self.assertEqual(x.val, 13) def testCustomMethods2(test_self): @@ -284,7 +284,7 @@ 1 << x x <<= 1 - test_self.assertEquals(output, '''\ + test_self.assertEqual(output, '''\ __add__ called __radd__ called __iadd__ called diff --git a/lib-python/2.7.0/distutils/tests/test_install_data.py b/lib-python/2.7.0/distutils/tests/test_install_data.py --- a/lib-python/2.7.0/distutils/tests/test_install_data.py +++ b/lib-python/2.7.0/distutils/tests/test_install_data.py @@ -27,14 +27,14 @@ self.write_file(two, 'xxx') cmd.data_files = [one, (inst2, [two])] - self.assertEquals(cmd.get_inputs(), [one, (inst2, [two])]) + self.assertEqual(cmd.get_inputs(), [one, (inst2, [two])]) # let's run the command cmd.ensure_finalized() cmd.run() # let's check the result - self.assertEquals(len(cmd.get_outputs()), 2) + 
self.assertEqual(len(cmd.get_outputs()), 2) rtwo = os.path.split(two)[-1] self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) rone = os.path.split(one)[-1] @@ -47,7 +47,7 @@ cmd.run() # let's check the result - self.assertEquals(len(cmd.get_outputs()), 2) + self.assertEqual(len(cmd.get_outputs()), 2) self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) self.assertTrue(os.path.exists(os.path.join(inst, rone))) cmd.outfiles = [] @@ -65,7 +65,7 @@ cmd.run() # let's check the result - self.assertEquals(len(cmd.get_outputs()), 4) + self.assertEqual(len(cmd.get_outputs()), 4) self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) self.assertTrue(os.path.exists(os.path.join(inst, rone))) diff --git a/lib-python/2.7.0/distutils/tests/test_sdist.py b/lib-python/2.7.0/distutils/tests/test_sdist.py --- a/lib-python/2.7.0/distutils/tests/test_sdist.py +++ b/lib-python/2.7.0/distutils/tests/test_sdist.py @@ -127,7 +127,7 @@ # now let's check what we have dist_folder = join(self.tmp_dir, 'dist') files = os.listdir(dist_folder) - self.assertEquals(files, ['fake-1.0.zip']) + self.assertEqual(files, ['fake-1.0.zip']) zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip')) try: @@ -136,7 +136,7 @@ zip_file.close() # making sure everything has been pruned correctly - self.assertEquals(len(content), 4) + self.assertEqual(len(content), 4) @unittest.skipUnless(zlib, "requires zlib") def test_make_distribution(self): @@ -158,8 +158,7 @@ dist_folder = join(self.tmp_dir, 'dist') result = os.listdir(dist_folder) result.sort() - self.assertEquals(result, - ['fake-1.0.tar', 'fake-1.0.tar.gz'] ) + self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'] ) os.remove(join(dist_folder, 'fake-1.0.tar')) os.remove(join(dist_folder, 'fake-1.0.tar.gz')) @@ -172,8 +171,7 @@ result = os.listdir(dist_folder) result.sort() - self.assertEquals(result, - ['fake-1.0.tar', 'fake-1.0.tar.gz']) + self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz']) @unittest.skipUnless(zlib, 
"requires zlib") def test_add_defaults(self): @@ -222,7 +220,7 @@ # now let's check what we have dist_folder = join(self.tmp_dir, 'dist') files = os.listdir(dist_folder) - self.assertEquals(files, ['fake-1.0.zip']) + self.assertEqual(files, ['fake-1.0.zip']) zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip')) try: @@ -231,11 +229,15 @@ zip_file.close() # making sure everything was added - self.assertEquals(len(content), 11) + self.assertEqual(len(content), 11) # checking the MANIFEST - manifest = open(join(self.tmp_dir, 'MANIFEST')).read() - self.assertEquals(manifest, MANIFEST % {'sep': os.sep}) + f = open(join(self.tmp_dir, 'MANIFEST')) + try: + manifest = f.read() + self.assertEqual(manifest, MANIFEST % {'sep': os.sep}) + finally: + f.close() @unittest.skipUnless(zlib, "requires zlib") def test_metadata_check_option(self): @@ -247,7 +249,7 @@ cmd.ensure_finalized() cmd.run() warnings = self.get_logs(WARN) - self.assertEquals(len(warnings), 2) + self.assertEqual(len(warnings), 2) # trying with a complete set of metadata self.clear_logs() @@ -256,7 +258,7 @@ cmd.metadata_check = 0 cmd.run() warnings = self.get_logs(WARN) - self.assertEquals(len(warnings), 0) + self.assertEqual(len(warnings), 0) def test_check_metadata_deprecated(self): # makes sure make_metadata is deprecated @@ -264,7 +266,7 @@ with check_warnings() as w: warnings.simplefilter("always") cmd.check_metadata() - self.assertEquals(len(w.warnings), 1) + self.assertEqual(len(w.warnings), 1) def test_show_formats(self): with captured_stdout() as stdout: @@ -274,7 +276,7 @@ num_formats = len(ARCHIVE_FORMATS.keys()) output = [line for line in stdout.getvalue().split('\n') if line.strip().startswith('--formats=')] - self.assertEquals(len(output), num_formats) + self.assertEqual(len(output), num_formats) def test_finalize_options(self): @@ -282,9 +284,9 @@ cmd.finalize_options() # default options set by finalize - self.assertEquals(cmd.manifest, 'MANIFEST') - self.assertEquals(cmd.template, 
'MANIFEST.in') - self.assertEquals(cmd.dist_dir, 'dist') + self.assertEqual(cmd.manifest, 'MANIFEST') + self.assertEqual(cmd.template, 'MANIFEST.in') + self.assertEqual(cmd.dist_dir, 'dist') # formats has to be a string splitable on (' ', ',') or # a stringlist @@ -321,8 +323,8 @@ archive = tarfile.open(archive_name) try: for member in archive.getmembers(): - self.assertEquals(member.uid, 0) - self.assertEquals(member.gid, 0) + self.assertEqual(member.uid, 0) + self.assertEqual(member.gid, 0) finally: archive.close() @@ -343,7 +345,7 @@ # rights (see #7408) try: for member in archive.getmembers(): - self.assertEquals(member.uid, os.getuid()) + self.assertEqual(member.uid, os.getuid()) finally: archive.close() @@ -365,7 +367,7 @@ finally: f.close() - self.assertEquals(len(manifest), 5) + self.assertEqual(len(manifest), 5) # adding a file self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#') @@ -385,7 +387,7 @@ f.close() # do we have the new file in MANIFEST ? - self.assertEquals(len(manifest2), 6) + self.assertEqual(len(manifest2), 6) self.assertIn('doc2.txt', manifest2[-1]) def test_manifest_marker(self): diff --git a/lib-python/2.7.0/distutils/command/build.py b/lib-python/2.7.0/distutils/command/build.py --- a/lib-python/2.7.0/distutils/command/build.py +++ b/lib-python/2.7.0/distutils/command/build.py @@ -2,7 +2,7 @@ Implements the Distutils 'build' command.""" -__revision__ = "$Id: build.py 77761 2010-01-26 22:46:15Z tarek.ziade $" +__revision__ = "$Id$" import sys, os diff --git a/lib-python/2.7.0/test/test_io.py b/lib-python/2.7.0/test/test_io.py --- a/lib-python/2.7.0/test/test_io.py +++ b/lib-python/2.7.0/test/test_io.py @@ -606,7 +606,7 @@ rawio = self.MockRawIO() bufio = self.tp(rawio) - self.assertEquals(42, bufio.fileno()) + self.assertEqual(42, bufio.fileno()) def test_no_fileno(self): # XXX will we always have fileno() function? 
If so, kill @@ -711,36 +711,36 @@ bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) - self.assertEquals(b"abc", bufio.read()) + self.assertEqual(b"abc", bufio.read()) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) rawio = self.MockRawIO([b"abc"]) bufio.__init__(rawio) - self.assertEquals(b"abc", bufio.read()) + self.assertEqual(b"abc", bufio.read()) def test_read(self): for arg in (None, 7): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) - self.assertEquals(b"abcdefg", bufio.read(arg)) + self.assertEqual(b"abcdefg", bufio.read(arg)) # Invalid args self.assertRaises(ValueError, bufio.read, -2) def test_read1(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) - self.assertEquals(b"a", bufio.read(1)) - self.assertEquals(b"b", bufio.read1(1)) - self.assertEquals(rawio._reads, 1) - self.assertEquals(b"c", bufio.read1(100)) - self.assertEquals(rawio._reads, 1) - self.assertEquals(b"d", bufio.read1(100)) - self.assertEquals(rawio._reads, 2) - self.assertEquals(b"efg", bufio.read1(100)) - self.assertEquals(rawio._reads, 3) - self.assertEquals(b"", bufio.read1(100)) - self.assertEquals(rawio._reads, 4) + self.assertEqual(b"a", bufio.read(1)) + self.assertEqual(b"b", bufio.read1(1)) + self.assertEqual(rawio._reads, 1) + self.assertEqual(b"c", bufio.read1(100)) + self.assertEqual(rawio._reads, 1) + self.assertEqual(b"d", bufio.read1(100)) + self.assertEqual(rawio._reads, 2) + self.assertEqual(b"efg", bufio.read1(100)) + self.assertEqual(rawio._reads, 3) + self.assertEqual(b"", bufio.read1(100)) + self.assertEqual(rawio._reads, 4) # Invalid args self.assertRaises(ValueError, bufio.read1, -1) @@ -748,24 +748,24 @@ rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) b = bytearray(2) - 
self.assertEquals(bufio.readinto(b), 2) - self.assertEquals(b, b"ab") - self.assertEquals(bufio.readinto(b), 2) - self.assertEquals(b, b"cd") - self.assertEquals(bufio.readinto(b), 2) - self.assertEquals(b, b"ef") - self.assertEquals(bufio.readinto(b), 1) - self.assertEquals(b, b"gf") - self.assertEquals(bufio.readinto(b), 0) - self.assertEquals(b, b"gf") + self.assertEqual(bufio.readinto(b), 2) + self.assertEqual(b, b"ab") + self.assertEqual(bufio.readinto(b), 2) + self.assertEqual(b, b"cd") + self.assertEqual(bufio.readinto(b), 2) + self.assertEqual(b, b"ef") + self.assertEqual(bufio.readinto(b), 1) + self.assertEqual(b, b"gf") + self.assertEqual(bufio.readinto(b), 0) + self.assertEqual(b, b"gf") def test_readlines(self): def bufio(): rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef")) return self.tp(rawio) - self.assertEquals(bufio().readlines(), [b"abc\n", b"d\n", b"ef"]) - self.assertEquals(bufio().readlines(5), [b"abc\n", b"d\n"]) - self.assertEquals(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"]) + self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"]) + self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"]) + self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"]) def test_buffering(self): data = b"abcdefghi" @@ -782,34 +782,34 @@ bufio = self.tp(rawio, buffer_size=bufsize) pos = 0 for nbytes in buf_read_sizes: - self.assertEquals(bufio.read(nbytes), data[pos:pos+nbytes]) + self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes]) pos += nbytes # this is mildly implementation-dependent - self.assertEquals(rawio.read_history, raw_read_sizes) + self.assertEqual(rawio.read_history, raw_read_sizes) def test_read_non_blocking(self): # Inject some None's in there to simulate EWOULDBLOCK rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None)) bufio = self.tp(rawio) - self.assertEquals(b"abcd", bufio.read(6)) - self.assertEquals(b"e", bufio.read(1)) - self.assertEquals(b"fg", bufio.read()) - self.assertEquals(b"", 
bufio.peek(1)) + self.assertEqual(b"abcd", bufio.read(6)) + self.assertEqual(b"e", bufio.read(1)) + self.assertEqual(b"fg", bufio.read()) + self.assertEqual(b"", bufio.peek(1)) self.assertTrue(None is bufio.read()) - self.assertEquals(b"", bufio.read()) + self.assertEqual(b"", bufio.read()) def test_read_past_eof(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) - self.assertEquals(b"abcdefg", bufio.read(9000)) + self.assertEqual(b"abcdefg", bufio.read(9000)) def test_read_all(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) - self.assertEquals(b"abcdefg", bufio.read()) + self.assertEqual(b"abcdefg", bufio.read()) @unittest.skipUnless(threading, 'Threading required for this test.') @support.requires_resource('cpu') @@ -936,15 +936,15 @@ bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) - self.assertEquals(3, bufio.write(b"abc")) + self.assertEqual(3, bufio.write(b"abc")) bufio.flush() self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) bufio.__init__(rawio) - self.assertEquals(3, bufio.write(b"ghi")) + self.assertEqual(3, bufio.write(b"ghi")) bufio.flush() - self.assertEquals(b"".join(rawio._write_stack), b"abcghi") + self.assertEqual(b"".join(rawio._write_stack), b"abcghi") def test_detach_flush(self): raw = self.MockRawIO() @@ -986,11 +986,11 @@ sizes = gen_sizes() while n < len(contents): size = min(next(sizes), len(contents) - n) - self.assertEquals(bufio.write(contents[n:n+size]), size) + self.assertEqual(bufio.write(contents[n:n+size]), size) intermediate_func(bufio) n += size bufio.flush() - self.assertEquals(contents, + self.assertEqual(contents, b"".join(writer._write_stack)) def test_writes(self): @@ -1020,11 +1020,11 @@ raw = self.MockNonBlockWriterIO() bufio = self.tp(raw, 8) - 
self.assertEquals(bufio.write(b"abcd"), 4) - self.assertEquals(bufio.write(b"efghi"), 5) + self.assertEqual(bufio.write(b"abcd"), 4) + self.assertEqual(bufio.write(b"efghi"), 5) # 1 byte will be written, the rest will be buffered raw.block_on(b"k") - self.assertEquals(bufio.write(b"jklmn"), 5) + self.assertEqual(bufio.write(b"jklmn"), 5) # 8 bytes will be written, 8 will be buffered and the rest will be lost raw.block_on(b"0") @@ -1034,11 +1034,11 @@ written = e.characters_written else: self.fail("BlockingIOError should have been raised") - self.assertEquals(written, 16) - self.assertEquals(raw.pop_written(), + self.assertEqual(written, 16) + self.assertEqual(raw.pop_written(), b"abcdefghijklmnopqrwxyz") - self.assertEquals(bufio.write(b"ABCDEFGHI"), 9) + self.assertEqual(bufio.write(b"ABCDEFGHI"), 9) s = raw.pop_written() # Previously buffered bytes were flushed self.assertTrue(s.startswith(b"01234567A"), s) @@ -1061,7 +1061,7 @@ bufio = self.tp(writer, 8) bufio.write(b"abc") bufio.flush() - self.assertEquals(b"abc", writer._write_stack[0]) + self.assertEqual(b"abc", writer._write_stack[0]) def test_destructor(self): writer = self.MockRawIO() @@ -1069,7 +1069,7 @@ bufio.write(b"abc") del bufio support.gc_collect() - self.assertEquals(b"abc", writer._write_stack[0]) + self.assertEqual(b"abc", writer._write_stack[0]) def test_truncate(self): # Truncate implicitly flushes the buffer. 
@@ -1128,7 +1128,7 @@ with self.open(support.TESTFN, "rb") as f: s = f.read() for i in range(256): - self.assertEquals(s.count(bytes([i])), N) + self.assertEqual(s.count(bytes([i])), N) finally: support.unlink(support.TESTFN) @@ -1329,45 +1329,45 @@ rw.write(b"eee") self.assertFalse(raw._write_stack) # Buffer writes self.assertEqual(b"ghjk", rw.read()) - self.assertEquals(b"dddeee", raw._write_stack[0]) + self.assertEqual(b"dddeee", raw._write_stack[0]) def test_seek_and_tell(self): raw = self.BytesIO(b"asdfghjkl") rw = self.tp(raw) - self.assertEquals(b"as", rw.read(2)) - self.assertEquals(2, rw.tell()) + self.assertEqual(b"as", rw.read(2)) + self.assertEqual(2, rw.tell()) rw.seek(0, 0) - self.assertEquals(b"asdf", rw.read(4)) + self.assertEqual(b"asdf", rw.read(4)) rw.write(b"asdf") rw.seek(0, 0) - self.assertEquals(b"asdfasdfl", rw.read()) - self.assertEquals(9, rw.tell()) + self.assertEqual(b"asdfasdfl", rw.read()) + self.assertEqual(9, rw.tell()) rw.seek(-4, 2) - self.assertEquals(5, rw.tell()) + self.assertEqual(5, rw.tell()) rw.seek(2, 1) - self.assertEquals(7, rw.tell()) - self.assertEquals(b"fl", rw.read(11)) + self.assertEqual(7, rw.tell()) + self.assertEqual(b"fl", rw.read(11)) self.assertRaises(TypeError, rw.seek, 0.0) def check_flush_and_read(self, read_func): raw = self.BytesIO(b"abcdefghi") bufio = self.tp(raw) - self.assertEquals(b"ab", read_func(bufio, 2)) + self.assertEqual(b"ab", read_func(bufio, 2)) bufio.write(b"12") - self.assertEquals(b"ef", read_func(bufio, 2)) - self.assertEquals(6, bufio.tell()) + self.assertEqual(b"ef", read_func(bufio, 2)) + self.assertEqual(6, bufio.tell()) bufio.flush() - self.assertEquals(6, bufio.tell()) - self.assertEquals(b"ghi", read_func(bufio)) + self.assertEqual(6, bufio.tell()) + self.assertEqual(b"ghi", read_func(bufio)) raw.seek(0, 0) raw.write(b"XYZ") # flush() resets the read buffer bufio.flush() bufio.seek(0, 0) - self.assertEquals(b"XYZ", read_func(bufio, 3)) + self.assertEqual(b"XYZ", read_func(bufio, 
3)) def test_flush_and_read(self): self.check_flush_and_read(lambda bufio, *args: bufio.read(*args)) @@ -1399,8 +1399,8 @@ bufio.write(b"45") bufio.flush() bufio.seek(0, 0) - self.assertEquals(b"12345fghi", raw.getvalue()) - self.assertEquals(b"12345fghi", bufio.read()) + self.assertEqual(b"12345fghi", raw.getvalue()) + self.assertEqual(b"12345fghi", bufio.read()) def test_threads(self): BufferedReaderTest.test_threads(self) @@ -1625,12 +1625,12 @@ # Try a few one-shot test cases. for input, eof, output in self.test_cases: d = StatefulIncrementalDecoder() - self.assertEquals(d.decode(input, eof), output) + self.assertEqual(d.decode(input, eof), output) # Also test an unfinished decode, followed by forcing EOF. d = StatefulIncrementalDecoder() - self.assertEquals(d.decode(b'oiabcd'), '') - self.assertEquals(d.decode(b'', 1), 'abcd.') + self.assertEqual(d.decode(b'oiabcd'), '') + self.assertEqual(d.decode(b'', 1), 'abcd.') class TextIOWrapperTest(unittest.TestCase): @@ -1647,12 +1647,12 @@ b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) t.__init__(b, encoding="latin1", newline="\r\n") - self.assertEquals(t.encoding, "latin1") - self.assertEquals(t.line_buffering, False) + self.assertEqual(t.encoding, "latin1") + self.assertEqual(t.line_buffering, False) t.__init__(b, encoding="utf8", line_buffering=True) - self.assertEquals(t.encoding, "utf8") - self.assertEquals(t.line_buffering, True) - self.assertEquals("\xe9\n", t.readline()) + self.assertEqual(t.encoding, "utf8") + self.assertEqual(t.line_buffering, True) + self.assertEqual("\xe9\n", t.readline()) self.assertRaises(TypeError, t.__init__, b, newline=42) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') @@ -1688,11 +1688,11 @@ b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=True) t.write("X") - self.assertEquals(r.getvalue(), b"") # No flush happened + self.assertEqual(r.getvalue(), b"") # No flush happened t.write("Y\nZ") - 
self.assertEquals(r.getvalue(), b"XY\nZ") # All got flushed + self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed t.write("A\rB") - self.assertEquals(r.getvalue(), b"XY\nZA\rB") + self.assertEqual(r.getvalue(), b"XY\nZA\rB") def test_encoding(self): # Check the encoding attribute is always set, and valid @@ -1715,11 +1715,11 @@ # (3) ignore b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="ignore") - self.assertEquals(t.read(), "abc\n\n") + self.assertEqual(t.read(), "abc\n\n") # (4) replace b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="replace") - self.assertEquals(t.read(), "abc\n\ufffd\n") + self.assertEqual(t.read(), "abc\n\ufffd\n") def test_encoding_errors_writing(self): # (1) default @@ -1736,14 +1736,14 @@ newline="\n") t.write("abc\xffdef\n") t.flush() - self.assertEquals(b.getvalue(), b"abcdef\n") + self.assertEqual(b.getvalue(), b"abcdef\n") # (4) replace b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write("abc\xffdef\n") t.flush() - self.assertEquals(b.getvalue(), b"abc?def\n") + self.assertEqual(b.getvalue(), b"abc?def\n") def test_newlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] @@ -1778,14 +1778,14 @@ c2 = textio.read(2) if c2 == '': break - self.assertEquals(len(c2), 2) + self.assertEqual(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): - self.assertEquals(got_line, exp_line) - self.assertEquals(len(got_lines), len(exp_lines)) + self.assertEqual(got_line, exp_line) + self.assertEqual(len(got_lines), len(exp_lines)) def test_newlines_input(self): testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" @@ -1799,9 +1799,9 @@ ]: buf = self.BytesIO(testdata) txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) - self.assertEquals(txt.readlines(), expected) + self.assertEqual(txt.readlines(), 
expected) txt.seek(0) - self.assertEquals(txt.read(), "".join(expected)) + self.assertEqual(txt.read(), "".join(expected)) def test_newlines_output(self): testdict = { @@ -1818,8 +1818,8 @@ txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() - self.assertEquals(buf.closed, False) - self.assertEquals(buf.getvalue(), expected) + self.assertEqual(buf.closed, False) + self.assertEqual(buf.getvalue(), expected) def test_destructor(self): l = [] @@ -1833,7 +1833,7 @@ t.write("abc") del t support.gc_collect() - self.assertEquals([b"abc"], l) + self.assertEqual([b"abc"], l) def test_override_destructor(self): record = [] @@ -1880,26 +1880,26 @@ for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le": f = self.open(support.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize - self.assertEquals(f.write("abc"), 3) + self.assertEqual(f.write("abc"), 3) f.close() f = self.open(support.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize - self.assertEquals(f.tell(), 0) - self.assertEquals(f.read(), "abc") + self.assertEqual(f.tell(), 0) + self.assertEqual(f.read(), "abc") cookie = f.tell() - self.assertEquals(f.seek(0), 0) - self.assertEquals(f.read(None), "abc") + self.assertEqual(f.seek(0), 0) + self.assertEqual(f.read(None), "abc") f.seek(0) - self.assertEquals(f.read(2), "ab") - self.assertEquals(f.read(1), "c") - self.assertEquals(f.read(1), "") - self.assertEquals(f.read(), "") - self.assertEquals(f.tell(), cookie) - self.assertEquals(f.seek(0), 0) - self.assertEquals(f.seek(0, 2), cookie) - self.assertEquals(f.write("def"), 3) - self.assertEquals(f.seek(cookie), cookie) - self.assertEquals(f.read(), "def") + self.assertEqual(f.read(2), "ab") + self.assertEqual(f.read(1), "c") + self.assertEqual(f.read(1), "") + self.assertEqual(f.read(), "") + self.assertEqual(f.tell(), cookie) + self.assertEqual(f.seek(0), 0) + self.assertEqual(f.seek(0, 2), cookie) + self.assertEqual(f.write("def"), 3) + self.assertEqual(f.seek(cookie), cookie) + 
self.assertEqual(f.read(), "def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() @@ -1924,7 +1924,7 @@ if not line: break rlines.append((pos, line)) - self.assertEquals(rlines, wlines) + self.assertEqual(rlines, wlines) def test_telling(self): f = self.open(support.TESTFN, "w+", encoding="utf8") @@ -1934,16 +1934,16 @@ f.write("\xff\n") p2 = f.tell() f.seek(0) - self.assertEquals(f.tell(), p0) - self.assertEquals(f.readline(), "\xff\n") - self.assertEquals(f.tell(), p1) - self.assertEquals(f.readline(), "\xff\n") - self.assertEquals(f.tell(), p2) + self.assertEqual(f.tell(), p0) + self.assertEqual(f.readline(), "\xff\n") + self.assertEqual(f.tell(), p1) + self.assertEqual(f.readline(), "\xff\n") + self.assertEqual(f.tell(), p2) f.seek(0) for line in f: - self.assertEquals(line, "\xff\n") + self.assertEqual(line, "\xff\n") self.assertRaises(IOError, f.tell) - self.assertEquals(f.tell(), p2) + self.assertEqual(f.tell(), p2) f.close() def test_seeking(self): @@ -1951,7 +1951,7 @@ prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) - self.assertEquals(len(u_prefix), len(prefix)) + self.assertEqual(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix @@ -1960,9 +1960,9 @@ f.close() f = self.open(support.TESTFN, "r", encoding="utf-8") s = f.read(prefix_size) - self.assertEquals(s, prefix.decode("ascii")) - self.assertEquals(f.tell(), prefix_size) - self.assertEquals(f.readline(), u_suffix) + self.assertEqual(s, prefix.decode("ascii")) + self.assertEqual(f.tell(), prefix_size) + self.assertEqual(f.readline(), u_suffix) def test_seeking_too(self): # Regression test for a specific bug @@ -1995,11 +1995,11 @@ for i in range(min_pos, len(decoded) + 1): # seek positions for j in [1, 5, len(decoded) - i]: # read lengths f = self.open(support.TESTFN, encoding='test_decoder') - self.assertEquals(f.read(i), decoded[:i]) + self.assertEqual(f.read(i), 
decoded[:i]) cookie = f.tell() - self.assertEquals(f.read(j), decoded[i:i + j]) + self.assertEqual(f.read(j), decoded[i:i + j]) f.seek(cookie) - self.assertEquals(f.read(), decoded[i:]) + self.assertEqual(f.read(), decoded[i:]) f.close() # Enable the test decoder. @@ -2038,10 +2038,10 @@ f.write(data) f.write(data) f.seek(0) - self.assertEquals(f.read(), data * 2) + self.assertEqual(f.read(), data * 2) f.seek(0) - self.assertEquals(f.read(), data * 2) - self.assertEquals(buf.getvalue(), (data * 2).encode(encoding)) + self.assertEqual(f.read(), data * 2) + self.assertEqual(buf.getvalue(), (data * 2).encode(encoding)) def test_unreadable(self): class UnReadable(self.BytesIO): @@ -2058,7 +2058,7 @@ if not c: break reads += c - self.assertEquals(reads, "AA\nBB") + self.assertEqual(reads, "AA\nBB") def test_readlines(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC")) @@ -2078,7 +2078,7 @@ if not c: break reads += c - self.assertEquals(reads, "A"*127+"\nB") + self.assertEqual(reads, "A"*127+"\nB") def test_issue1395_1(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") @@ -2090,7 +2090,7 @@ if not c: break reads += c - self.assertEquals(reads, self.normalized) + self.assertEqual(reads, self.normalized) def test_issue1395_2(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") @@ -2102,7 +2102,7 @@ if not c: break reads += c - self.assertEquals(reads, self.normalized) + self.assertEqual(reads, self.normalized) def test_issue1395_3(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") @@ -2113,7 +2113,7 @@ reads += txt.readline() reads += txt.readline() reads += txt.readline() - self.assertEquals(reads, self.normalized) + self.assertEqual(reads, self.normalized) def test_issue1395_4(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") @@ -2121,7 +2121,7 @@ reads = txt.read(4) reads += txt.read() - self.assertEquals(reads, self.normalized) + 
self.assertEqual(reads, self.normalized) def test_issue1395_5(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") @@ -2131,7 +2131,7 @@ pos = txt.tell() txt.seek(0) txt.seek(pos) - self.assertEquals(txt.read(4), "BBB\n") + self.assertEqual(txt.read(4), "BBB\n") def test_issue2282(self): buffer = self.BytesIO(self.testdata) @@ -2147,12 +2147,12 @@ f.write('aaa') pos = f.tell() with self.open(filename, 'rb') as f: - self.assertEquals(f.read(), 'aaa'.encode(charset)) + self.assertEqual(f.read(), 'aaa'.encode(charset)) with self.open(filename, 'a', encoding=charset) as f: f.write('xxx') with self.open(filename, 'rb') as f: - self.assertEquals(f.read(), 'aaaxxx'.encode(charset)) + self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_seek_bom(self): # Same test, but when seeking manually @@ -2167,7 +2167,7 @@ f.seek(0) f.write('bbb') with self.open(filename, 'rb') as f: - self.assertEquals(f.read(), 'bbbzzz'.encode(charset)) + self.assertEqual(f.read(), 'bbbzzz'.encode(charset)) def test_errors_property(self): with self.open(support.TESTFN, "w") as f: @@ -2195,7 +2195,7 @@ with self.open(support.TESTFN) as f: content = f.read() for n in range(20): - self.assertEquals(content.count("Thread%03d\n" % n), 1) + self.assertEqual(content.count("Thread%03d\n" % n), 1) def test_flush_error_on_close(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") @@ -2249,9 +2249,9 @@ def _check_decode(b, s, **kwargs): # We exercise getstate() / setstate() as well as decode() state = decoder.getstate() - self.assertEquals(decoder.decode(b, **kwargs), s) + self.assertEqual(decoder.decode(b, **kwargs), s) decoder.setstate(state) - self.assertEquals(decoder.decode(b, **kwargs), s) + self.assertEqual(decoder.decode(b, **kwargs), s) _check_decode(b'\xe8\xa2\x88', "\u8888") @@ -2300,24 +2300,24 @@ # Decode one char at a time for c in s: result.append(decoder.decode(c)) - self.assertEquals(decoder.newlines, None) + 
self.assertEqual(decoder.newlines, None) _decode_bytewise("abc\n\r") - self.assertEquals(decoder.newlines, '\n') + self.assertEqual(decoder.newlines, '\n') _decode_bytewise("\nabc") - self.assertEquals(decoder.newlines, ('\n', '\r\n')) + self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc\r") - self.assertEquals(decoder.newlines, ('\n', '\r\n')) + self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc") - self.assertEquals(decoder.newlines, ('\r', '\n', '\r\n')) + self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n')) _decode_bytewise("abc\r") - self.assertEquals("".join(result), "abc\n\nabcabc\nabcabc") + self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc") decoder.reset() input = "abc" if encoder is not None: encoder.reset() input = encoder.encode(input) - self.assertEquals(decoder.decode(input), "abc") - self.assertEquals(decoder.newlines, None) + self.assertEqual(decoder.decode(input), "abc") + self.assertEqual(decoder.newlines, None) def test_newline_decoder(self): encodings = ( @@ -2338,11 +2338,11 @@ def test_newline_bytes(self): # Issue 5433: Excessive optimization in IncrementalNewlineDecoder def _check(dec): - self.assertEquals(dec.newlines, None) - self.assertEquals(dec.decode("\u0D00"), "\u0D00") - self.assertEquals(dec.newlines, None) - self.assertEquals(dec.decode("\u0A00"), "\u0A00") - self.assertEquals(dec.newlines, None) + self.assertEqual(dec.newlines, None) + self.assertEqual(dec.decode("\u0D00"), "\u0D00") + self.assertEqual(dec.newlines, None) + self.assertEqual(dec.decode("\u0A00"), "\u0A00") + self.assertEqual(dec.newlines, None) dec = self.IncrementalNewlineDecoder(None, translate=False) _check(dec) dec = self.IncrementalNewlineDecoder(None, translate=True) @@ -2375,28 +2375,28 @@ def test_attributes(self): f = self.open(support.TESTFN, "wb", buffering=0) - self.assertEquals(f.mode, "wb") + self.assertEqual(f.mode, "wb") f.close() f = self.open(support.TESTFN, "U") - self.assertEquals(f.name, 
support.TESTFN) - self.assertEquals(f.buffer.name, support.TESTFN) - self.assertEquals(f.buffer.raw.name, support.TESTFN) - self.assertEquals(f.mode, "U") - self.assertEquals(f.buffer.mode, "rb") - self.assertEquals(f.buffer.raw.mode, "rb") + self.assertEqual(f.name, support.TESTFN) + self.assertEqual(f.buffer.name, support.TESTFN) + self.assertEqual(f.buffer.raw.name, support.TESTFN) + self.assertEqual(f.mode, "U") + self.assertEqual(f.buffer.mode, "rb") + self.assertEqual(f.buffer.raw.mode, "rb") f.close() f = self.open(support.TESTFN, "w+") - self.assertEquals(f.mode, "w+") - self.assertEquals(f.buffer.mode, "rb+") # Does it really matter? - self.assertEquals(f.buffer.raw.mode, "rb+") + self.assertEqual(f.mode, "w+") + self.assertEqual(f.buffer.mode, "rb+") # Does it really matter? + self.assertEqual(f.buffer.raw.mode, "rb+") g = self.open(f.fileno(), "wb", closefd=False) - self.assertEquals(g.mode, "wb") - self.assertEquals(g.raw.mode, "wb") - self.assertEquals(g.name, f.fileno()) - self.assertEquals(g.raw.name, f.fileno()) + self.assertEqual(g.mode, "wb") + self.assertEqual(g.raw.mode, "wb") + self.assertEqual(g.name, f.fileno()) + self.assertEqual(g.raw.name, f.fileno()) f.close() g.close() diff --git a/lib-python/2.7.0/distutils/tests/test_install_scripts.py b/lib-python/2.7.0/distutils/tests/test_install_scripts.py --- a/lib-python/2.7.0/distutils/tests/test_install_scripts.py +++ b/lib-python/2.7.0/distutils/tests/test_install_scripts.py @@ -42,8 +42,10 @@ def write_script(name, text): expected.append(name) f = open(os.path.join(source, name), "w") - f.write(text) - f.close() + try: + f.write(text) + finally: + f.close() write_script("script1.py", ("#! 
/usr/bin/env python2.3\n" "# bogus script w/ Python sh-bang\n" diff --git a/lib-python/2.7.0/bsddb/test/test_recno.py b/lib-python/2.7.0/bsddb/test/test_recno.py --- a/lib-python/2.7.0/bsddb/test/test_recno.py +++ b/lib-python/2.7.0/bsddb/test/test_recno.py @@ -18,7 +18,7 @@ def assertFalse(self, expr, msg=None) : return self.failIf(expr,msg=msg) def assertTrue(self, expr, msg=None) : - return self.assert_(expr, msg=msg) + return self.assertTrue(expr, msg=msg) if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and (sys.version_info < (3, 2))) : diff --git a/lib-python/2.7.0/test/test_parser.py b/lib-python/2.7.0/test/test_parser.py --- a/lib-python/2.7.0/test/test_parser.py +++ b/lib-python/2.7.0/test/test_parser.py @@ -19,8 +19,8 @@ except parser.ParserError, why: self.fail("could not roundtrip %r: %s" % (s, why)) - self.assertEquals(t, st2.totuple(), - "could not re-generate syntax tree") + self.assertEqual(t, st2.totuple(), + "could not re-generate syntax tree") def check_expr(self, s): self.roundtrip(parser.expr, s) @@ -547,14 +547,14 @@ def test_compile_expr(self): st = parser.expr('2 + 3') code = parser.compilest(st) - self.assertEquals(eval(code), 5) + self.assertEqual(eval(code), 5) def test_compile_suite(self): st = parser.suite('x = 2; y = x + 3') code = parser.compilest(st) globs = {} exec code in globs - self.assertEquals(globs['y'], 5) + self.assertEqual(globs['y'], 5) def test_compile_error(self): st = parser.suite('1 = 3 + 4') diff --git a/lib-python/2.7.0/distutils/cmd.py b/lib-python/2.7.0/distutils/cmd.py --- a/lib-python/2.7.0/distutils/cmd.py +++ b/lib-python/2.7.0/distutils/cmd.py @@ -4,7 +4,7 @@ in the distutils.command package. 
""" -__revision__ = "$Id: cmd.py 75192 2009-10-02 23:49:48Z tarek.ziade $" +__revision__ = "$Id$" import sys, os, re from distutils.errors import DistutilsOptionError diff --git a/lib-python/2.7.0/bsddb/test/test_basics.py b/lib-python/2.7.0/bsddb/test/test_basics.py --- a/lib-python/2.7.0/bsddb/test/test_basics.py +++ b/lib-python/2.7.0/bsddb/test/test_basics.py @@ -612,7 +612,7 @@ d.put("abcde", "ABCDE"); num = d.truncate() - self.assert_(num >= 1, "truncate returned <= 0 on non-empty database") + self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database") num = d.truncate() self.assertEqual(num, 0, "truncate on empty DB returned nonzero (%r)" % (num,)) @@ -631,9 +631,9 @@ if db.version() >= (4, 6): def test08_exists(self) : self.d.put("abcde", "ABCDE") - self.assert_(self.d.exists("abcde") == True, + self.assertTrue(self.d.exists("abcde") == True, "DB->exists() returns wrong value") - self.assert_(self.d.exists("x") == False, + self.assertTrue(self.d.exists("x") == False, "DB->exists() returns wrong value") #---------------------------------------- @@ -806,9 +806,9 @@ self.d.put("abcde", "ABCDE", txn=txn) txn.commit() txn = self.env.txn_begin() - self.assert_(self.d.exists("abcde", txn=txn) == True, + self.assertTrue(self.d.exists("abcde", txn=txn) == True, "DB->exists() returns wrong value") - self.assert_(self.d.exists("x", txn=txn) == False, + self.assertTrue(self.d.exists("x", txn=txn) == False, "DB->exists() returns wrong value") txn.abort() @@ -823,7 +823,7 @@ d.put("abcde", "ABCDE"); txn = self.env.txn_begin() num = d.truncate(txn) - self.assert_(num >= 1, "truncate returned <= 0 on non-empty database") + self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database") num = d.truncate(txn) self.assertEqual(num, 0, "truncate on empty DB returned nonzero (%r)" % (num,)) diff --git a/lib-python/2.7.0/test/test_posix.py b/lib-python/2.7.0/test/test_posix.py --- a/lib-python/2.7.0/test/test_posix.py +++ 
b/lib-python/2.7.0/test/test_posix.py @@ -103,7 +103,7 @@ try: posix.initgroups(name, 13) except OSError as e: - self.assertEquals(e.errno, errno.EPERM) + self.assertEqual(e.errno, errno.EPERM) else: self.fail("Expected OSError to be raised by initgroups") diff --git a/lib-python/2.7.0/distutils/command/bdist.py b/lib-python/2.7.0/distutils/command/bdist.py --- a/lib-python/2.7.0/distutils/command/bdist.py +++ b/lib-python/2.7.0/distutils/command/bdist.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist' command (create a built [binary] distribution).""" -__revision__ = "$Id: bdist.py 77761 2010-01-26 22:46:15Z tarek.ziade $" +__revision__ = "$Id$" import os diff --git a/lib-python/2.7.0/md5.py b/lib-python/2.7.0/md5.py --- a/lib-python/2.7.0/md5.py +++ b/lib-python/2.7.0/md5.py @@ -1,4 +1,4 @@ -# $Id: md5.py 58064 2007-09-09 20:25:00Z gregory.p.smith $ +# $Id$ # # Copyright (C) 2005 Gregory P. Smith (greg at krypto.org) # Licensed to PSF under a Contributor Agreement. diff --git a/lib-python/2.7.0/distutils/tests/test_dir_util.py b/lib-python/2.7.0/distutils/tests/test_dir_util.py --- a/lib-python/2.7.0/distutils/tests/test_dir_util.py +++ b/lib-python/2.7.0/distutils/tests/test_dir_util.py @@ -37,18 +37,18 @@ mkpath(self.target, verbose=0) wanted = [] - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) remove_tree(self.root_target, verbose=0) mkpath(self.target, verbose=1) wanted = ['creating %s' % self.root_target, 'creating %s' % self.target] - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) self._logs = [] remove_tree(self.root_target, verbose=1) wanted = ["removing '%s' (and everything under it)" % self.root_target] - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) @unittest.skipIf(sys.platform.startswith('win'), "This test is only appropriate for POSIX-like systems.") @@ -66,12 +66,12 @@ def test_create_tree_verbosity(self): create_tree(self.root_target, ['one', 'two', 
'three'], verbose=0) - self.assertEquals(self._logs, []) + self.assertEqual(self._logs, []) remove_tree(self.root_target, verbose=0) wanted = ['creating %s' % self.root_target] create_tree(self.root_target, ['one', 'two', 'three'], verbose=1) - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) remove_tree(self.root_target, verbose=0) @@ -81,30 +81,32 @@ mkpath(self.target, verbose=0) copy_tree(self.target, self.target2, verbose=0) - self.assertEquals(self._logs, []) + self.assertEqual(self._logs, []) remove_tree(self.root_target, verbose=0) mkpath(self.target, verbose=0) a_file = os.path.join(self.target, 'ok.txt') f = open(a_file, 'w') - f.write('some content') - f.close() + try: + f.write('some content') + finally: + f.close() wanted = ['copying %s -> %s' % (a_file, self.target2)] copy_tree(self.target, self.target2, verbose=1) - self.assertEquals(self._logs, wanted) + self.assertEqual(self._logs, wanted) remove_tree(self.root_target, verbose=0) remove_tree(self.target2, verbose=0) def test_ensure_relative(self): if os.sep == '/': - self.assertEquals(ensure_relative('/home/foo'), 'home/foo') - self.assertEquals(ensure_relative('some/path'), 'some/path') + self.assertEqual(ensure_relative('/home/foo'), 'home/foo') + self.assertEqual(ensure_relative('some/path'), 'some/path') else: # \\ - self.assertEquals(ensure_relative('c:\\home\\foo'), 'c:home\\foo') - self.assertEquals(ensure_relative('home\\foo'), 'home\\foo') + self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') + self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') def test_suite(): return unittest.makeSuite(DirUtilTestCase) diff --git a/lib-python/2.7.0/test/test_threading.py b/lib-python/2.7.0/test/test_threading.py --- a/lib-python/2.7.0/test/test_threading.py +++ b/lib-python/2.7.0/test/test_threading.py @@ -403,17 +403,17 @@ weak_cyclic_object = weakref.ref(cyclic_object) cyclic_object.thread.join() del cyclic_object - self.assertEquals(None, 
weak_cyclic_object(), - msg=('%d references still around' % - sys.getrefcount(weak_cyclic_object()))) + self.assertEqual(None, weak_cyclic_object(), + msg=('%d references still around' % + sys.getrefcount(weak_cyclic_object()))) raising_cyclic_object = RunSelfFunction(should_raise=True) weak_raising_cyclic_object = weakref.ref(raising_cyclic_object) raising_cyclic_object.thread.join() del raising_cyclic_object - self.assertEquals(None, weak_raising_cyclic_object(), - msg=('%d references still around' % - sys.getrefcount(weak_raising_cyclic_object()))) + self.assertEqual(None, weak_raising_cyclic_object(), + msg=('%d references still around' % + sys.getrefcount(weak_raising_cyclic_object()))) class ThreadJoinOnShutdown(BaseTestCase): diff --git a/lib-python/2.7.0/distutils/tests/test_build_ext.py b/lib-python/2.7.0/distutils/tests/test_build_ext.py --- a/lib-python/2.7.0/distutils/tests/test_build_ext.py +++ b/lib-python/2.7.0/distutils/tests/test_build_ext.py @@ -103,15 +103,15 @@ import xx for attr in ('error', 'foo', 'new', 'roj'): - self.assert_(hasattr(xx, attr)) + self.assertTrue(hasattr(xx, attr)) - self.assertEquals(xx.foo(2, 5), 7) - self.assertEquals(xx.foo(13,15), 28) - self.assertEquals(xx.new().demo(), None) + self.assertEqual(xx.foo(2, 5), 7) + self.assertEqual(xx.foo(13,15), 28) + self.assertEqual(xx.new().demo(), None) doc = 'This is a template module just for instruction.' 
- self.assertEquals(xx.__doc__, doc) - self.assert_(isinstance(xx.Null(), xx.Null)) - self.assert_(isinstance(xx.Str(), xx.Str)) + self.assertEqual(xx.__doc__, doc) + self.assertTrue(isinstance(xx.Null(), xx.Null)) + self.assertTrue(isinstance(xx.Str(), xx.Str)) def test_solaris_enable_shared(self): dist = Distribution({'name': 'xx'}) @@ -132,7 +132,7 @@ _config_vars['Py_ENABLE_SHARED'] = old_var # make sure we get some library dirs under solaris - self.assert_(len(cmd.library_dirs) > 0) + self.assertTrue(len(cmd.library_dirs) > 0) def test_finalize_options(self): # Make sure Python's include directories (for Python.h, pyconfig.h, @@ -144,31 +144,31 @@ from distutils import sysconfig py_include = sysconfig.get_python_inc() - self.assert_(py_include in cmd.include_dirs) + self.assertTrue(py_include in cmd.include_dirs) plat_py_include = sysconfig.get_python_inc(plat_specific=1) - self.assert_(plat_py_include in cmd.include_dirs) + self.assertTrue(plat_py_include in cmd.include_dirs) # make sure cmd.libraries is turned into a list # if it's a string cmd = build_ext(dist) cmd.libraries = 'my_lib' cmd.finalize_options() - self.assertEquals(cmd.libraries, ['my_lib']) + self.assertEqual(cmd.libraries, ['my_lib']) # make sure cmd.library_dirs is turned into a list # if it's a string cmd = build_ext(dist) cmd.library_dirs = 'my_lib_dir' cmd.finalize_options() - self.assert_('my_lib_dir' in cmd.library_dirs) + self.assertTrue('my_lib_dir' in cmd.library_dirs) # make sure rpath is turned into a list # if it's a list of os.pathsep's paths cmd = build_ext(dist) cmd.rpath = os.pathsep.join(['one', 'two']) cmd.finalize_options() - self.assertEquals(cmd.rpath, ['one', 'two']) + self.assertEqual(cmd.rpath, ['one', 'two']) # XXX more tests to perform for win32 @@ -177,25 +177,25 @@ cmd = build_ext(dist) cmd.define = 'one,two' cmd.finalize_options() - self.assertEquals(cmd.define, [('one', '1'), ('two', '1')]) + self.assertEqual(cmd.define, [('one', '1'), ('two', '1')]) # make sure 
undef is turned into a list of # strings if they are ','-separated strings cmd = build_ext(dist) cmd.undef = 'one,two' cmd.finalize_options() - self.assertEquals(cmd.undef, ['one', 'two']) + self.assertEqual(cmd.undef, ['one', 'two']) # make sure swig_opts is turned into a list cmd = build_ext(dist) cmd.swig_opts = None cmd.finalize_options() - self.assertEquals(cmd.swig_opts, []) + self.assertEqual(cmd.swig_opts, []) cmd = build_ext(dist) cmd.swig_opts = '1 2' cmd.finalize_options() - self.assertEquals(cmd.swig_opts, ['1', '2']) + self.assertEqual(cmd.swig_opts, ['1', '2']) def test_check_extensions_list(self): dist = Distribution() @@ -226,13 +226,13 @@ 'some': 'bar'})] cmd.check_extensions_list(exts) ext = exts[0] - self.assert_(isinstance(ext, Extension)) + self.assertTrue(isinstance(ext, Extension)) # check_extensions_list adds in ext the values passed # when they are in ('include_dirs', 'library_dirs', 'libraries' # 'extra_objects', 'extra_compile_args', 'extra_link_args') - self.assertEquals(ext.libraries, 'foo') - self.assert_(not hasattr(ext, 'some')) + self.assertEqual(ext.libraries, 'foo') + self.assertTrue(not hasattr(ext, 'some')) # 'macros' element of build info dict must be 1- or 2-tuple exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', @@ -241,15 +241,15 @@ exts[0][1]['macros'] = [('1', '2'), ('3',)] cmd.check_extensions_list(exts) - self.assertEquals(exts[0].undef_macros, ['3']) - self.assertEquals(exts[0].define_macros, [('1', '2')]) + self.assertEqual(exts[0].undef_macros, ['3']) + self.assertEqual(exts[0].define_macros, [('1', '2')]) def test_get_source_files(self): modules = [Extension('foo', ['xxx'])] dist = Distribution({'name': 'xx', 'ext_modules': modules}) cmd = build_ext(dist) cmd.ensure_finalized() - self.assertEquals(cmd.get_source_files(), ['xxx']) + self.assertEqual(cmd.get_source_files(), ['xxx']) def test_compiler_option(self): # cmd.compiler is an option and @@ -260,7 +260,7 @@ cmd.compiler = 'unix' cmd.ensure_finalized() 
cmd.run() - self.assertEquals(cmd.compiler, 'unix') + self.assertEqual(cmd.compiler, 'unix') def test_get_outputs(self): tmp_dir = self.mkdtemp() @@ -272,7 +272,7 @@ cmd = build_ext(dist) self._fixup_command(cmd) cmd.ensure_finalized() - self.assertEquals(len(cmd.get_outputs()), 1) + self.assertEqual(len(cmd.get_outputs()), 1) if os.name == "nt": cmd.debug = sys.executable.endswith("_d.exe") @@ -291,20 +291,20 @@ so_file = cmd.get_outputs()[0] finally: os.chdir(old_wd) - self.assert_(os.path.exists(so_file)) - self.assertEquals(os.path.splitext(so_file)[-1], - sysconfig.get_config_var('SO')) + self.assertTrue(os.path.exists(so_file)) + self.assertEqual(os.path.splitext(so_file)[-1], + sysconfig.get_config_var('SO')) so_dir = os.path.dirname(so_file) - self.assertEquals(so_dir, other_tmp_dir) + self.assertEqual(so_dir, other_tmp_dir) cmd.compiler = None cmd.inplace = 0 cmd.run() so_file = cmd.get_outputs()[0] - self.assert_(os.path.exists(so_file)) - self.assertEquals(os.path.splitext(so_file)[-1], - sysconfig.get_config_var('SO')) + self.assertTrue(os.path.exists(so_file)) + self.assertEqual(os.path.splitext(so_file)[-1], + sysconfig.get_config_var('SO')) so_dir = os.path.dirname(so_file) - self.assertEquals(so_dir, cmd.build_lib) + self.assertEqual(so_dir, cmd.build_lib) # inplace = 0, cmd.package = 'bar' build_py = cmd.get_finalized_command('build_py') @@ -312,7 +312,7 @@ path = cmd.get_ext_fullpath('foo') # checking that the last directory is the build_dir path = os.path.split(path)[0] - self.assertEquals(path, cmd.build_lib) + self.assertEqual(path, cmd.build_lib) # inplace = 1, cmd.package = 'bar' cmd.inplace = 1 @@ -326,7 +326,7 @@ # checking that the last directory is bar path = os.path.split(path)[0] lastdir = os.path.split(path)[-1] - self.assertEquals(lastdir, 'bar') + self.assertEqual(lastdir, 'bar') def test_ext_fullpath(self): ext = sysconfig.get_config_vars()['SO'] @@ -338,14 +338,14 @@ curdir = os.getcwd() wanted = os.path.join(curdir, 'src', 'lxml', 
'etree' + ext) path = cmd.get_ext_fullpath('lxml.etree') - self.assertEquals(wanted, path) + self.assertEqual(wanted, path) # building lxml.etree not inplace cmd.inplace = 0 cmd.build_lib = os.path.join(curdir, 'tmpdir') wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext) path = cmd.get_ext_fullpath('lxml.etree') - self.assertEquals(wanted, path) + self.assertEqual(wanted, path) # building twisted.runner.portmap not inplace build_py = cmd.get_finalized_command('build_py') @@ -354,13 +354,13 @@ path = cmd.get_ext_fullpath('twisted.runner.portmap') wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner', 'portmap' + ext) - self.assertEquals(wanted, path) + self.assertEqual(wanted, path) # building twisted.runner.portmap inplace cmd.inplace = 1 path = cmd.get_ext_fullpath('twisted.runner.portmap') wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext) - self.assertEquals(wanted, path) + self.assertEqual(wanted, path) def test_build_ext_inplace(self): etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') @@ -375,7 +375,7 @@ ext = sysconfig.get_config_var("SO") wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext) path = cmd.get_ext_fullpath('lxml.etree') - self.assertEquals(wanted, path) + self.assertEqual(wanted, path) def test_setuptools_compat(self): import distutils.core, distutils.extension, distutils.command.build_ext @@ -400,7 +400,7 @@ ext = sysconfig.get_config_var("SO") wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext) path = cmd.get_ext_fullpath('lxml.etree') - self.assertEquals(wanted, path) + self.assertEqual(wanted, path) finally: # restoring Distutils' Extension class otherwise its broken distutils.extension.Extension = saved_ext @@ -415,7 +415,7 @@ ext_name = os.path.join('UpdateManager', 'fdsend') ext_path = cmd.get_ext_fullpath(ext_name) wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext) - self.assertEquals(ext_path, wanted) + self.assertEqual(ext_path, wanted) def 
test_build_ext_path_cross_platform(self): if sys.platform != 'win32': @@ -428,7 +428,7 @@ ext_name = 'UpdateManager/fdsend' ext_path = cmd.get_ext_fullpath(ext_name) wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext) - self.assertEquals(ext_path, wanted) + self.assertEqual(ext_path, wanted) def test_suite(): return unittest.makeSuite(BuildExtTestCase) diff --git a/lib-python/2.7.0/test/test_linecache.py b/lib-python/2.7.0/test/test_linecache.py --- a/lib-python/2.7.0/test/test_linecache.py +++ b/lib-python/2.7.0/test/test_linecache.py @@ -42,31 +42,31 @@ getline = linecache.getline # Bad values for line number should return an empty string - self.assertEquals(getline(FILENAME, 2**15), EMPTY) - self.assertEquals(getline(FILENAME, -1), EMPTY) + self.assertEqual(getline(FILENAME, 2**15), EMPTY) + self.assertEqual(getline(FILENAME, -1), EMPTY) # Float values currently raise TypeError, should it? self.assertRaises(TypeError, getline, FILENAME, 1.1) # Bad filenames should return an empty string - self.assertEquals(getline(EMPTY, 1), EMPTY) - self.assertEquals(getline(INVALID_NAME, 1), EMPTY) + self.assertEqual(getline(EMPTY, 1), EMPTY) + self.assertEqual(getline(INVALID_NAME, 1), EMPTY) # Check whether lines correspond to those from file iteration for entry in TESTS: filename = os.path.join(TEST_PATH, entry) + '.py' for index, line in enumerate(open(filename)): - self.assertEquals(line, getline(filename, index + 1)) + self.assertEqual(line, getline(filename, index + 1)) # Check module loading for entry in MODULES: filename = os.path.join(MODULE_PATH, entry) + '.py' for index, line in enumerate(open(filename)): - self.assertEquals(line, getline(filename, index + 1)) + self.assertEqual(line, getline(filename, index + 1)) # Check that bogus data isn't returned (issue #1309567) empty = linecache.getlines('a/b/c/__init__.py') - self.assertEquals(empty, []) + self.assertEqual(empty, []) def test_no_ending_newline(self): self.addCleanup(support.unlink, 
support.TESTFN) @@ -84,12 +84,12 @@ # Are all files cached? cached_empty = [fn for fn in cached if fn not in linecache.cache] - self.assertEquals(cached_empty, []) + self.assertEqual(cached_empty, []) # Can we clear the cache? linecache.clearcache() cached_empty = [fn for fn in cached if fn in linecache.cache] - self.assertEquals(cached_empty, []) + self.assertEqual(cached_empty, []) def test_checkcache(self): getline = linecache.getline @@ -104,7 +104,7 @@ source_list = [] with open(source_name) as source: for index, line in enumerate(source): - self.assertEquals(line, getline(source_name, index + 1)) + self.assertEqual(line, getline(source_name, index + 1)) source_list.append(line) with open(source_name, 'w') as source: @@ -115,13 +115,13 @@ # Check that the cache matches the old contents for index, line in enumerate(source_list): - self.assertEquals(line, getline(source_name, index + 1)) + self.assertEqual(line, getline(source_name, index + 1)) # Update the cache and check whether it matches the new source file linecache.checkcache(source_name) with open(source_name) as source: for index, line in enumerate(source): - self.assertEquals(line, getline(source_name, index + 1)) + self.assertEqual(line, getline(source_name, index + 1)) source_list.append(line) def test_main(): diff --git a/lib-python/2.7.0/test/test_int.py b/lib-python/2.7.0/test/test_int.py --- a/lib-python/2.7.0/test/test_int.py +++ b/lib-python/2.7.0/test/test_int.py @@ -392,7 +392,7 @@ try: int(TruncReturnsNonIntegral()) except TypeError as e: - self.assertEquals(str(e), + self.assertEqual(str(e), "__trunc__ returned non-Integral" " (type NonIntegral)") else: diff --git a/lib-python/2.7.0/distutils/tests/test_core.py b/lib-python/2.7.0/distutils/tests/test_core.py --- a/lib-python/2.7.0/distutils/tests/test_core.py +++ b/lib-python/2.7.0/distutils/tests/test_core.py @@ -52,7 +52,11 @@ shutil.rmtree(path) def write_setup(self, text, path=test.test_support.TESTFN): - open(path, "w").write(text) + f 
= open(path, "w") + try: + f.write(text) + finally: + f.close() return path def test_run_setup_provides_file(self): @@ -85,7 +89,7 @@ with captured_stdout() as stdout: distutils.core.setup(name='bar') stdout.seek(0) - self.assertEquals(stdout.read(), 'bar\n') + self.assertEqual(stdout.read(), 'bar\n') distutils.core.DEBUG = True try: @@ -95,7 +99,7 @@ distutils.core.DEBUG = False stdout.seek(0) wanted = "options (after parsing config files):\n" - self.assertEquals(stdout.readlines()[0], wanted) + self.assertEqual(stdout.readlines()[0], wanted) def test_suite(): return unittest.makeSuite(CoreTestCase) diff --git a/lib-python/2.7.0/bsddb/test/test_join.py b/lib-python/2.7.0/bsddb/test/test_join.py --- a/lib-python/2.7.0/bsddb/test/test_join.py +++ b/lib-python/2.7.0/bsddb/test/test_join.py @@ -67,7 +67,7 @@ # Don't do the .set() in an assert, or you can get a bogus failure # when running python -O tmp = sCursor.set('red') - self.assert_(tmp) + self.assertTrue(tmp) # FIXME: jCursor doesn't properly hold a reference to its # cursors, if they are closed before jcursor is used it diff --git a/lib-python/2.7.0/bsddb/test/test_associate.py b/lib-python/2.7.0/bsddb/test/test_associate.py --- a/lib-python/2.7.0/bsddb/test/test_associate.py +++ b/lib-python/2.7.0/bsddb/test/test_associate.py @@ -233,7 +233,7 @@ self.assertEqual(vals, None, vals) vals = secDB.pget('Unknown', txn=txn) - self.assert_(vals[0] == 99 or vals[0] == '99', vals) + self.assertTrue(vals[0] == 99 or vals[0] == '99', vals) vals[1].index('Unknown') vals[1].index('Unnamed') vals[1].index('unknown') @@ -245,9 +245,9 @@ rec = self.cur.first() while rec is not None: if type(self.keytype) == type(''): - self.assert_(int(rec[0])) # for primary db, key is a number + self.assertTrue(int(rec[0])) # for primary db, key is a number else: - self.assert_(rec[0] and type(rec[0]) == type(0)) + self.assertTrue(rec[0] and type(rec[0]) == type(0)) count = count + 1 if verbose: print rec @@ -262,7 +262,7 @@ # test cursor 
pget vals = self.cur.pget('Unknown', flags=db.DB_LAST) - self.assert_(vals[1] == 99 or vals[1] == '99', vals) + self.assertTrue(vals[1] == 99 or vals[1] == '99', vals) self.assertEqual(vals[0], 'Unknown') vals[2].index('Unknown') vals[2].index('Unnamed') diff --git a/lib-python/2.7.0/distutils/command/install.py b/lib-python/2.7.0/distutils/command/install.py --- a/lib-python/2.7.0/distutils/command/install.py +++ b/lib-python/2.7.0/distutils/command/install.py @@ -6,7 +6,7 @@ # This module should be kept compatible with Python 2.1. -__revision__ = "$Id: install.py 80804 2010-05-05 19:09:31Z ronald.oussoren $" +__revision__ = "$Id$" import sys, os, string from types import * diff --git a/lib-python/2.7.0/distutils/sysconfig.py b/lib-python/2.7.0/distutils/sysconfig.py --- a/lib-python/2.7.0/distutils/sysconfig.py +++ b/lib-python/2.7.0/distutils/sysconfig.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" +__revision__ = "$Id$" import os import re @@ -453,32 +453,6 @@ _config_vars = g -def _init_mac(): - """Initialize the module as appropriate for Macintosh systems""" - g = {} - # set basic install directories - g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) - g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) - - # XXX hmmm.. a normal install puts include files here - g['INCLUDEPY'] = get_python_inc(plat_specific=0) - - import MacOS - if not hasattr(MacOS, 'runtimemodel'): - g['SO'] = '.ppc.slb' - else: - g['SO'] = '.%s.slb' % MacOS.runtimemodel - - # XXX are these used anywhere? 
- g['install_lib'] = os.path.join(EXEC_PREFIX, "Lib") - g['install_platlib'] = os.path.join(EXEC_PREFIX, "Mac", "Lib") - - # These are used by the extension module build - g['srcdir'] = ':' - global _config_vars - _config_vars = g - - def _init_os2(): """Initialize the module as appropriate for OS/2""" g = {} diff --git a/lib-python/2.7.0/distutils/command/build_scripts.py b/lib-python/2.7.0/distutils/command/build_scripts.py --- a/lib-python/2.7.0/distutils/command/build_scripts.py +++ b/lib-python/2.7.0/distutils/command/build_scripts.py @@ -2,7 +2,7 @@ Implements the Distutils 'build_scripts' command.""" -__revision__ = "$Id: build_scripts.py 77704 2010-01-23 09:23:15Z tarek.ziade $" +__revision__ = "$Id$" import os, re from stat import ST_MODE diff --git a/lib-python/2.7.0/test/test_optparse.py b/lib-python/2.7.0/test/test_optparse.py --- a/lib-python/2.7.0/test/test_optparse.py +++ b/lib-python/2.7.0/test/test_optparse.py @@ -3,7 +3,7 @@ # (taradino at softhome.net) -- translated from the original Optik # test suite to this PyUnit-based version. 
# -# $Id: test_optparse.py 83429 2010-08-01 19:14:56Z georg.brandl $ +# $Id$ # import sys @@ -427,19 +427,19 @@ def test_str_aliases_string(self): self.parser.add_option("-s", type="str") - self.assertEquals(self.parser.get_option("-s").type, "string") + self.assertEqual(self.parser.get_option("-s").type, "string") def test_new_type_object(self): self.parser.add_option("-s", type=str) - self.assertEquals(self.parser.get_option("-s").type, "string") + self.assertEqual(self.parser.get_option("-s").type, "string") self.parser.add_option("-x", type=int) - self.assertEquals(self.parser.get_option("-x").type, "int") + self.assertEqual(self.parser.get_option("-x").type, "int") def test_old_type_object(self): self.parser.add_option("-s", type=types.StringType) - self.assertEquals(self.parser.get_option("-s").type, "string") + self.assertEqual(self.parser.get_option("-s").type, "string") self.parser.add_option("-x", type=types.IntType) - self.assertEquals(self.parser.get_option("-x").type, "int") + self.assertEqual(self.parser.get_option("-x").type, "int") # Custom type for testing processing of default values. 
diff --git a/lib-python/2.7.0/test/test_slice.py b/lib-python/2.7.0/test/test_slice.py --- a/lib-python/2.7.0/test/test_slice.py +++ b/lib-python/2.7.0/test/test_slice.py @@ -117,7 +117,7 @@ x = X() with test_support.check_py3k_warnings(): x[1:2] = 42 - self.assertEquals(tmp, [(1, 2, 42)]) + self.assertEqual(tmp, [(1, 2, 42)]) def test_pickle(self): s = slice(10, 20, 3) diff --git a/lib-python/2.7.0/test/test_collections.py b/lib-python/2.7.0/test/test_collections.py --- a/lib-python/2.7.0/test/test_collections.py +++ b/lib-python/2.7.0/test/test_collections.py @@ -674,9 +674,9 @@ ]): msg = (i, dup, words) self.assertTrue(dup is not words) - self.assertEquals(dup, words) - self.assertEquals(len(dup), len(words)) - self.assertEquals(type(dup), type(words)) + self.assertEqual(dup, words) + self.assertEqual(len(dup), len(words)) + self.assertEqual(type(dup), type(words)) def test_conversions(self): # Convert to: set, list, dict @@ -897,10 +897,10 @@ OrderedDict(od), ]): self.assertTrue(dup is not od) - self.assertEquals(dup, od) - self.assertEquals(list(dup.items()), list(od.items())) - self.assertEquals(len(dup), len(od)) - self.assertEquals(type(dup), type(od)) + self.assertEqual(dup, od) + self.assertEqual(list(dup.items()), list(od.items())) + self.assertEqual(len(dup), len(od)) + self.assertEqual(type(dup), type(od)) def test_yaml_linkage(self): # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature. 
diff --git a/lib-python/2.7.0/test/test_exceptions.py b/lib-python/2.7.0/test/test_exceptions.py --- a/lib-python/2.7.0/test/test_exceptions.py +++ b/lib-python/2.7.0/test/test_exceptions.py @@ -32,8 +32,8 @@ raise exc("spam") except exc, err: buf2 = str(err) - self.assertEquals(buf1, buf2) - self.assertEquals(exc.__name__, excname) + self.assertEqual(buf1, buf2) + self.assertEqual(exc.__name__, excname) def testRaising(self): self.raise_catch(AttributeError, "AttributeError") @@ -163,7 +163,7 @@ except TypeError, err: exc, err, tb = sys.exc_info() co = tb.tb_frame.f_code - self.assertEquals(co.co_name, "test_capi1") + self.assertEqual(co.co_name, "test_capi1") self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py')) else: self.fail("Expected exception") @@ -175,10 +175,10 @@ except RuntimeError, err: exc, err, tb = sys.exc_info() co = tb.tb_frame.f_code - self.assertEquals(co.co_name, "__init__") + self.assertEqual(co.co_name, "__init__") self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py')) co2 = tb.tb_frame.f_back.f_code - self.assertEquals(co2.co_name, "test_capi2") + self.assertEqual(co2.co_name, "test_capi2") else: self.fail("Expected exception") @@ -284,14 +284,14 @@ if type(e) is not exc: raise # Verify module name - self.assertEquals(type(e).__module__, 'exceptions') + self.assertEqual(type(e).__module__, 'exceptions') # Verify no ref leaks in Exc_str() s = str(e) for checkArgName in expected: - self.assertEquals(repr(getattr(e, checkArgName)), - repr(expected[checkArgName]), - 'exception "%s", attribute "%s"' % - (repr(e), checkArgName)) + self.assertEqual(repr(getattr(e, checkArgName)), + repr(expected[checkArgName]), + 'exception "%s", attribute "%s"' % + (repr(e), checkArgName)) # test for pickling support for p in pickle, cPickle: @@ -300,9 +300,9 @@ for checkArgName in expected: got = repr(getattr(new, checkArgName)) want = repr(expected[checkArgName]) - self.assertEquals(got, want, - 'pickled "%r", attribute 
"%s"' % - (e, checkArgName)) + self.assertEqual(got, want, + 'pickled "%r", attribute "%s"' % + (e, checkArgName)) def testDeprecatedMessageAttribute(self): @@ -359,7 +359,7 @@ self.fancy_arg = fancy_arg x = DerivedException(fancy_arg=42) - self.assertEquals(x.fancy_arg, 42) + self.assertEqual(x.fancy_arg, 42) def testInfiniteRecursion(self): def f(): diff --git a/lib-python/2.7.0/distutils/text_file.py b/lib-python/2.7.0/distutils/text_file.py --- a/lib-python/2.7.0/distutils/text_file.py +++ b/lib-python/2.7.0/distutils/text_file.py @@ -4,7 +4,7 @@ that (optionally) takes care of stripping comments, ignoring blank lines, and joining lines with backslashes.""" -__revision__ = "$Id: text_file.py 76956 2009-12-21 01:22:46Z tarek.ziade $" +__revision__ = "$Id$" import sys diff --git a/lib-python/2.7.0/test/test_trace.py b/lib-python/2.7.0/test/test_trace.py --- a/lib-python/2.7.0/test/test_trace.py +++ b/lib-python/2.7.0/test/test_trace.py @@ -309,7 +309,7 @@ self._coverage(tracer) if os.path.exists(TESTFN): files = os.listdir(TESTFN) - self.assertEquals(files, []) + self.assertEqual(files, []) def test_issue9936(self): tracer = trace.Trace(trace=0, count=1) diff --git a/lib-python/2.7.0/test/test_cprofile.py b/lib-python/2.7.0/test/test_cprofile.py --- a/lib-python/2.7.0/test/test_cprofile.py +++ b/lib-python/2.7.0/test/test_cprofile.py @@ -39,7 +39,7 @@ # Don't remove this comment. Everything below it is auto-generated. 
#--cut-------------------------------------------------------------------------- CProfileTest.expected_output['print_stats'] = """\ - 126 function calls (106 primitive calls) in 1.000 CPU seconds + 126 function calls (106 primitive calls) in 1.000 seconds Ordered by: standard name diff --git a/lib-python/2.7.0/test/test_frozen.py b/lib-python/2.7.0/test/test_frozen.py --- a/lib-python/2.7.0/test/test_frozen.py +++ b/lib-python/2.7.0/test/test_frozen.py @@ -30,8 +30,8 @@ else: self.fail("import __phello__.foo should have failed") - self.assertEquals(stdout.getvalue(), - 'Hello world...\nHello world...\nHello world...\n') + self.assertEqual(stdout.getvalue(), + 'Hello world...\nHello world...\nHello world...\n') del sys.modules['__hello__'] del sys.modules['__phello__'] diff --git a/lib-python/2.7.0/distutils/msvccompiler.py b/lib-python/2.7.0/distutils/msvccompiler.py --- a/lib-python/2.7.0/distutils/msvccompiler.py +++ b/lib-python/2.7.0/distutils/msvccompiler.py @@ -8,7 +8,7 @@ # hacked by Robin Becker and Thomas Heller to do a better job of # finding DevStudio (through the registry) -__revision__ = "$Id: msvccompiler.py 76956 2009-12-21 01:22:46Z tarek.ziade $" +__revision__ = "$Id$" import sys import os diff --git a/lib-python/2.7.0/bsddb/test/test_dbshelve.py b/lib-python/2.7.0/bsddb/test/test_dbshelve.py --- a/lib-python/2.7.0/bsddb/test/test_dbshelve.py +++ b/lib-python/2.7.0/bsddb/test/test_dbshelve.py @@ -255,7 +255,7 @@ self.assertEqual(value.L, [x] * 10) else: - self.assert_(0, 'Unknown key type, fix the test') + self.assertTrue(0, 'Unknown key type, fix the test') #---------------------------------------------------------------------- diff --git a/lib-python/2.7.0/distutils/command/build_py.py b/lib-python/2.7.0/distutils/command/build_py.py --- a/lib-python/2.7.0/distutils/command/build_py.py +++ b/lib-python/2.7.0/distutils/command/build_py.py @@ -2,7 +2,7 @@ Implements the Distutils 'build_py' command.""" -__revision__ = "$Id: build_py.py 76956 
2009-12-21 01:22:46Z tarek.ziade $" +__revision__ = "$Id$" import os import sys diff --git a/lib-python/2.7.0/test/test_pty.py b/lib-python/2.7.0/test/test_pty.py --- a/lib-python/2.7.0/test/test_pty.py +++ b/lib-python/2.7.0/test/test_pty.py @@ -86,7 +86,7 @@ fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK) try: s1 = os.read(master_fd, 1024) - self.assertEquals('', s1) + self.assertEqual('', s1) except OSError, e: if e.errno != errno.EAGAIN: raise @@ -96,14 +96,14 @@ debug("Writing to slave_fd") os.write(slave_fd, TEST_STRING_1) s1 = os.read(master_fd, 1024) - self.assertEquals('I wish to buy a fish license.\n', - normalize_output(s1)) + self.assertEqual('I wish to buy a fish license.\n', + normalize_output(s1)) debug("Writing chunked output") os.write(slave_fd, TEST_STRING_2[:5]) os.write(slave_fd, TEST_STRING_2[5:]) s2 = os.read(master_fd, 1024) - self.assertEquals('For my pet fish, Eric.\n', normalize_output(s2)) + self.assertEqual('For my pet fish, Eric.\n', normalize_output(s2)) os.close(slave_fd) os.close(master_fd) diff --git a/lib-python/2.7.0/test/test_descr.py b/lib-python/2.7.0/test/test_descr.py --- a/lib-python/2.7.0/test/test_descr.py +++ b/lib-python/2.7.0/test/test_descr.py @@ -4534,11 +4534,11 @@ __getattr__ = descr self.assertRaises(AttributeError, getattr, A(), "attr") - self.assertEquals(descr.counter, 1) + self.assertEqual(descr.counter, 1) self.assertRaises(AttributeError, getattr, B(), "attr") - self.assertEquals(descr.counter, 2) + self.assertEqual(descr.counter, 2) self.assertRaises(AttributeError, getattr, C(), "attr") - self.assertEquals(descr.counter, 4) + self.assertEqual(descr.counter, 4) import gc class EvilGetattribute(object): @@ -4565,7 +4565,7 @@ # Testing dict-proxy iterkeys... 
keys = [ key for key in self.C.__dict__.iterkeys() ] keys.sort() - self.assertEquals(keys, ['__dict__', '__doc__', '__module__', + self.assertEqual(keys, ['__dict__', '__doc__', '__module__', '__weakref__', 'meth']) def test_iter_values(self): diff --git a/lib-python/2.7.0/json/tests/test_dump.py b/lib-python/2.7.0/json/tests/test_dump.py --- a/lib-python/2.7.0/json/tests/test_dump.py +++ b/lib-python/2.7.0/json/tests/test_dump.py @@ -7,15 +7,15 @@ def test_dump(self): sio = StringIO() json.dump({}, sio) - self.assertEquals(sio.getvalue(), '{}') + self.assertEqual(sio.getvalue(), '{}') def test_dumps(self): - self.assertEquals(json.dumps({}), '{}') + self.assertEqual(json.dumps({}), '{}') def test_encode_truefalse(self): - self.assertEquals(json.dumps( + self.assertEqual(json.dumps( {True: False, False: True}, sort_keys=True), '{"false": true, "true": false}') - self.assertEquals(json.dumps( + self.assertEqual(json.dumps( {2: 3.0, 4.0: 5L, False: 1, 6L: True}, sort_keys=True), '{"false": 1, "2": 3.0, "4.0": 5, "6": true}') diff --git a/lib-python/2.7.0/test/test_cgi.py b/lib-python/2.7.0/test/test_cgi.py --- a/lib-python/2.7.0/test/test_cgi.py +++ b/lib-python/2.7.0/test/test_cgi.py @@ -251,7 +251,7 @@ -----------------------------721837373350705526688164684-- """ fs = cgi.FieldStorage(fp=StringIO(postdata), environ=env) - self.assertEquals(len(fs.list), 4) + self.assertEqual(len(fs.list), 4) expect = [{'name':'id', 'filename':None, 'value':'1234'}, {'name':'title', 'filename':None, 'value':''}, {'name':'file', 'filename':'test.txt','value':'Testing 123.\n'}, @@ -259,7 +259,7 @@ for x in range(len(fs.list)): for k, exp in expect[x].items(): got = getattr(fs.list[x], k) - self.assertEquals(got, exp) + self.assertEqual(got, exp) _qs_result = { 'key1': 'value1', diff --git a/lib-python/2.7.0/test/test_strop.py b/lib-python/2.7.0/test/test_strop.py --- a/lib-python/2.7.0/test/test_strop.py +++ b/lib-python/2.7.0/test/test_strop.py @@ -123,7 +123,7 @@ except 
OverflowError: pass else: - self.assertEquals(len(r), len(a) * 3) + self.assertEqual(len(r), len(a) * 3) @test_support.precisionbigmemtest(size=test_support._2G - 1, memuse=1) def test_stropjoin_huge_tup(self, size): @@ -133,7 +133,7 @@ except OverflowError: pass # acceptable on 32-bit else: - self.assertEquals(len(r), len(a) * 3) + self.assertEqual(len(r), len(a) * 3) transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377' diff --git a/lib-python/2.7.0/test/test_ioctl.py b/lib-python/2.7.0/test/test_ioctl.py --- a/lib-python/2.7.0/test/test_ioctl.py +++ b/lib-python/2.7.0/test/test_ioctl.py @@ -50,7 +50,7 @@ with open("/dev/tty", "r") as tty: r = fcntl.ioctl(tty, termios.TIOCGPGRP, buf, 1) rpgrp = buf[0] - self.assertEquals(r, 0) + self.assertEqual(r, 0) self.assertIn(rpgrp, ids) def test_ioctl_mutate(self): diff --git a/lib-python/2.7.0/test/test_sys.py b/lib-python/2.7.0/test/test_sys.py --- a/lib-python/2.7.0/test/test_sys.py +++ b/lib-python/2.7.0/test/test_sys.py @@ -120,7 +120,7 @@ try: sys.exit(0) except SystemExit, exc: - self.assertEquals(exc.code, 0) + self.assertEqual(exc.code, 0) except: self.fail("wrong exception") else: @@ -131,7 +131,7 @@ try: sys.exit(42) except SystemExit, exc: - self.assertEquals(exc.code, 42) + self.assertEqual(exc.code, 42) except: 
self.fail("wrong exception") else: @@ -141,7 +141,7 @@ try: sys.exit((42,)) except SystemExit, exc: - self.assertEquals(exc.code, 42) + self.assertEqual(exc.code, 42) except: self.fail("wrong exception") else: @@ -151,7 +151,7 @@ try: sys.exit("exit") except SystemExit, exc: - self.assertEquals(exc.code, "exit") + self.assertEqual(exc.code, "exit") except: self.fail("wrong exception") else: @@ -161,7 +161,7 @@ try: sys.exit((17, 23)) except SystemExit, exc: - self.assertEquals(exc.code, (17, 23)) + self.assertEqual(exc.code, (17, 23)) except: self.fail("wrong exception") else: @@ -213,7 +213,7 @@ orig = sys.getcheckinterval() for n in 0, 100, 120, orig: # orig last to restore starting state sys.setcheckinterval(n) - self.assertEquals(sys.getcheckinterval(), n) + self.assertEqual(sys.getcheckinterval(), n) def test_recursionlimit(self): self.assertRaises(TypeError, sys.getrecursionlimit, 42) diff --git a/lib-python/2.7.0/test/test_bigaddrspace.py b/lib-python/2.7.0/test/test_bigaddrspace.py --- a/lib-python/2.7.0/test/test_bigaddrspace.py +++ b/lib-python/2.7.0/test/test_bigaddrspace.py @@ -28,7 +28,7 @@ pass else: self.fail("should have raised OverflowError") - self.assertEquals(len(x), MAX_Py_ssize_t) + self.assertEqual(len(x), MAX_Py_ssize_t) ### the following test is pending a patch # (http://mail.python.org/pipermail/python-dev/2006-July/067774.html) diff --git a/lib-python/2.7.0/test/test_long.py b/lib-python/2.7.0/test/test_long.py --- a/lib-python/2.7.0/test/test_long.py +++ b/lib-python/2.7.0/test/test_long.py @@ -530,9 +530,9 @@ try: long(TruncReturnsNonIntegral()) except TypeError as e: - self.assertEquals(str(e), - "__trunc__ returned non-Integral" - " (type NonIntegral)") + self.assertEqual(str(e), + "__trunc__ returned non-Integral" + " (type NonIntegral)") else: self.fail("Failed to raise TypeError with %s" % ((base, trunc_result_base),)) diff --git a/lib-python/2.7.0/distutils/command/clean.py b/lib-python/2.7.0/distutils/command/clean.py --- 
a/lib-python/2.7.0/distutils/command/clean.py +++ b/lib-python/2.7.0/distutils/command/clean.py @@ -4,7 +4,7 @@ # contributed by Bastian Kleineidam , added 2000-03-18 -__revision__ = "$Id: clean.py 70886 2009-03-31 20:50:59Z tarek.ziade $" +__revision__ = "$Id$" import os from distutils.core import Command diff --git a/lib-python/2.7.0/pickle.py b/lib-python/2.7.0/pickle.py --- a/lib-python/2.7.0/pickle.py +++ b/lib-python/2.7.0/pickle.py @@ -24,7 +24,7 @@ """ -__version__ = "$Revision: 72223 $" # Code version +__version__ = "$Revision$" # Code version from types import * from copy_reg import dispatch_table diff --git a/lib-python/2.7.0/test/test_hashlib.py b/lib-python/2.7.0/test/test_hashlib.py --- a/lib-python/2.7.0/test/test_hashlib.py +++ b/lib-python/2.7.0/test/test_hashlib.py @@ -1,6 +1,6 @@ # Test hashlib module # -# $Id: test_hashlib.py 80564 2010-04-27 22:59:35Z victor.stinner $ +# $Id$ # # Copyright (C) 2005-2010 Gregory P. Smith (greg at krypto.org) # Licensed to PSF under a Contributor Agreement. 
diff --git a/lib-python/2.7.0/test/test_mhlib.py b/lib-python/2.7.0/test/test_mhlib.py --- a/lib-python/2.7.0/test/test_mhlib.py +++ b/lib-python/2.7.0/test/test_mhlib.py @@ -148,7 +148,7 @@ writeCurMessage('inbox', 2) mh = getMH() - eq = self.assertEquals + eq = self.assertEqual eq(mh.getprofile('Editor'), 'emacs') eq(mh.getprofile('not-set'), None) eq(mh.getpath(), os.path.abspath(_mhpath)) @@ -171,7 +171,7 @@ def test_listfolders(self): mh = getMH() - eq = self.assertEquals + eq = self.assertEqual folders = mh.listfolders() folders.sort() @@ -198,7 +198,7 @@ def test_sequence(self): mh = getMH() - eq = self.assertEquals + eq = self.assertEqual writeCurMessage('wide', 55) f = mh.openfolder('wide') @@ -253,7 +253,7 @@ def test_modify(self): mh = getMH() - eq = self.assertEquals + eq = self.assertEqual mh.makefolder("dummy1") self.assertIn("dummy1", mh.listfolders()) @@ -315,7 +315,7 @@ def test_read(self): mh = getMH() - eq = self.assertEquals + eq = self.assertEqual f = mh.openfolder('inbox') msg = f.openmessage(1) diff --git a/lib-python/2.7.0/test/seq_tests.py b/lib-python/2.7.0/test/seq_tests.py --- a/lib-python/2.7.0/test/seq_tests.py +++ b/lib-python/2.7.0/test/seq_tests.py @@ -131,8 +131,8 @@ self.assertRaises(ZeroDivisionError, self.type2test, IterGenExc(s)) def test_truth(self): - self.assert_(not self.type2test()) - self.assert_(self.type2test([42])) + self.assertFalse(self.type2test()) + self.assertTrue(self.type2test([42])) def test_getitem(self): u = self.type2test([0, 1, 2, 3, 4]) @@ -270,7 +270,7 @@ pass u3 = subclass([0, 1]) self.assertEqual(u3, u3*1) - self.assert_(u3 is not u3*1) + self.assertIsNot(u3, u3*1) def test_iadd(self): u = self.type2test([0, 1]) diff --git a/lib-python/2.7.0/distutils/command/upload.py b/lib-python/2.7.0/distutils/command/upload.py --- a/lib-python/2.7.0/distutils/command/upload.py +++ b/lib-python/2.7.0/distutils/command/upload.py @@ -79,7 +79,11 @@ # Fill in the data - send all the meta-data in case we need to # 
register a new release - content = open(filename,'rb').read() + f = open(filename,'rb') + try: + content = f.read() + finally: + f.close() meta = self.distribution.metadata data = { # action diff --git a/lib-python/2.7.0/distutils/tests/test_spawn.py b/lib-python/2.7.0/distutils/tests/test_spawn.py --- a/lib-python/2.7.0/distutils/tests/test_spawn.py +++ b/lib-python/2.7.0/distutils/tests/test_spawn.py @@ -20,7 +20,7 @@ (['nochange', 'nospace'], ['nochange', 'nospace'])): res = _nt_quote_args(args) - self.assertEquals(res, wanted) + self.assertEqual(res, wanted) @unittest.skipUnless(os.name in ('nt', 'posix'), diff --git a/lib-python/2.7.0/test/test_pep247.py b/lib-python/2.7.0/test/test_pep247.py --- a/lib-python/2.7.0/test/test_pep247.py +++ b/lib-python/2.7.0/test/test_pep247.py @@ -39,24 +39,24 @@ obj3.update('string') h2 = obj3.digest() - self.assertEquals(h1, h2) + self.assertEqual(h1, h2) self.assertTrue(hasattr(obj1, 'digest_size')) if not module.digest_size is None: - self.assertEquals(obj1.digest_size, module.digest_size) + self.assertEqual(obj1.digest_size, module.digest_size) - self.assertEquals(obj1.digest_size, len(h1)) + self.assertEqual(obj1.digest_size, len(h1)) obj1.update('string') obj_copy = obj1.copy() - self.assertEquals(obj1.digest(), obj_copy.digest()) - self.assertEquals(obj1.hexdigest(), obj_copy.hexdigest()) + self.assertEqual(obj1.digest(), obj_copy.digest()) + self.assertEqual(obj1.hexdigest(), obj_copy.hexdigest()) digest, hexdigest = obj1.digest(), obj1.hexdigest() hd2 = "" for byte in digest: hd2 += '%02x' % ord(byte) - self.assertEquals(hd2, hexdigest) + self.assertEqual(hd2, hexdigest) def test_md5(self): self.check_module(md5) diff --git a/lib-python/2.7.0/distutils/tests/test_config_cmd.py b/lib-python/2.7.0/distutils/tests/test_config_cmd.py --- a/lib-python/2.7.0/distutils/tests/test_config_cmd.py +++ b/lib-python/2.7.0/distutils/tests/test_config_cmd.py @@ -34,7 +34,7 @@ f.close() dump_file(this_file, 'I am the header') - 
self.assertEquals(len(self._logs), numlines+1) + self.assertEqual(len(self._logs), numlines+1) def test_search_cpp(self): if sys.platform == 'win32': @@ -44,10 +44,10 @@ # simple pattern searches match = cmd.search_cpp(pattern='xxx', body='// xxx') - self.assertEquals(match, 0) + self.assertEqual(match, 0) match = cmd.search_cpp(pattern='_configtest', body='// xxx') - self.assertEquals(match, 1) + self.assertEqual(match, 1) def test_finalize_options(self): # finalize_options does a bit of transformation @@ -59,9 +59,9 @@ cmd.library_dirs = 'three%sfour' % os.pathsep cmd.ensure_finalized() - self.assertEquals(cmd.include_dirs, ['one', 'two']) - self.assertEquals(cmd.libraries, ['one']) - self.assertEquals(cmd.library_dirs, ['three', 'four']) + self.assertEqual(cmd.include_dirs, ['one', 'two']) + self.assertEqual(cmd.libraries, ['one']) + self.assertEqual(cmd.library_dirs, ['three', 'four']) def test_clean(self): # _clean removes files diff --git a/lib-python/2.7.0/hashlib.py b/lib-python/2.7.0/hashlib.py --- a/lib-python/2.7.0/hashlib.py +++ b/lib-python/2.7.0/hashlib.py @@ -1,4 +1,4 @@ -# $Id: hashlib.py 78528 2010-03-01 02:01:47Z gregory.p.smith $ +# $Id$ # # Copyright (C) 2005 Gregory P. Smith (greg at krypto.org) # Licensed to PSF under a Contributor Agreement. diff --git a/lib-python/2.7.0/test/test_datetime.py b/lib-python/2.7.0/test/test_datetime.py --- a/lib-python/2.7.0/test/test_datetime.py +++ b/lib-python/2.7.0/test/test_datetime.py @@ -1491,8 +1491,8 @@ def test_microsecond_rounding(self): # Test whether fromtimestamp "rounds up" floats that are less # than one microsecond smaller than an integer. 
- self.assertEquals(self.theclass.fromtimestamp(0.9999999), - self.theclass.fromtimestamp(1)) + self.assertEqual(self.theclass.fromtimestamp(0.9999999), + self.theclass.fromtimestamp(1)) def test_insane_fromtimestamp(self): # It's possible that some platform maps time_t to double, @@ -1520,7 +1520,7 @@ @unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps") def test_negative_float_utcfromtimestamp(self): d = self.theclass.utcfromtimestamp(-1.05) - self.assertEquals(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000)) + self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000)) def test_utcnow(self): import time diff --git a/lib-python/2.7.0/test/test_calendar.py b/lib-python/2.7.0/test/test_calendar.py --- a/lib-python/2.7.0/test/test_calendar.py +++ b/lib-python/2.7.0/test/test_calendar.py @@ -2,6 +2,7 @@ import unittest from test import test_support +import locale result_2004_text = """ @@ -248,6 +249,19 @@ # verify it "acts like a sequence" in two forms of iteration self.assertEqual(value[::-1], list(reversed(value))) + def test_localecalendars(self): + # ensure that Locale{Text,HTML}Calendar resets the locale properly + # (it is still not thread-safe though) + old_october = calendar.TextCalendar().formatmonthname(2010, 10, 10) + try: + calendar.LocaleTextCalendar(locale='').formatmonthname(2010, 10, 10) + except locale.Error: + # cannot set the system default locale -- skip rest of test + return + calendar.LocaleHTMLCalendar(locale='').formatmonthname(2010, 10) + new_october = calendar.TextCalendar().formatmonthname(2010, 10, 10) + self.assertEquals(old_october, new_october) + class MonthCalendarTestCase(unittest.TestCase): def setUp(self): diff --git a/lib-python/2.7.0/json/tests/test_separators.py b/lib-python/2.7.0/json/tests/test_separators.py --- a/lib-python/2.7.0/json/tests/test_separators.py +++ b/lib-python/2.7.0/json/tests/test_separators.py @@ -37,6 +37,6 @@ h1 = json.loads(d1) h2 = json.loads(d2) - 
self.assertEquals(h1, h) - self.assertEquals(h2, h) - self.assertEquals(d2, expect) + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(d2, expect) diff --git a/lib-python/2.7.0/test/fork_wait.py b/lib-python/2.7.0/test/fork_wait.py --- a/lib-python/2.7.0/test/fork_wait.py +++ b/lib-python/2.7.0/test/fork_wait.py @@ -43,8 +43,8 @@ break time.sleep(2 * SHORTSLEEP) - self.assertEquals(spid, cpid) - self.assertEquals(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8)) + self.assertEqual(spid, cpid) + self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8)) def test_wait(self): for i in range(NUM_THREADS): @@ -54,7 +54,7 @@ a = self.alive.keys() a.sort() - self.assertEquals(a, range(NUM_THREADS)) + self.assertEqual(a, range(NUM_THREADS)) prefork_lives = self.alive.copy() diff --git a/lib-python/2.7.0/distutils/tests/test_build_scripts.py b/lib-python/2.7.0/distutils/tests/test_build_scripts.py --- a/lib-python/2.7.0/distutils/tests/test_build_scripts.py +++ b/lib-python/2.7.0/distutils/tests/test_build_scripts.py @@ -71,8 +71,10 @@ def write_script(self, dir, name, text): f = open(os.path.join(dir, name), "w") - f.write(text) - f.close() + try: + f.write(text) + finally: + f.close() def test_version_int(self): source = self.mkdtemp() diff --git a/lib-python/2.7.0/test/test_itertools.py b/lib-python/2.7.0/test/test_itertools.py --- a/lib-python/2.7.0/test/test_itertools.py +++ b/lib-python/2.7.0/test/test_itertools.py @@ -192,7 +192,7 @@ regular_combs = list(combinations(values, r)) # compare to combs without replacement if n == 0 or r <= 1: - self.assertEquals(result, regular_combs) # cases that should be identical + self.assertEqual(result, regular_combs) # cases that should be identical else: self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs @@ -288,20 +288,20 @@ comb = list(combinations(s, r)) # Check size - self.assertEquals(len(prod), n**r) - 
self.assertEquals(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r)) - self.assertEquals(len(perm), 0 if r>n else fact(n) // fact(n-r)) - self.assertEquals(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r)) + self.assertEqual(len(prod), n**r) + self.assertEqual(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r)) + self.assertEqual(len(perm), 0 if r>n else fact(n) // fact(n-r)) + self.assertEqual(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r)) # Check lexicographic order without repeated tuples - self.assertEquals(prod, sorted(set(prod))) - self.assertEquals(cwr, sorted(set(cwr))) - self.assertEquals(perm, sorted(set(perm))) - self.assertEquals(comb, sorted(set(comb))) + self.assertEqual(prod, sorted(set(prod))) + self.assertEqual(cwr, sorted(set(cwr))) + self.assertEqual(perm, sorted(set(perm))) + self.assertEqual(comb, sorted(set(comb))) # Check interrelationships - self.assertEquals(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted - self.assertEquals(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups + self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted + self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups self.assertEqual(comb, filter(set(cwr).__contains__, perm)) # comb: perm that is a cwr diff --git a/lib-python/2.7.0/test/script_helper.py b/lib-python/2.7.0/test/script_helper.py --- a/lib-python/2.7.0/test/script_helper.py +++ b/lib-python/2.7.0/test/script_helper.py @@ -12,6 +12,45 @@ import zipfile # Executing the interpreter in a subprocess +def _assert_python(expected_success, *args, **env_vars): + cmd_line = [sys.executable] + if not env_vars: + cmd_line.append('-E') + cmd_line.extend(args) + # Need to preserve 
the original environment, for in-place testing of + # shared library builds. + env = os.environ.copy() + env.update(env_vars) + p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=env) + try: + out, err = p.communicate() + finally: + subprocess._cleanup() + p.stdout.close() + p.stderr.close() + rc = p.returncode + if (rc and expected_success) or (not rc and not expected_success): + raise AssertionError( + "Process return code is %d, " + "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore'))) + return rc, out, err + +def assert_python_ok(*args, **env_vars): + """ + Assert that running the interpreter with `args` and optional environment + variables `env_vars` is ok and return a (return code, stdout, stderr) tuple. + """ + return _assert_python(True, *args, **env_vars) + +def assert_python_failure(*args, **env_vars): + """ + Assert that running the interpreter with `args` and optional environment + variables `env_vars` fails and return a (return code, stdout, stderr) tuple. 
+ """ + return _assert_python(False, *args, **env_vars) + def python_exit_code(*args): cmd_line = [sys.executable, '-E'] cmd_line.extend(args) diff --git a/lib-python/2.7.0/test/test_scope.py b/lib-python/2.7.0/test/test_scope.py --- a/lib-python/2.7.0/test/test_scope.py +++ b/lib-python/2.7.0/test/test_scope.py @@ -523,7 +523,7 @@ def f(self): return x - self.assertEquals(x, 12) # Used to raise UnboundLocalError + self.assertEqual(x, 12) # Used to raise UnboundLocalError finally: sys.settrace(None) diff --git a/lib-python/2.7.0/test/test_getopt.py b/lib-python/2.7.0/test/test_getopt.py --- a/lib-python/2.7.0/test/test_getopt.py +++ b/lib-python/2.7.0/test/test_getopt.py @@ -175,9 +175,9 @@ def test_issue4629(self): longopts, shortopts = getopt.getopt(['--help='], '', ['help=']) - self.assertEquals(longopts, [('--help', '')]) + self.assertEqual(longopts, [('--help', '')]) longopts, shortopts = getopt.getopt(['--help=x'], '', ['help=']) - self.assertEquals(longopts, [('--help', 'x')]) + self.assertEqual(longopts, [('--help', 'x')]) self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help']) def test_main(): diff --git a/lib-python/2.7.0/test/test_gettext.py b/lib-python/2.7.0/test/test_gettext.py --- a/lib-python/2.7.0/test/test_gettext.py +++ b/lib-python/2.7.0/test/test_gettext.py @@ -75,6 +75,7 @@ fp.close() self.env = test_support.EnvironmentVarGuard() self.env['LANGUAGE'] = 'xx' + gettext._translations.clear() def tearDown(self): self.env.__exit__() diff --git a/lib-python/2.7.0/test/test_sax.py b/lib-python/2.7.0/test/test_sax.py --- a/lib-python/2.7.0/test/test_sax.py +++ b/lib-python/2.7.0/test/test_sax.py @@ -1,5 +1,5 @@ # regression test for SAX 2.0 -*- coding: utf-8 -*- -# $Id: test_sax.py 85863 2010-10-27 18:58:04Z antoine.pitrou $ +# $Id$ from xml.sax import make_parser, ContentHandler, \ SAXException, SAXReaderNotAvailable, SAXParseException @@ -29,16 +29,16 @@ self.assertRaises(KeyError, attrs.getNameByQName, "attr") 
self.assertRaises(KeyError, attrs.getQNameByName, "attr") self.assertRaises(KeyError, attrs.__getitem__, "attr") - self.assertEquals(attrs.getLength(), 0) - self.assertEquals(attrs.getNames(), []) - self.assertEquals(attrs.getQNames(), []) - self.assertEquals(len(attrs), 0) + self.assertEqual(attrs.getLength(), 0) + self.assertEqual(attrs.getNames(), []) + self.assertEqual(attrs.getQNames(), []) + self.assertEqual(len(attrs), 0) self.assertFalse(attrs.has_key("attr")) - self.assertEquals(attrs.keys(), []) - self.assertEquals(attrs.get("attrs"), None) - self.assertEquals(attrs.get("attrs", 25), 25) - self.assertEquals(attrs.items(), []) - self.assertEquals(attrs.values(), []) + self.assertEqual(attrs.keys(), []) + self.assertEqual(attrs.get("attrs"), None) + self.assertEqual(attrs.get("attrs", 25), 25) + self.assertEqual(attrs.items(), []) + self.assertEqual(attrs.values(), []) def verify_empty_nsattrs(self, attrs): self.assertRaises(KeyError, attrs.getValue, (ns_uri, "attr")) @@ -46,33 +46,33 @@ self.assertRaises(KeyError, attrs.getNameByQName, "ns:attr") self.assertRaises(KeyError, attrs.getQNameByName, (ns_uri, "attr")) self.assertRaises(KeyError, attrs.__getitem__, (ns_uri, "attr")) - self.assertEquals(attrs.getLength(), 0) - self.assertEquals(attrs.getNames(), []) - self.assertEquals(attrs.getQNames(), []) - self.assertEquals(len(attrs), 0) + self.assertEqual(attrs.getLength(), 0) + self.assertEqual(attrs.getNames(), []) + self.assertEqual(attrs.getQNames(), []) + self.assertEqual(len(attrs), 0) self.assertFalse(attrs.has_key((ns_uri, "attr"))) - self.assertEquals(attrs.keys(), []) - self.assertEquals(attrs.get((ns_uri, "attr")), None) - self.assertEquals(attrs.get((ns_uri, "attr"), 25), 25) - self.assertEquals(attrs.items(), []) - self.assertEquals(attrs.values(), []) + self.assertEqual(attrs.keys(), []) + self.assertEqual(attrs.get((ns_uri, "attr")), None) + self.assertEqual(attrs.get((ns_uri, "attr"), 25), 25) + self.assertEqual(attrs.items(), []) + 
self.assertEqual(attrs.values(), []) def verify_attrs_wattr(self, attrs): - self.assertEquals(attrs.getLength(), 1) - self.assertEquals(attrs.getNames(), ["attr"]) - self.assertEquals(attrs.getQNames(), ["attr"]) - self.assertEquals(len(attrs), 1) + self.assertEqual(attrs.getLength(), 1) + self.assertEqual(attrs.getNames(), ["attr"]) + self.assertEqual(attrs.getQNames(), ["attr"]) + self.assertEqual(len(attrs), 1) self.assertTrue(attrs.has_key("attr")) - self.assertEquals(attrs.keys(), ["attr"]) - self.assertEquals(attrs.get("attr"), "val") - self.assertEquals(attrs.get("attr", 25), "val") - self.assertEquals(attrs.items(), [("attr", "val")]) - self.assertEquals(attrs.values(), ["val"]) - self.assertEquals(attrs.getValue("attr"), "val") - self.assertEquals(attrs.getValueByQName("attr"), "val") - self.assertEquals(attrs.getNameByQName("attr"), "attr") - self.assertEquals(attrs["attr"], "val") - self.assertEquals(attrs.getQNameByName("attr"), "attr") + self.assertEqual(attrs.keys(), ["attr"]) + self.assertEqual(attrs.get("attr"), "val") + self.assertEqual(attrs.get("attr", 25), "val") + self.assertEqual(attrs.items(), [("attr", "val")]) + self.assertEqual(attrs.values(), ["val"]) + self.assertEqual(attrs.getValue("attr"), "val") + self.assertEqual(attrs.getValueByQName("attr"), "val") + self.assertEqual(attrs.getNameByQName("attr"), "attr") + self.assertEqual(attrs["attr"], "val") + self.assertEqual(attrs.getQNameByName("attr"), "attr") class MakeParserTest(unittest.TestCase): def test_make_parser2(self): @@ -102,47 +102,47 @@ class SaxutilsTest(unittest.TestCase): # ===== escape def test_escape_basic(self): - self.assertEquals(escape("Donald Duck & Co"), "Donald Duck & Co") + self.assertEqual(escape("Donald Duck & Co"), "Donald Duck & Co") def test_escape_all(self): - self.assertEquals(escape(""), - "<Donald Duck & Co>") + self.assertEqual(escape(""), + "<Donald Duck & Co>") def test_escape_extra(self): - self.assertEquals(escape("Hei på deg", {"å" : "å"}), - "Hei 
på deg") + self.assertEqual(escape("Hei på deg", {"å" : "å"}), + "Hei på deg") # ===== unescape def test_unescape_basic(self): - self.assertEquals(unescape("Donald Duck & Co"), "Donald Duck & Co") + self.assertEqual(unescape("Donald Duck & Co"), "Donald Duck & Co") def test_unescape_all(self): - self.assertEquals(unescape("<Donald Duck & Co>"), - "") + self.assertEqual(unescape("<Donald Duck & Co>"), + "") def test_unescape_extra(self): - self.assertEquals(unescape("Hei på deg", {"å" : "å"}), - "Hei på deg") + self.assertEqual(unescape("Hei på deg", {"å" : "å"}), + "Hei på deg") def test_unescape_amp_extra(self): - self.assertEquals(unescape("&foo;", {"&foo;": "splat"}), "&foo;") + self.assertEqual(unescape("&foo;", {"&foo;": "splat"}), "&foo;") # ===== quoteattr def test_quoteattr_basic(self): - self.assertEquals(quoteattr("Donald Duck & Co"), - '"Donald Duck & Co"') + self.assertEqual(quoteattr("Donald Duck & Co"), + '"Donald Duck & Co"') def test_single_quoteattr(self): - self.assertEquals(quoteattr('Includes "double" quotes'), - '\'Includes "double" quotes\'') + self.assertEqual(quoteattr('Includes "double" quotes'), + '\'Includes "double" quotes\'') def test_double_quoteattr(self): - self.assertEquals(quoteattr("Includes 'single' quotes"), - "\"Includes 'single' quotes\"") + self.assertEqual(quoteattr("Includes 'single' quotes"), + "\"Includes 'single' quotes\"") def test_single_double_quoteattr(self): - self.assertEquals(quoteattr("Includes 'single' and \"double\" quotes"), - "\"Includes 'single' and "double" quotes\"") + self.assertEqual(quoteattr("Includes 'single' and \"double\" quotes"), + "\"Includes 'single' and "double" quotes\"") # ===== make_parser def test_make_parser(self): @@ -164,7 +164,7 @@ gen.endElement("doc") gen.endDocument() - self.assertEquals(result.getvalue(), start + "") + self.assertEqual(result.getvalue(), start + "") def test_xmlgen_content(self): result = StringIO() @@ -176,7 +176,7 @@ gen.endElement("doc") gen.endDocument() - 
self.assertEquals(result.getvalue(), start + "huhei") + self.assertEqual(result.getvalue(), start + "huhei") def test_xmlgen_pi(self): result = StringIO() @@ -188,7 +188,7 @@ gen.endElement("doc") gen.endDocument() - self.assertEquals(result.getvalue(), start + "") + self.assertEqual(result.getvalue(), start + "") def test_xmlgen_content_escape(self): result = StringIO() @@ -200,7 +200,7 @@ gen.endElement("doc") gen.endDocument() - self.assertEquals(result.getvalue(), + self.assertEqual(result.getvalue(), start + "<huhei&") def test_xmlgen_attr_escape(self): @@ -218,7 +218,7 @@ gen.endElement("doc") gen.endDocument() - self.assertEquals(result.getvalue(), start + + self.assertEqual(result.getvalue(), start + ("" "" "")) @@ -233,7 +233,7 @@ gen.endElement("doc") gen.endDocument() - self.assertEquals(result.getvalue(), start + " ") + self.assertEqual(result.getvalue(), start + " ") def test_xmlgen_ns(self): result = StringIO() @@ -249,7 +249,7 @@ gen.endPrefixMapping("ns1") gen.endDocument() - self.assertEquals(result.getvalue(), start + \ + self.assertEqual(result.getvalue(), start + \ ('' % ns_uri)) @@ -262,7 +262,7 @@ gen.endElementNS((None, 'a'), 'a') gen.endDocument() - self.assertEquals(result.getvalue(), start+'') + self.assertEqual(result.getvalue(), start+'') def test_1463026_2(self): result = StringIO() @@ -275,7 +275,7 @@ gen.endPrefixMapping(None) gen.endDocument() - self.assertEquals(result.getvalue(), start+'') + self.assertEqual(result.getvalue(), start+'') def test_1463026_3(self): result = StringIO() @@ -288,7 +288,7 @@ gen.endPrefixMapping('my') gen.endDocument() - self.assertEquals(result.getvalue(), + self.assertEqual(result.getvalue(), start+'') def test_5027_1(self): @@ -311,11 +311,11 @@ parser.setContentHandler(gen) parser.parse(test_xml) - self.assertEquals(result.getvalue(), - start + ( - '' - 'Hello' - '')) + self.assertEqual(result.getvalue(), + start + ( + '' + 'Hello' + '')) def test_5027_2(self): # The xml prefix (as in xml:lang below) 
is reserved and bound by @@ -339,11 +339,11 @@ gen.endPrefixMapping('a') gen.endDocument() - self.assertEquals(result.getvalue(), - start + ( - '' - 'Hello' - '')) + self.assertEqual(result.getvalue(), + start + ( + '' + 'Hello' + '')) class XMLFilterBaseTest(unittest.TestCase): @@ -360,7 +360,7 @@ filter.endElement("doc") filter.endDocument() - self.assertEquals(result.getvalue(), start + "content ") + self.assertEqual(result.getvalue(), start + "content ") # =========================================================================== # @@ -382,7 +382,7 @@ parser.setContentHandler(xmlgen) parser.parse(open(TEST_XMLFILE)) - self.assertEquals(result.getvalue(), xml_test_out) + self.assertEqual(result.getvalue(), xml_test_out) # ===== DTDHandler support @@ -410,9 +410,9 @@ parser.feed('') parser.close() - self.assertEquals(handler._notations, + self.assertEqual(handler._notations, [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)]) - self.assertEquals(handler._entities, [("img", None, "expat.gif", "GIF")]) + self.assertEqual(handler._entities, [("img", None, "expat.gif", "GIF")]) # ===== EntityResolver support @@ -435,8 +435,8 @@ parser.feed('&test;') parser.close() - self.assertEquals(result.getvalue(), start + - "") + self.assertEqual(result.getvalue(), start + + "") # ===== Attributes support @@ -488,18 +488,18 @@ attrs = gather._attrs - self.assertEquals(attrs.getLength(), 1) - self.assertEquals(attrs.getNames(), [(ns_uri, "attr")]) + self.assertEqual(attrs.getLength(), 1) + self.assertEqual(attrs.getNames(), [(ns_uri, "attr")]) self.assertTrue((attrs.getQNames() == [] or attrs.getQNames() == ["ns:attr"])) - self.assertEquals(len(attrs), 1) + self.assertEqual(len(attrs), 1) self.assertTrue(attrs.has_key((ns_uri, "attr"))) - self.assertEquals(attrs.get((ns_uri, "attr")), "val") - self.assertEquals(attrs.get((ns_uri, "attr"), 25), "val") - self.assertEquals(attrs.items(), [((ns_uri, "attr"), "val")]) - self.assertEquals(attrs.values(), 
["val"]) - self.assertEquals(attrs.getValue((ns_uri, "attr")), "val") - self.assertEquals(attrs[(ns_uri, "attr")], "val") + self.assertEqual(attrs.get((ns_uri, "attr")), "val") + self.assertEqual(attrs.get((ns_uri, "attr"), 25), "val") + self.assertEqual(attrs.items(), [((ns_uri, "attr"), "val")]) + self.assertEqual(attrs.values(), ["val"]) + self.assertEqual(attrs.getValue((ns_uri, "attr")), "val") + self.assertEqual(attrs[(ns_uri, "attr")], "val") # ===== InputSource support @@ -511,7 +511,7 @@ parser.setContentHandler(xmlgen) parser.parse(TEST_XMLFILE) - self.assertEquals(result.getvalue(), xml_test_out) + self.assertEqual(result.getvalue(), xml_test_out) def test_expat_inpsource_sysid(self): parser = create_parser() @@ -521,7 +521,7 @@ parser.setContentHandler(xmlgen) parser.parse(InputSource(TEST_XMLFILE)) - self.assertEquals(result.getvalue(), xml_test_out) + self.assertEqual(result.getvalue(), xml_test_out) def test_expat_inpsource_stream(self): parser = create_parser() @@ -533,7 +533,7 @@ inpsrc.setByteStream(open(TEST_XMLFILE)) parser.parse(inpsrc) - self.assertEquals(result.getvalue(), xml_test_out) + self.assertEqual(result.getvalue(), xml_test_out) # ===== IncrementalParser support @@ -547,7 +547,7 @@ parser.feed("") parser.close() - self.assertEquals(result.getvalue(), start + "") + self.assertEqual(result.getvalue(), start + "") def test_expat_incremental_reset(self): result = StringIO() @@ -568,7 +568,7 @@ parser.feed("") parser.close() - self.assertEquals(result.getvalue(), start + "text") + self.assertEqual(result.getvalue(), start + "text") # ===== Locator support @@ -582,9 +582,9 @@ parser.feed("") parser.close() - self.assertEquals(parser.getSystemId(), None) - self.assertEquals(parser.getPublicId(), None) - self.assertEquals(parser.getLineNumber(), 1) + self.assertEqual(parser.getSystemId(), None) + self.assertEqual(parser.getPublicId(), None) + self.assertEqual(parser.getLineNumber(), 1) def test_expat_locator_withinfo(self): result = 
StringIO() @@ -593,8 +593,8 @@ parser.setContentHandler(xmlgen) parser.parse(TEST_XMLFILE) - self.assertEquals(parser.getSystemId(), TEST_XMLFILE) - self.assertEquals(parser.getPublicId(), None) + self.assertEqual(parser.getSystemId(), TEST_XMLFILE) + self.assertEqual(parser.getPublicId(), None) # =========================================================================== @@ -615,7 +615,7 @@ parser.parse(source) self.fail() except SAXException, e: - self.assertEquals(e.getSystemId(), name) + self.assertEqual(e.getSystemId(), name) def test_expat_incomplete(self): parser = create_parser() @@ -679,21 +679,21 @@ attrs = AttributesNSImpl({(ns_uri, "attr") : "val"}, {(ns_uri, "attr") : "ns:attr"}) - self.assertEquals(attrs.getLength(), 1) - self.assertEquals(attrs.getNames(), [(ns_uri, "attr")]) - self.assertEquals(attrs.getQNames(), ["ns:attr"]) - self.assertEquals(len(attrs), 1) + self.assertEqual(attrs.getLength(), 1) + self.assertEqual(attrs.getNames(), [(ns_uri, "attr")]) + self.assertEqual(attrs.getQNames(), ["ns:attr"]) + self.assertEqual(len(attrs), 1) self.assertTrue(attrs.has_key((ns_uri, "attr"))) - self.assertEquals(attrs.keys(), [(ns_uri, "attr")]) - self.assertEquals(attrs.get((ns_uri, "attr")), "val") - self.assertEquals(attrs.get((ns_uri, "attr"), 25), "val") - self.assertEquals(attrs.items(), [((ns_uri, "attr"), "val")]) - self.assertEquals(attrs.values(), ["val"]) - self.assertEquals(attrs.getValue((ns_uri, "attr")), "val") - self.assertEquals(attrs.getValueByQName("ns:attr"), "val") - self.assertEquals(attrs.getNameByQName("ns:attr"), (ns_uri, "attr")) - self.assertEquals(attrs[(ns_uri, "attr")], "val") - self.assertEquals(attrs.getQNameByName((ns_uri, "attr")), "ns:attr") + self.assertEqual(attrs.keys(), [(ns_uri, "attr")]) + self.assertEqual(attrs.get((ns_uri, "attr")), "val") + self.assertEqual(attrs.get((ns_uri, "attr"), 25), "val") + self.assertEqual(attrs.items(), [((ns_uri, "attr"), "val")]) + self.assertEqual(attrs.values(), ["val"]) + 
self.assertEqual(attrs.getValue((ns_uri, "attr")), "val") + self.assertEqual(attrs.getValueByQName("ns:attr"), "val") + self.assertEqual(attrs.getNameByQName("ns:attr"), (ns_uri, "attr")) + self.assertEqual(attrs[(ns_uri, "attr")], "val") + self.assertEqual(attrs.getQNameByName((ns_uri, "attr")), "ns:attr") # During the development of Python 2.5, an attempt to move the "xml" @@ -729,7 +729,7 @@ try: import xml.sax.expatreader module = xml.sax.expatreader - self.assertEquals(module.__name__, "xml.sax.expatreader") + self.assertEqual(module.__name__, "xml.sax.expatreader") finally: sys.modules.update(old_modules) diff --git a/lib-python/2.7.0/json/tests/test_float.py b/lib-python/2.7.0/json/tests/test_float.py --- a/lib-python/2.7.0/json/tests/test_float.py +++ b/lib-python/2.7.0/json/tests/test_float.py @@ -7,13 +7,13 @@ def test_floats(self): for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]: - self.assertEquals(float(json.dumps(num)), num) - self.assertEquals(json.loads(json.dumps(num)), num) - self.assertEquals(json.loads(unicode(json.dumps(num))), num) + self.assertEqual(float(json.dumps(num)), num) + self.assertEqual(json.loads(json.dumps(num)), num) + self.assertEqual(json.loads(unicode(json.dumps(num))), num) def test_ints(self): for num in [1, 1L, 1<<32, 1<<64]: - self.assertEquals(json.dumps(num), str(num)) - self.assertEquals(int(json.dumps(num)), num) - self.assertEquals(json.loads(json.dumps(num)), num) - self.assertEquals(json.loads(unicode(json.dumps(num))), num) + self.assertEqual(json.dumps(num), str(num)) + self.assertEqual(int(json.dumps(num)), num) + self.assertEqual(json.loads(json.dumps(num)), num) + self.assertEqual(json.loads(unicode(json.dumps(num))), num) diff --git a/lib-python/2.7.0/test/list_tests.py b/lib-python/2.7.0/test/list_tests.py --- a/lib-python/2.7.0/test/list_tests.py +++ b/lib-python/2.7.0/test/list_tests.py @@ -330,7 +330,7 @@ self.assertRaises(BadExc, d.remove, 'c') for x, y in zip(d, e): # verify 
that original order and values are retained. - self.assert_(x is y) + self.assertIs(x, y) def test_count(self): a = self.type2test([0, 1, 2])*3 @@ -466,7 +466,7 @@ u = self.type2test([0, 1]) u2 = u u += [2, 3] - self.assert_(u is u2) + self.assertIs(u, u2) u = self.type2test("spam") u += "eggs" diff --git a/lib-python/2.7.0/json/tests/test_scanstring.py b/lib-python/2.7.0/json/tests/test_scanstring.py --- a/lib-python/2.7.0/json/tests/test_scanstring.py +++ b/lib-python/2.7.0/json/tests/test_scanstring.py @@ -13,92 +13,92 @@ self._test_scanstring(json.decoder.c_scanstring) def _test_scanstring(self, scanstring): - self.assertEquals( + self.assertEqual( scanstring('"z\\ud834\\udd20x"', 1, None, True), (u'z\U0001d120x', 16)) if sys.maxunicode == 65535: - self.assertEquals( + self.assertEqual( scanstring(u'"z\U0001d120x"', 1, None, True), (u'z\U0001d120x', 6)) else: - self.assertEquals( + self.assertEqual( scanstring(u'"z\U0001d120x"', 1, None, True), (u'z\U0001d120x', 5)) - self.assertEquals( + self.assertEqual( scanstring('"\\u007b"', 1, None, True), (u'{', 8)) - self.assertEquals( + self.assertEqual( scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True), (u'A JSON payload should be an object or array, not a string.', 60)) - self.assertEquals( + self.assertEqual( scanstring('["Unclosed array"', 2, None, True), (u'Unclosed array', 17)) - self.assertEquals( + self.assertEqual( scanstring('["extra comma",]', 2, None, True), (u'extra comma', 14)) - self.assertEquals( + self.assertEqual( scanstring('["double extra comma",,]', 2, None, True), (u'double extra comma', 21)) - self.assertEquals( + self.assertEqual( scanstring('["Comma after the close"],', 2, None, True), (u'Comma after the close', 24)) - self.assertEquals( + self.assertEqual( scanstring('["Extra close"]]', 2, None, True), (u'Extra close', 14)) - self.assertEquals( + self.assertEqual( scanstring('{"Extra comma": true,}', 2, None, True), (u'Extra comma', 14)) - 
self.assertEquals( + self.assertEqual( scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True), (u'Extra value after close', 26)) - self.assertEquals( + self.assertEqual( scanstring('{"Illegal expression": 1 + 2}', 2, None, True), (u'Illegal expression', 21)) - self.assertEquals( + self.assertEqual( scanstring('{"Illegal invocation": alert()}', 2, None, True), (u'Illegal invocation', 21)) - self.assertEquals( + self.assertEqual( scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True), (u'Numbers cannot have leading zeroes', 37)) - self.assertEquals( + self.assertEqual( scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True), (u'Numbers cannot be hex', 24)) - self.assertEquals( + self.assertEqual( scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True), (u'Too deep', 30)) - self.assertEquals( + self.assertEqual( scanstring('{"Missing colon" null}', 2, None, True), (u'Missing colon', 16)) - self.assertEquals( + self.assertEqual( scanstring('{"Double colon":: null}', 2, None, True), (u'Double colon', 15)) - self.assertEquals( + self.assertEqual( scanstring('{"Comma instead of colon", null}', 2, None, True), (u'Comma instead of colon', 25)) - self.assertEquals( + self.assertEqual( scanstring('["Colon instead of comma": false]', 2, None, True), (u'Colon instead of comma', 25)) - self.assertEquals( + self.assertEqual( scanstring('["Bad value", truth]', 2, None, True), (u'Bad value', 12)) diff --git a/lib-python/2.7.0/test/test_popen2.py b/lib-python/2.7.0/test/test_popen2.py --- a/lib-python/2.7.0/test/test_popen2.py +++ b/lib-python/2.7.0/test/test_popen2.py @@ -63,8 +63,8 @@ w.write(teststr) w.close() got = r.read() - self.assertEquals(expected_out, got.strip(), "wrote %r read %r" % - (teststr, got)) + self.assertEqual(expected_out, got.strip(), "wrote %r read %r" % + (teststr, got)) if e is not None: got = e.read() @@ -90,7 +90,7 @@ w, r = os.popen2(["echo", self.teststr]) got = r.read() - 
self.assertEquals(got, self.teststr + "\n") + self.assertEqual(got, self.teststr + "\n") w, r = os.popen2(self.cmd) self.validate_output(self.teststr, self.expected, r, w) @@ -103,7 +103,7 @@ w, r, e = os.popen3(["echo", self.teststr]) got = r.read() - self.assertEquals(got, self.teststr + "\n") + self.assertEqual(got, self.teststr + "\n") got = e.read() self.assertFalse(got, "unexpected %r on stderr" % got) @@ -117,7 +117,7 @@ w, r = os.popen4(["echo", self.teststr]) got = r.read() - self.assertEquals(got, self.teststr + "\n") + self.assertEqual(got, self.teststr + "\n") w, r = os.popen4(self.cmd) self.validate_output(self.teststr, self.expected, r, w) diff --git a/lib-python/2.7.0/json/tests/test_pass1.py b/lib-python/2.7.0/json/tests/test_pass1.py --- a/lib-python/2.7.0/json/tests/test_pass1.py +++ b/lib-python/2.7.0/json/tests/test_pass1.py @@ -67,7 +67,7 @@ # test in/out equivalence and parsing res = json.loads(JSON) out = json.dumps(res) - self.assertEquals(res, json.loads(out)) + self.assertEqual(res, json.loads(out)) try: json.dumps(res, allow_nan=False) except ValueError: diff --git a/lib-python/2.7.0/test/test_float.py b/lib-python/2.7.0/test/test_float.py --- a/lib-python/2.7.0/test/test_float.py +++ b/lib-python/2.7.0/test/test_float.py @@ -473,15 +473,15 @@ return -0.0, math.atan2(0.0, -1) def neg_neg(): return -0.0, math.atan2(-0.0, -1) - self.assertEquals(pos_pos(), neg_pos()) - self.assertEquals(pos_neg(), neg_neg()) + self.assertEqual(pos_pos(), neg_pos()) + self.assertEqual(pos_neg(), neg_neg()) @requires_IEEE_754 def test_underflow_sign(self): # check that -1e-1000 gives -0.0, not 0.0 - self.assertEquals(math.atan2(-1e-1000, -1), math.atan2(-0.0, -1)) - self.assertEquals(math.atan2(float('-1e-1000'), -1), - math.atan2(-0.0, -1)) + self.assertEqual(math.atan2(-1e-1000, -1), math.atan2(-0.0, -1)) + self.assertEqual(math.atan2(float('-1e-1000'), -1), + math.atan2(-0.0, -1)) def test_format(self): # these should be rewritten to use both format(x, 
spec) and @@ -556,9 +556,9 @@ self.assertEqual(fmt % -arg, '-' + rhs) def test_issue5864(self): - self.assertEquals(format(123.456, '.4'), '123.5') - self.assertEquals(format(1234.56, '.4'), '1.235e+03') - self.assertEquals(format(12345.6, '.4'), '1.235e+04') + self.assertEqual(format(123.456, '.4'), '123.5') + self.assertEqual(format(1234.56, '.4'), '1.235e+03') + self.assertEqual(format(12345.6, '.4'), '1.235e+04') class ReprTestCase(unittest.TestCase): def test_repr(self): diff --git a/lib-python/2.7.0/test/test_SimpleHTTPServer.py b/lib-python/2.7.0/test/test_SimpleHTTPServer.py --- a/lib-python/2.7.0/test/test_SimpleHTTPServer.py +++ b/lib-python/2.7.0/test/test_SimpleHTTPServer.py @@ -21,17 +21,17 @@ def test_queryArguments (self): path = self.handler.translate_path ('/filename') - self.assertEquals (path, self.translated) + self.assertEqual (path, self.translated) path = self.handler.translate_path ('/filename?foo=bar') - self.assertEquals (path, self.translated) + self.assertEqual (path, self.translated) path = self.handler.translate_path ('/filename?a=b&spam=eggs#zot') - self.assertEquals (path, self.translated) + self.assertEqual (path, self.translated) def test_startWithDoubleSlash (self): path = self.handler.translate_path ('//filename') - self.assertEquals (path, self.translated) + self.assertEqual (path, self.translated) path = self.handler.translate_path ('//filename?foo=bar') - self.assertEquals (path, self.translated) + self.assertEqual (path, self.translated) def test_main(): diff --git a/lib-python/2.7.0/test/test_urllib.py b/lib-python/2.7.0/test/test_urllib.py --- a/lib-python/2.7.0/test/test_urllib.py +++ b/lib-python/2.7.0/test/test_urllib.py @@ -112,7 +112,7 @@ self.env.set('NO_PROXY', 'localhost') proxies = urllib.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy') keys - self.assertEquals('localhost', proxies['no']) + self.assertEqual('localhost', proxies['no']) class 
urlopen_HttpTests(unittest.TestCase): diff --git a/lib-python/2.7.0/test/test_memoryview.py b/lib-python/2.7.0/test/test_memoryview.py --- a/lib-python/2.7.0/test/test_memoryview.py +++ b/lib-python/2.7.0/test/test_memoryview.py @@ -27,11 +27,11 @@ b = tp(self._source) oldrefcount = sys.getrefcount(b) m = self._view(b) - self.assertEquals(m[0], item(b"a")) + self.assertEqual(m[0], item(b"a")) self.assertIsInstance(m[0], bytes) - self.assertEquals(m[5], item(b"f")) - self.assertEquals(m[-1], item(b"f")) - self.assertEquals(m[-6], item(b"a")) + self.assertEqual(m[5], item(b"f")) + self.assertEqual(m[-1], item(b"f")) + self.assertEqual(m[-6], item(b"a")) # Bounds checking self.assertRaises(IndexError, lambda: m[6]) self.assertRaises(IndexError, lambda: m[-7]) @@ -42,7 +42,7 @@ self.assertRaises(TypeError, lambda: m[0.0]) self.assertRaises(TypeError, lambda: m["a"]) m = None - self.assertEquals(sys.getrefcount(b), oldrefcount) + self.assertEqual(sys.getrefcount(b), oldrefcount) def test_getitem(self): for tp in self._types: @@ -72,7 +72,7 @@ self.assertRaises(TypeError, setitem, 65) self.assertRaises(TypeError, setitem, memoryview(b"a")) m = None - self.assertEquals(sys.getrefcount(b), oldrefcount) + self.assertEqual(sys.getrefcount(b), oldrefcount) def test_setitem_writable(self): if not self.rw_type: @@ -115,7 +115,7 @@ self.assertRaises(ValueError, setitem, slice(0,2), b"a") m = None - self.assertEquals(sys.getrefcount(b), oldrefcount) + self.assertEqual(sys.getrefcount(b), oldrefcount) def test_delitem(self): for tp in self._types: @@ -133,14 +133,14 @@ # This calls self.getitem_type() on each separate byte of b"abcdef" expected = b"".join( self.getitem_type(c) for c in b"abcdef") - self.assertEquals(b, expected) + self.assertEqual(b, expected) self.assertIsInstance(b, bytes) def test_tolist(self): for tp in self._types: m = self._view(tp(self._source)) l = m.tolist() - self.assertEquals(l, map(ord, b"abcdef")) + self.assertEqual(l, map(ord, b"abcdef")) def 
test_compare(self): # memoryviews can compare for equality with other objects @@ -170,27 +170,27 @@ def check_attributes_with_type(self, tp): m = self._view(tp(self._source)) - self.assertEquals(m.format, self.format) + self.assertEqual(m.format, self.format) self.assertIsInstance(m.format, str) - self.assertEquals(m.itemsize, self.itemsize) - self.assertEquals(m.ndim, 1) - self.assertEquals(m.shape, (6,)) - self.assertEquals(len(m), 6) - self.assertEquals(m.strides, (self.itemsize,)) - self.assertEquals(m.suboffsets, None) + self.assertEqual(m.itemsize, self.itemsize) + self.assertEqual(m.ndim, 1) + self.assertEqual(m.shape, (6,)) + self.assertEqual(len(m), 6) + self.assertEqual(m.strides, (self.itemsize,)) + self.assertEqual(m.suboffsets, None) return m def test_attributes_readonly(self): if not self.ro_type: return m = self.check_attributes_with_type(self.ro_type) - self.assertEquals(m.readonly, True) + self.assertEqual(m.readonly, True) def test_attributes_writable(self): if not self.rw_type: return m = self.check_attributes_with_type(self.rw_type) - self.assertEquals(m.readonly, False) + self.assertEqual(m.readonly, False) # Disabled: unicode uses the old buffer API in 2.x @@ -203,9 +203,9 @@ #oldviewrefcount = sys.getrefcount(m) #s = unicode(m, "utf-8") #self._check_contents(tp, b, s.encode("utf-8")) - #self.assertEquals(sys.getrefcount(m), oldviewrefcount) + #self.assertEqual(sys.getrefcount(m), oldviewrefcount) #m = None - #self.assertEquals(sys.getrefcount(b), oldrefcount) + #self.assertEqual(sys.getrefcount(b), oldrefcount) def test_gc(self): for tp in self._types: @@ -269,7 +269,7 @@ return memoryview(obj) def _check_contents(self, tp, obj, contents): - self.assertEquals(obj, tp(contents)) + self.assertEqual(obj, tp(contents)) class BaseMemorySliceTests: source_bytes = b"XabcdefY" @@ -279,14 +279,14 @@ return m[1:7] def _check_contents(self, tp, obj, contents): - self.assertEquals(obj[1:7], tp(contents)) + self.assertEqual(obj[1:7], tp(contents)) def 
test_refs(self): for tp in self._types: m = memoryview(tp(self._source)) oldrefcount = sys.getrefcount(m) m[1:2] - self.assertEquals(sys.getrefcount(m), oldrefcount) + self.assertEqual(sys.getrefcount(m), oldrefcount) class BaseMemorySliceSliceTests: source_bytes = b"XabcdefY" @@ -296,7 +296,7 @@ return m[:7][1:] def _check_contents(self, tp, obj, contents): - self.assertEquals(obj[1:7], tp(contents)) + self.assertEqual(obj[1:7], tp(contents)) # Concrete test classes @@ -323,7 +323,7 @@ #m = memoryview(a) #new_a = array.array('i', range(9, -1, -1)) #m[:] = new_a - #self.assertEquals(a, new_a) + #self.assertEqual(a, new_a) class BytesMemorySliceTest(unittest.TestCase, diff --git a/lib-python/2.7.0/test/test_traceback.py b/lib-python/2.7.0/test/test_traceback.py --- a/lib-python/2.7.0/test/test_traceback.py +++ b/lib-python/2.7.0/test/test_traceback.py @@ -185,11 +185,11 @@ raise Error("unable to create test traceback string") # Make sure that Python and the traceback module format the same thing - self.assertEquals(traceback_fmt, python_fmt) + self.assertEqual(traceback_fmt, python_fmt) # Make sure that the traceback is properly indented. 
tb_lines = python_fmt.splitlines() - self.assertEquals(len(tb_lines), 3) + self.assertEqual(len(tb_lines), 3) banner, location, source_line = tb_lines self.assertTrue(banner.startswith('Traceback')) self.assertTrue(location.startswith(' File')) diff --git a/lib-python/2.7.0/distutils/dir_util.py b/lib-python/2.7.0/distutils/dir_util.py --- a/lib-python/2.7.0/distutils/dir_util.py +++ b/lib-python/2.7.0/distutils/dir_util.py @@ -2,9 +2,10 @@ Utility functions for manipulating directories and directory trees.""" -__revision__ = "$Id: dir_util.py 84862 2010-09-17 16:40:01Z senthil.kumaran $" +__revision__ = "$Id$" import os +import errno from distutils.errors import DistutilsFileError, DistutilsInternalError from distutils import log @@ -69,10 +70,11 @@ if not dry_run: try: os.mkdir(head, mode) - created_dirs.append(head) except OSError, exc: - raise DistutilsFileError, \ - "could not create '%s': %s" % (head, exc[-1]) + if not (exc.errno == errno.EEXIST and os.path.isdir(head)): + raise DistutilsFileError( + "could not create '%s': %s" % (head, exc.args[-1])) + created_dirs.append(head) _path_created[abs_head] = 1 return created_dirs diff --git a/lib-python/2.7.0/distutils/tests/test_ccompiler.py b/lib-python/2.7.0/distutils/tests/test_ccompiler.py --- a/lib-python/2.7.0/distutils/tests/test_ccompiler.py +++ b/lib-python/2.7.0/distutils/tests/test_ccompiler.py @@ -32,7 +32,7 @@ opts = gen_lib_options(compiler, libdirs, runlibdirs, libs) wanted = ['-Llib1', '-Llib2', '-cool', '-Rrunlib1', 'found', '-lname2'] - self.assertEquals(opts, wanted) + self.assertEqual(opts, wanted) def test_debug_print(self): @@ -43,14 +43,14 @@ with captured_stdout() as stdout: compiler.debug_print('xxx') stdout.seek(0) - self.assertEquals(stdout.read(), '') + self.assertEqual(stdout.read(), '') debug.DEBUG = True try: with captured_stdout() as stdout: compiler.debug_print('xxx') stdout.seek(0) - self.assertEquals(stdout.read(), 'xxx\n') + self.assertEqual(stdout.read(), 'xxx\n') finally: 
debug.DEBUG = False @@ -72,7 +72,7 @@ comp = compiler() customize_compiler(comp) - self.assertEquals(comp.exes['archiver'], 'my_ar -arflags') + self.assertEqual(comp.exes['archiver'], 'my_ar -arflags') def test_suite(): return unittest.makeSuite(CCompilerTestCase) diff --git a/lib-python/2.7.0/test/test_repr.py b/lib-python/2.7.0/test/test_repr.py --- a/lib-python/2.7.0/test/test_repr.py +++ b/lib-python/2.7.0/test/test_repr.py @@ -22,7 +22,7 @@ class ReprTests(unittest.TestCase): def test_string(self): - eq = self.assertEquals + eq = self.assertEqual eq(r("abc"), "'abc'") eq(r("abcdefghijklmnop"),"'abcdefghijklmnop'") @@ -36,7 +36,7 @@ eq(r(s), expected) def test_tuple(self): - eq = self.assertEquals + eq = self.assertEqual eq(r((1,)), "(1,)") t3 = (1, 2, 3) @@ -51,7 +51,7 @@ from array import array from collections import deque - eq = self.assertEquals + eq = self.assertEqual # Tuples give up after 6 elements eq(r(()), "()") eq(r((1,)), "(1,)") @@ -101,7 +101,7 @@ "array('i', [1, 2, 3, 4, 5, ...])") def test_numbers(self): - eq = self.assertEquals + eq = self.assertEqual eq(r(123), repr(123)) eq(r(123L), repr(123L)) eq(r(1.0/3), repr(1.0/3)) @@ -111,7 +111,7 @@ eq(r(n), expected) def test_instance(self): - eq = self.assertEquals + eq = self.assertEqual i1 = ClassWithRepr("a") eq(r(i1), repr(i1)) @@ -141,7 +141,7 @@ # XXX anonymous functions? 
see func_repr def test_builtin_function(self): - eq = self.assertEquals + eq = self.assertEqual # Functions eq(repr(hash), '') # Methods @@ -149,13 +149,13 @@ '") # XXX member descriptors @@ -244,7 +244,7 @@ del sys.path[0] def test_module(self): - eq = self.assertEquals + eq = self.assertEqual touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py')) from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation eq(repr(areallylongpackageandmodulenametotestreprtruncation), @@ -252,7 +252,7 @@ eq(repr(sys), "") def test_type(self): - eq = self.assertEquals + eq = self.assertEqual touch(os.path.join(self.subpkgname, 'foo'+os.extsep+'py'), '''\ class foo(object): pass @@ -287,7 +287,7 @@ "<%s.baz instance at 0x" % baz.__name__)) def test_method(self): - eq = self.assertEquals + eq = self.assertEqual touch(os.path.join(self.subpkgname, 'qux'+os.extsep+'py'), '''\ class aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: def amethod(self): pass diff --git a/lib-python/2.7.0/idlelib/IOBinding.py b/lib-python/2.7.0/idlelib/IOBinding.py --- a/lib-python/2.7.0/idlelib/IOBinding.py +++ b/lib-python/2.7.0/idlelib/IOBinding.py @@ -521,8 +521,8 @@ savedialog = None filetypes = [ - ("Python and text files", "*.py *.pyw *.txt", "TEXT"), - ("All text files", "*", "TEXT"), + ("Python files", "*.py *.pyw", "TEXT"), + ("Text files", "*.txt", "TEXT"), ("All files", "*"), ] diff --git a/lib-python/2.7.0/distutils/command/bdist_rpm.py b/lib-python/2.7.0/distutils/command/bdist_rpm.py --- a/lib-python/2.7.0/distutils/command/bdist_rpm.py +++ b/lib-python/2.7.0/distutils/command/bdist_rpm.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" -__revision__ = "$Id: 
bdist_rpm.py 76956 2009-12-21 01:22:46Z tarek.ziade $" +__revision__ = "$Id$" import sys import os @@ -355,22 +355,26 @@ src_rpm, non_src_rpm, spec_path) out = os.popen(q_cmd) - binary_rpms = [] - source_rpm = None - while 1: - line = out.readline() - if not line: - break - l = string.split(string.strip(line)) - assert(len(l) == 2) - binary_rpms.append(l[1]) - # The source rpm is named after the first entry in the spec file - if source_rpm is None: - source_rpm = l[0] + try: + binary_rpms = [] + source_rpm = None + while 1: + line = out.readline() + if not line: + break + l = string.split(string.strip(line)) + assert(len(l) == 2) + binary_rpms.append(l[1]) + # The source rpm is named after the first entry in the spec file + if source_rpm is None: + source_rpm = l[0] - status = out.close() - if status: - raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd)) + status = out.close() + if status: + raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd)) + + finally: + out.close() self.spawn(rpm_cmd) diff --git a/lib-python/2.7.0/distutils/tests/test_bdist.py b/lib-python/2.7.0/distutils/tests/test_bdist.py --- a/lib-python/2.7.0/distutils/tests/test_bdist.py +++ b/lib-python/2.7.0/distutils/tests/test_bdist.py @@ -25,7 +25,7 @@ cmd = bdist(dist) cmd.formats = ['msi'] cmd.ensure_finalized() - self.assertEquals(cmd.formats, ['msi']) + self.assertEqual(cmd.formats, ['msi']) # what format bdist offers ? 
# XXX an explicit list in bdist is @@ -36,7 +36,7 @@ formats.sort() founded = cmd.format_command.keys() founded.sort() - self.assertEquals(founded, formats) + self.assertEqual(founded, formats) def test_suite(): return unittest.makeSuite(BuildTestCase) diff --git a/lib-python/2.7.0/httplib.py b/lib-python/2.7.0/httplib.py --- a/lib-python/2.7.0/httplib.py +++ b/lib-python/2.7.0/httplib.py @@ -879,6 +879,9 @@ host_enc = self.host.encode("ascii") except UnicodeEncodeError: host_enc = self.host.encode("idna") + # Wrap the IPv6 Host Header with [] (RFC 2732) + if host_enc.find(':') >= 0: + host_enc = "[" + host_enc + "]" if self.port == self.default_port: self.putheader('Host', host_enc) else: diff --git a/lib-python/2.7.0/test/test_class.py b/lib-python/2.7.0/test/test_class.py --- a/lib-python/2.7.0/test/test_class.py +++ b/lib-python/2.7.0/test/test_class.py @@ -475,7 +475,7 @@ del testme import gc gc.collect() - self.assertEquals(["crab people, crab people"], x) + self.assertEqual(["crab people, crab people"], x) def testBadTypeReturned(self): # return values of some method are type-checked @@ -507,14 +507,14 @@ callLst[:] = [] as_int = int(mixIntAndLong) - self.assertEquals(type(as_int), long) - self.assertEquals(as_int, 42L) + self.assertEqual(type(as_int), long) + self.assertEqual(as_int, 42L) self.assertCallStack([('__int__', (mixIntAndLong,))]) callLst[:] = [] as_long = long(mixIntAndLong) - self.assertEquals(type(as_long), long) - self.assertEquals(as_long, 64) + self.assertEqual(type(as_long), long) + self.assertEqual(as_long, 64) self.assertCallStack([('__long__', (mixIntAndLong,))]) def testHashStuff(self): @@ -599,17 +599,17 @@ a1 = A(1) a2 = A(2) - self.assertEquals(a1.f, a1.f) - self.assertNotEquals(a1.f, a2.f) - self.assertNotEquals(a1.f, a1.g) - self.assertEquals(a1.f, A(1).f) - self.assertEquals(hash(a1.f), hash(a1.f)) - self.assertEquals(hash(a1.f), hash(A(1).f)) + self.assertEqual(a1.f, a1.f) + self.assertNotEqual(a1.f, a2.f) + 
self.assertNotEqual(a1.f, a1.g) + self.assertEqual(a1.f, A(1).f) + self.assertEqual(hash(a1.f), hash(a1.f)) + self.assertEqual(hash(a1.f), hash(A(1).f)) - self.assertNotEquals(A.f, a1.f) - self.assertNotEquals(A.f, A.g) - self.assertEquals(B.f, A.f) - self.assertEquals(hash(B.f), hash(A.f)) + self.assertNotEqual(A.f, a1.f) + self.assertNotEqual(A.f, A.g) + self.assertEqual(B.f, A.f) + self.assertEqual(hash(B.f), hash(A.f)) # the following triggers a SystemError in 2.4 a = A(hash(A.f.im_func)^(-1)) diff --git a/lib-python/2.7.0/distutils/tests/test_build_clib.py b/lib-python/2.7.0/distutils/tests/test_build_clib.py --- a/lib-python/2.7.0/distutils/tests/test_build_clib.py +++ b/lib-python/2.7.0/distutils/tests/test_build_clib.py @@ -55,14 +55,14 @@ self.assertRaises(DistutilsSetupError, cmd.get_source_files) cmd.libraries = [('name', {'sources': ['a', 'b']})] - self.assertEquals(cmd.get_source_files(), ['a', 'b']) + self.assertEqual(cmd.get_source_files(), ['a', 'b']) cmd.libraries = [('name', {'sources': ('a', 'b')})] - self.assertEquals(cmd.get_source_files(), ['a', 'b']) + self.assertEqual(cmd.get_source_files(), ['a', 'b']) cmd.libraries = [('name', {'sources': ('a', 'b')}), ('name2', {'sources': ['c', 'd']})] - self.assertEquals(cmd.get_source_files(), ['a', 'b', 'c', 'd']) + self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd']) def test_build_libraries(self): @@ -91,11 +91,11 @@ cmd.include_dirs = 'one-dir' cmd.finalize_options() - self.assertEquals(cmd.include_dirs, ['one-dir']) + self.assertEqual(cmd.include_dirs, ['one-dir']) cmd.include_dirs = None cmd.finalize_options() - self.assertEquals(cmd.include_dirs, []) + self.assertEqual(cmd.include_dirs, []) cmd.distribution.libraries = 'WONTWORK' self.assertRaises(DistutilsSetupError, cmd.finalize_options) diff --git a/lib-python/2.7.0/distutils/command/bdist_wininst.py b/lib-python/2.7.0/distutils/command/bdist_wininst.py --- a/lib-python/2.7.0/distutils/command/bdist_wininst.py +++ 
b/lib-python/2.7.0/distutils/command/bdist_wininst.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist_wininst' command: create a windows installer exe-program.""" -__revision__ = "$Id: bdist_wininst.py 83593 2010-08-02 21:44:25Z georg.brandl $" +__revision__ = "$Id$" import sys import os @@ -356,5 +356,9 @@ sfix = '' filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix)) - return open(filename, "rb").read() + f = open(filename, "rb") + try: + return f.read() + finally: + f.close() # class bdist_wininst diff --git a/lib-python/2.7.0/test/test_socketserver.py b/lib-python/2.7.0/test/test_socketserver.py --- a/lib-python/2.7.0/test/test_socketserver.py +++ b/lib-python/2.7.0/test/test_socketserver.py @@ -57,8 +57,8 @@ os._exit(72) yield None pid2, status = os.waitpid(pid, 0) - testcase.assertEquals(pid2, pid) - testcase.assertEquals(72 << 8, status) + testcase.assertEqual(pid2, pid) + testcase.assertEqual(72 << 8, status) @unittest.skipUnless(threading, 'Threading required for this test.') @@ -120,7 +120,7 @@ if verbose: print "creating server" server = MyServer(addr, MyHandler) - self.assertEquals(server.server_address, server.socket.getsockname()) + self.assertEqual(server.server_address, server.socket.getsockname()) return server @unittest.skipUnless(threading, 'Threading required for this test.') @@ -161,7 +161,7 @@ while data and '\n' not in buf: data = receive(s, 100) buf += data - self.assertEquals(buf, TEST_STR) + self.assertEqual(buf, TEST_STR) s.close() def dgram_examine(self, proto, addr): @@ -171,7 +171,7 @@ while data and '\n' not in buf: data = receive(s, 100) buf += data - self.assertEquals(buf, TEST_STR) + self.assertEqual(buf, TEST_STR) s.close() def test_TCPServer(self): diff --git a/lib-python/2.7.0/test/test_structmembers.py b/lib-python/2.7.0/test/test_structmembers.py --- a/lib-python/2.7.0/test/test_structmembers.py +++ b/lib-python/2.7.0/test/test_structmembers.py @@ -15,61 +15,61 @@ def test_bool(self): ts.T_BOOL = True - 
self.assertEquals(ts.T_BOOL, True) + self.assertEqual(ts.T_BOOL, True) ts.T_BOOL = False - self.assertEquals(ts.T_BOOL, False) + self.assertEqual(ts.T_BOOL, False) self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1) def test_byte(self): ts.T_BYTE = CHAR_MAX - self.assertEquals(ts.T_BYTE, CHAR_MAX) + self.assertEqual(ts.T_BYTE, CHAR_MAX) ts.T_BYTE = CHAR_MIN - self.assertEquals(ts.T_BYTE, CHAR_MIN) + self.assertEqual(ts.T_BYTE, CHAR_MIN) ts.T_UBYTE = UCHAR_MAX - self.assertEquals(ts.T_UBYTE, UCHAR_MAX) + self.assertEqual(ts.T_UBYTE, UCHAR_MAX) def test_short(self): ts.T_SHORT = SHRT_MAX - self.assertEquals(ts.T_SHORT, SHRT_MAX) + self.assertEqual(ts.T_SHORT, SHRT_MAX) ts.T_SHORT = SHRT_MIN - self.assertEquals(ts.T_SHORT, SHRT_MIN) + self.assertEqual(ts.T_SHORT, SHRT_MIN) ts.T_USHORT = USHRT_MAX - self.assertEquals(ts.T_USHORT, USHRT_MAX) + self.assertEqual(ts.T_USHORT, USHRT_MAX) def test_int(self): ts.T_INT = INT_MAX - self.assertEquals(ts.T_INT, INT_MAX) + self.assertEqual(ts.T_INT, INT_MAX) ts.T_INT = INT_MIN - self.assertEquals(ts.T_INT, INT_MIN) + self.assertEqual(ts.T_INT, INT_MIN) ts.T_UINT = UINT_MAX - self.assertEquals(ts.T_UINT, UINT_MAX) + self.assertEqual(ts.T_UINT, UINT_MAX) def test_long(self): ts.T_LONG = LONG_MAX - self.assertEquals(ts.T_LONG, LONG_MAX) + self.assertEqual(ts.T_LONG, LONG_MAX) ts.T_LONG = LONG_MIN - self.assertEquals(ts.T_LONG, LONG_MIN) + self.assertEqual(ts.T_LONG, LONG_MIN) ts.T_ULONG = ULONG_MAX - self.assertEquals(ts.T_ULONG, ULONG_MAX) + self.assertEqual(ts.T_ULONG, ULONG_MAX) @unittest.skipUnless(hasattr(ts, "T_LONGLONG"), "long long not present") def test_longlong(self): ts.T_LONGLONG = LLONG_MAX - self.assertEquals(ts.T_LONGLONG, LLONG_MAX) + self.assertEqual(ts.T_LONGLONG, LLONG_MAX) ts.T_LONGLONG = LLONG_MIN - self.assertEquals(ts.T_LONGLONG, LLONG_MIN) + self.assertEqual(ts.T_LONGLONG, LLONG_MIN) ts.T_ULONGLONG = ULLONG_MAX - self.assertEquals(ts.T_ULONGLONG, ULLONG_MAX) + self.assertEqual(ts.T_ULONGLONG, ULLONG_MAX) ## 
make sure these will accept a plain int as well as a long ts.T_LONGLONG = 3 - self.assertEquals(ts.T_LONGLONG, 3) + self.assertEqual(ts.T_LONGLONG, 3) ts.T_ULONGLONG = 4 - self.assertEquals(ts.T_ULONGLONG, 4) + self.assertEqual(ts.T_ULONGLONG, 4) def test_inplace_string(self): - self.assertEquals(ts.T_STRING_INPLACE, "hi") + self.assertEqual(ts.T_STRING_INPLACE, "hi") self.assertRaises(TypeError, setattr, ts, "T_STRING_INPLACE", "s") self.assertRaises(TypeError, delattr, ts, "T_STRING_INPLACE") diff --git a/lib-python/2.7.0/test/test_gzip.py b/lib-python/2.7.0/test/test_gzip.py --- a/lib-python/2.7.0/test/test_gzip.py +++ b/lib-python/2.7.0/test/test_gzip.py @@ -102,7 +102,7 @@ ztxt = zgfile.read(8192) contents += ztxt if not ztxt: break - self.assertEquals(contents, 'a'*201) + self.assertEqual(contents, 'a'*201) def test_buffered_reader(self): # Issue #7471: a GzipFile can be wrapped in a BufferedReader for @@ -166,7 +166,7 @@ f.read(10) f.seek(10, whence=1) y = f.read(10) - self.assertEquals(y, data1[20:30]) + self.assertEqual(y, data1[20:30]) def test_seek_write(self): # Try seek, write test diff --git a/lib-python/2.7.0/test/test_pyexpat.py b/lib-python/2.7.0/test/test_pyexpat.py --- a/lib-python/2.7.0/test/test_pyexpat.py +++ b/lib-python/2.7.0/test/test_pyexpat.py @@ -22,17 +22,17 @@ def test_returns_unicode(self): for x, y in self.set_get_pairs: self.parser.returns_unicode = x - self.assertEquals(self.parser.returns_unicode, y) + self.assertEqual(self.parser.returns_unicode, y) def test_ordered_attributes(self): for x, y in self.set_get_pairs: self.parser.ordered_attributes = x - self.assertEquals(self.parser.ordered_attributes, y) + self.assertEqual(self.parser.ordered_attributes, y) def test_specified_attributes(self): for x, y in self.set_get_pairs: self.parser.specified_attributes = x - self.assertEquals(self.parser.specified_attributes, y) + self.assertEqual(self.parser.specified_attributes, y) data = '''\ @@ -140,23 +140,23 @@ # Verify output op = 
out.out - self.assertEquals(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'') - self.assertEquals(op[1], "Comment: ' comment data '") - self.assertEquals(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)") - self.assertEquals(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')") - self.assertEquals(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}") - self.assertEquals(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'") - self.assertEquals(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}") - self.assertEquals(op[7], "Character data: 'Contents of subelements'") - self.assertEquals(op[8], "End element: 'http://www.python.org/namespace!subelement'") - self.assertEquals(op[9], "End of NS decl: 'myns'") - self.assertEquals(op[10], "Start element: 'sub2' {}") - self.assertEquals(op[11], 'Start of CDATA section') - self.assertEquals(op[12], "Character data: 'contents of CDATA section'") - self.assertEquals(op[13], 'End of CDATA section') - self.assertEquals(op[14], "End element: 'sub2'") - self.assertEquals(op[15], "External entity ref: (None, 'entity.file', None)") - self.assertEquals(op[16], "End element: 'root'") + self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'') + self.assertEqual(op[1], "Comment: ' comment data '") + self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)") + self.assertEqual(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')") + self.assertEqual(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}") + self.assertEqual(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'") + self.assertEqual(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}") + self.assertEqual(op[7], "Character data: 'Contents of subelements'") + self.assertEqual(op[8], "End element: 
'http://www.python.org/namespace!subelement'") + self.assertEqual(op[9], "End of NS decl: 'myns'") + self.assertEqual(op[10], "Start element: 'sub2' {}") + self.assertEqual(op[11], 'Start of CDATA section') + self.assertEqual(op[12], "Character data: 'contents of CDATA section'") + self.assertEqual(op[13], 'End of CDATA section') + self.assertEqual(op[14], "End element: 'sub2'") + self.assertEqual(op[15], "External entity ref: (None, 'entity.file', None)") + self.assertEqual(op[16], "End element: 'root'") def test_unicode(self): # Try the parse again, this time producing Unicode output @@ -169,23 +169,23 @@ parser.Parse(data, 1) op = out.out - self.assertEquals(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') - self.assertEquals(op[1], "Comment: u' comment data '") - self.assertEquals(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") - self.assertEquals(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") - self.assertEquals(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") - self.assertEquals(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") - self.assertEquals(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") - self.assertEquals(op[7], "Character data: u'Contents of subelements'") - self.assertEquals(op[8], "End element: u'http://www.python.org/namespace!subelement'") - self.assertEquals(op[9], "End of NS decl: u'myns'") - self.assertEquals(op[10], "Start element: u'sub2' {}") - self.assertEquals(op[11], 'Start of CDATA section') - self.assertEquals(op[12], "Character data: u'contents of CDATA section'") - self.assertEquals(op[13], 'End of CDATA section') - self.assertEquals(op[14], "End element: u'sub2'") - self.assertEquals(op[15], "External entity ref: (None, u'entity.file', None)") - self.assertEquals(op[16], "End element: u'root'") + self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') + 
self.assertEqual(op[1], "Comment: u' comment data '") + self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") + self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") + self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") + self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") + self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") + self.assertEqual(op[7], "Character data: u'Contents of subelements'") + self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'") + self.assertEqual(op[9], "End of NS decl: u'myns'") + self.assertEqual(op[10], "Start element: u'sub2' {}") + self.assertEqual(op[11], 'Start of CDATA section') + self.assertEqual(op[12], "Character data: u'contents of CDATA section'") + self.assertEqual(op[13], 'End of CDATA section') + self.assertEqual(op[14], "End element: u'sub2'") + self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)") + self.assertEqual(op[16], "End element: u'root'") def test_parse_file(self): # Try parsing a file @@ -199,23 +199,23 @@ parser.ParseFile(file) op = out.out - self.assertEquals(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') - self.assertEquals(op[1], "Comment: u' comment data '") - self.assertEquals(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") - self.assertEquals(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") - self.assertEquals(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") - self.assertEquals(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") - self.assertEquals(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") - self.assertEquals(op[7], "Character data: u'Contents of subelements'") - self.assertEquals(op[8], "End 
element: u'http://www.python.org/namespace!subelement'") - self.assertEquals(op[9], "End of NS decl: u'myns'") - self.assertEquals(op[10], "Start element: u'sub2' {}") - self.assertEquals(op[11], 'Start of CDATA section') - self.assertEquals(op[12], "Character data: u'contents of CDATA section'") - self.assertEquals(op[13], 'End of CDATA section') - self.assertEquals(op[14], "End element: u'sub2'") - self.assertEquals(op[15], "External entity ref: (None, u'entity.file', None)") - self.assertEquals(op[16], "End element: u'root'") + self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') + self.assertEqual(op[1], "Comment: u' comment data '") + self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") + self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") + self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") + self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") + self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") + self.assertEqual(op[7], "Character data: u'Contents of subelements'") + self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'") + self.assertEqual(op[9], "End of NS decl: u'myns'") + self.assertEqual(op[10], "Start element: u'sub2' {}") + self.assertEqual(op[11], 'Start of CDATA section') + self.assertEqual(op[12], "Character data: u'contents of CDATA section'") + self.assertEqual(op[13], 'End of CDATA section') + self.assertEqual(op[14], "End element: u'sub2'") + self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)") + self.assertEqual(op[16], "End element: u'root'") class NamespaceSeparatorTest(unittest.TestCase): @@ -231,14 +231,14 @@ expat.ParserCreate(namespace_separator=42) self.fail() except TypeError, e: - self.assertEquals(str(e), + self.assertEqual(str(e), 'ParserCreate() 
argument 2 must be string or None, not int') try: expat.ParserCreate(namespace_separator='too long') self.fail() except ValueError, e: - self.assertEquals(str(e), + self.assertEqual(str(e), 'namespace_separator must be at most one character, omitted, or None') def test_zero_length(self): @@ -264,7 +264,7 @@ p.EndElementHandler = collector p.Parse(" ", 1) tag = L[0] - self.assertEquals(len(L), 6) + self.assertEqual(len(L), 6) for entry in L: # L should have the same string repeated over and over. self.assertTrue(tag is entry) @@ -278,7 +278,7 @@ self.parser.CharacterDataHandler = self.CharacterDataHandler def check(self, expected, label): - self.assertEquals(self.stuff, expected, + self.assertEqual(self.stuff, expected, "%s\nstuff = %r\nexpected = %r" % (label, self.stuff, map(unicode, expected))) @@ -311,47 +311,47 @@ # Make sure buffering is turned on self.assertTrue(self.parser.buffer_text) self.parser.Parse("123", 1) - self.assertEquals(self.stuff, ['123'], - "buffered text not properly collapsed") + self.assertEqual(self.stuff, ['123'], + "buffered text not properly collapsed") def test1(self): # XXX This test exposes more detail of Expat's text chunking than we # XXX like, but it tests what we need to concisely. 
self.setHandlers(["StartElementHandler"]) self.parser.Parse("12\n34\n5", 1) - self.assertEquals(self.stuff, - ["", "1", "", "2", "\n", "3", "", "4\n5"], - "buffering control not reacting as expected") + self.assertEqual(self.stuff, + ["", "1", "", "2", "\n", "3", "", "4\n5"], + "buffering control not reacting as expected") def test2(self): self.parser.Parse("1<2> \n 3", 1) - self.assertEquals(self.stuff, ["1<2> \n 3"], - "buffered text not properly collapsed") + self.assertEqual(self.stuff, ["1<2> \n 3"], + "buffered text not properly collapsed") def test3(self): self.setHandlers(["StartElementHandler"]) self.parser.Parse("123", 1) - self.assertEquals(self.stuff, ["", "1", "", "2", "", "3"], + self.assertEqual(self.stuff, ["", "1", "", "2", "", "3"], "buffered text not properly split") def test4(self): self.setHandlers(["StartElementHandler", "EndElementHandler"]) self.parser.CharacterDataHandler = None self.parser.Parse("123", 1) - self.assertEquals(self.stuff, - ["", "", "", "", "", ""]) + self.assertEqual(self.stuff, + ["", "", "", "", "", ""]) def test5(self): self.setHandlers(["StartElementHandler", "EndElementHandler"]) self.parser.Parse("123", 1) - self.assertEquals(self.stuff, + self.assertEqual(self.stuff, ["", "1", "", "", "2", "", "", "3", ""]) def test6(self): self.setHandlers(["CommentHandler", "EndElementHandler", "StartElementHandler"]) self.parser.Parse("12345 ", 1) - self.assertEquals(self.stuff, + self.assertEqual(self.stuff, ["", "1", "", "", "2", "", "", "345", ""], "buffered text not properly split") @@ -359,10 +359,10 @@ self.setHandlers(["CommentHandler", "EndElementHandler", "StartElementHandler"]) self.parser.Parse("12345 ", 1) - self.assertEquals(self.stuff, - ["", "1", "", "", "2", "", "", "3", - "", "4", "", "5", ""], - "buffered text not properly split") + self.assertEqual(self.stuff, + ["", "1", "", "", "2", "", "", "3", + "", "4", "", "5", ""], + "buffered text not properly split") # Test handling of exception from callback: @@ -377,9 
+377,9 @@ parser.Parse("", 1) self.fail() except RuntimeError, e: - self.assertEquals(e.args[0], 'a', - "Expected RuntimeError for element 'a', but" + \ - " found %r" % e.args[0]) + self.assertEqual(e.args[0], 'a', + "Expected RuntimeError for element 'a', but" + \ + " found %r" % e.args[0]) # Test Current* members: @@ -398,7 +398,7 @@ self.assertTrue(self.upto < len(self.expected_list), 'too many parser events') expected = self.expected_list[self.upto] - self.assertEquals(pos, expected, + self.assertEqual(pos, expected, 'Expected position %s, got position %s' %(pos, expected)) self.upto += 1 @@ -439,10 +439,10 @@ """ def test_1025_bytes(self): - self.assertEquals(self.small_buffer_test(1025), 2) + self.assertEqual(self.small_buffer_test(1025), 2) def test_1000_bytes(self): - self.assertEquals(self.small_buffer_test(1000), 1) + self.assertEqual(self.small_buffer_test(1000), 1) def test_wrong_size(self): parser = expat.ParserCreate() @@ -466,15 +466,15 @@ # once. self.n = 0 parser.Parse(xml1) - self.assertEquals(self.n, 1) + self.assertEqual(self.n, 1) # Reassign to buffer_size, but assign the same size. parser.buffer_size = parser.buffer_size - self.assertEquals(self.n, 1) + self.assertEqual(self.n, 1) # Try parsing rest of the document parser.Parse(xml2) - self.assertEquals(self.n, 2) + self.assertEqual(self.n, 2) def test_disabling_buffer(self): @@ -485,27 +485,27 @@ parser.CharacterDataHandler = self.counting_handler parser.buffer_text = 1 parser.buffer_size = 1024 - self.assertEquals(parser.buffer_size, 1024) + self.assertEqual(parser.buffer_size, 1024) # Parse one chunk of XML self.n = 0 parser.Parse(xml1, 0) - self.assertEquals(parser.buffer_size, 1024) - self.assertEquals(self.n, 1) + self.assertEqual(parser.buffer_size, 1024) + self.assertEqual(self.n, 1) # Turn off buffering and parse the next chunk. 
parser.buffer_text = 0 self.assertFalse(parser.buffer_text) - self.assertEquals(parser.buffer_size, 1024) + self.assertEqual(parser.buffer_size, 1024) for i in range(10): parser.Parse(xml2, 0) - self.assertEquals(self.n, 11) + self.assertEqual(self.n, 11) parser.buffer_text = 1 self.assertTrue(parser.buffer_text) - self.assertEquals(parser.buffer_size, 1024) + self.assertEqual(parser.buffer_size, 1024) parser.Parse(xml3, 1) - self.assertEquals(self.n, 12) + self.assertEqual(self.n, 12) @@ -533,14 +533,14 @@ parser.CharacterDataHandler = self.counting_handler parser.buffer_text = 1 parser.buffer_size = 1024 - self.assertEquals(parser.buffer_size, 1024) + self.assertEqual(parser.buffer_size, 1024) self.n = 0 parser.Parse(xml1, 0) parser.buffer_size *= 2 - self.assertEquals(parser.buffer_size, 2048) + self.assertEqual(parser.buffer_size, 2048) parser.Parse(xml2, 1) - self.assertEquals(self.n, 2) + self.assertEqual(self.n, 2) def test_change_size_2(self): xml1 = "a%s" % ('a' * 1023) @@ -549,14 +549,14 @@ parser.CharacterDataHandler = self.counting_handler parser.buffer_text = 1 parser.buffer_size = 2048 - self.assertEquals(parser.buffer_size, 2048) + self.assertEqual(parser.buffer_size, 2048) self.n=0 parser.Parse(xml1, 0) parser.buffer_size //= 2 - self.assertEquals(parser.buffer_size, 1024) + self.assertEqual(parser.buffer_size, 1024) parser.Parse(xml2, 1) - self.assertEquals(self.n, 4) + self.assertEqual(self.n, 4) class MalformedInputText(unittest.TestCase): def test1(self): @@ -566,7 +566,7 @@ parser.Parse(xml, True) self.fail() except expat.ExpatError as e: - self.assertEquals(str(e), 'unclosed token: line 2, column 0') + self.assertEqual(str(e), 'unclosed token: line 2, column 0') def test2(self): xml = "\r\n" @@ -575,7 +575,7 @@ parser.Parse(xml, True) self.fail() except expat.ExpatError as e: - self.assertEquals(str(e), 'XML declaration not well-formed: line 1, column 14') + self.assertEqual(str(e), 'XML declaration not well-formed: line 1, column 14') def 
test_main(): run_unittest(SetAttributeTest, diff --git a/lib-python/2.7.0/email/test/test_email_renamed.py b/lib-python/2.7.0/email/test/test_email_renamed.py --- a/lib-python/2.7.0/email/test/test_email_renamed.py +++ b/lib-python/2.7.0/email/test/test_email_renamed.py @@ -41,13 +41,13 @@ SPACE = ' ' - + def openfile(filename, mode='r'): path = os.path.join(os.path.dirname(landmark), 'data', filename) return open(path, mode) - + # Base test class class TestEmailBase(unittest.TestCase): def ndiffAssertEqual(self, first, second): @@ -69,7 +69,7 @@ return msg - + # Test various aspects of the Message class's API class TestMessageAPI(TestEmailBase): def test_get_all(self): @@ -504,7 +504,7 @@ self.assertEqual(msg.get_payload(decode=True), x) - + # Test the email.encoders module class TestEncoders(unittest.TestCase): def test_encode_empty_payload(self): @@ -531,7 +531,7 @@ eq(msg['content-transfer-encoding'], 'quoted-printable') - + # Test long header wrapping class TestLongHeaders(TestEmailBase): def test_split_long_continuation(self): @@ -852,7 +852,7 @@ """) - + # Test mangling of "From " lines in the body of a message class TestFromMangling(unittest.TestCase): def setUp(self): @@ -886,7 +886,7 @@ """) - + # Test the basic MIMEAudio class class TestMIMEAudio(unittest.TestCase): def setUp(self): @@ -935,7 +935,7 @@ header='foobar') is missing) - + # Test the basic MIMEImage class class TestMIMEImage(unittest.TestCase): def setUp(self): @@ -978,7 +978,7 @@ header='foobar') is missing) - + # Test the basic MIMEApplication class class TestMIMEApplication(unittest.TestCase): def test_headers(self): @@ -995,7 +995,7 @@ eq(msg.get_payload(decode=True), bytes) - + # Test the basic MIMEText class class TestMIMEText(unittest.TestCase): def setUp(self): @@ -1022,7 +1022,7 @@ eq(msg['content-type'], 'text/plain; charset="us-ascii"') - + # Test complicated multipart/* messages class TestMultipart(TestEmailBase): def setUp(self): @@ -1398,10 +1398,10 @@ YXNkZg== 
--===============0012394164==--""") - self.assertEquals(m.get_payload(0).get_payload(), 'YXNkZg==') - - - + self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==') + + + # Test some badly formatted messages class TestNonConformant(TestEmailBase): def test_parse_missing_minor_type(self): @@ -1515,7 +1515,7 @@ eq(msg.defects[0].line, ' Line 1\n') - + # Test RFC 2047 header encoding and decoding class TestRFC2047(unittest.TestCase): def test_rfc2047_multiline(self): @@ -1562,7 +1562,7 @@ ('sbord', None)]) - + # Test the MIMEMessage class class TestMIMEMessage(TestEmailBase): def setUp(self): @@ -1872,7 +1872,7 @@ eq(msg.get_payload(1), text2) - + # A general test of parser->model->generator idempotency. IOW, read a message # in, parse it into a message object tree, then without touching the tree, # regenerate the plain text. The original text and the transformed text @@ -1896,7 +1896,7 @@ eq(text, s.getvalue()) def test_parse_text_message(self): - eq = self.assertEquals + eq = self.assertEqual msg, text = self._msgobj('msg_01.txt') eq(msg.get_content_type(), 'text/plain') eq(msg.get_content_maintype(), 'text') @@ -1908,7 +1908,7 @@ self._idempotent(msg, text) def test_parse_untyped_message(self): - eq = self.assertEquals + eq = self.assertEqual msg, text = self._msgobj('msg_03.txt') eq(msg.get_content_type(), 'text/plain') eq(msg.get_params(), None) @@ -1980,7 +1980,7 @@ self._idempotent(msg, text) def test_content_type(self): - eq = self.assertEquals + eq = self.assertEqual unless = self.assertTrue # Get a message object and reset the seek pointer for other tests msg, text = self._msgobj('msg_05.txt') @@ -2012,7 +2012,7 @@ eq(msg4.get_payload(), 'Yadda yadda yadda\n') def test_parser(self): - eq = self.assertEquals + eq = self.assertEqual unless = self.assertTrue msg, text = self._msgobj('msg_06.txt') # Check some of the outer headers @@ -2029,7 +2029,7 @@ eq(msg1.get_payload(), '\n') - + # Test various other bits of the package's functionality class 
TestMiscellaneous(TestEmailBase): def test_message_from_string(self): @@ -2354,7 +2354,7 @@ """) - + # Test the iterator/generators class TestIterators(TestEmailBase): def test_body_line_iterator(self): @@ -2414,7 +2414,7 @@ """) - + class TestParsers(TestEmailBase): def test_header_parser(self): eq = self.assertEqual @@ -2559,7 +2559,7 @@ eq(msg.get_payload(), 'body') - + class TestBase64(unittest.TestCase): def test_len(self): eq = self.assertEqual @@ -2631,7 +2631,7 @@ =?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""") - + class TestQuopri(unittest.TestCase): def setUp(self): self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \ @@ -2741,7 +2741,7 @@ two line""") - + # Test the Charset class class TestCharset(unittest.TestCase): def tearDown(self): @@ -2799,7 +2799,7 @@ self.assertRaises(errors.CharsetError, Charset, 'asc\xffii') - + # Test multilingual MIME headers. class TestHeader(TestEmailBase): def test_simple(self): @@ -2962,7 +2962,7 @@ raises(errors.HeaderParseError, decode_header, s) - + # Test RFC 2231 header parameters (en/de)coding class TestRFC2231(TestEmailBase): def test_get_param(self): @@ -3274,7 +3274,7 @@ eq(s, 'My Document For You') - + def _testclasses(): mod = sys.modules[__name__] return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')] @@ -3292,6 +3292,6 @@ run_unittest(testclass) - + if __name__ == '__main__': unittest.main(defaultTest='suite') diff --git a/lib-python/2.7.0/distutils/unixccompiler.py b/lib-python/2.7.0/distutils/unixccompiler.py --- a/lib-python/2.7.0/distutils/unixccompiler.py +++ b/lib-python/2.7.0/distutils/unixccompiler.py @@ -13,7 +13,7 @@ * link shared library handled by 'cc -shared' """ -__revision__ = "$Id: unixccompiler.py 82272 2010-06-27 12:36:16Z ronald.oussoren $" +__revision__ = "$Id$" import os, sys, re from types import StringType, NoneType diff --git a/lib-python/2.7.0/calendar.py b/lib-python/2.7.0/calendar.py --- a/lib-python/2.7.0/calendar.py +++ b/lib-python/2.7.0/calendar.py @@ 
-486,8 +486,8 @@ self.locale = locale def __enter__(self): - self.oldlocale = _locale.setlocale(_locale.LC_TIME, self.locale) - return _locale.getlocale(_locale.LC_TIME)[1] + self.oldlocale = _locale.getlocale(_locale.LC_TIME) + _locale.setlocale(_locale.LC_TIME, self.locale) def __exit__(self, *args): _locale.setlocale(_locale.LC_TIME, self.oldlocale) diff --git a/lib-python/2.7.0/test/test_commands.py b/lib-python/2.7.0/test/test_commands.py --- a/lib-python/2.7.0/test/test_commands.py +++ b/lib-python/2.7.0/test/test_commands.py @@ -24,8 +24,8 @@ class CommandTests(unittest.TestCase): def test_getoutput(self): - self.assertEquals(commands.getoutput('echo xyzzy'), 'xyzzy') - self.assertEquals(commands.getstatusoutput('echo xyzzy'), (0, 'xyzzy')) + self.assertEqual(commands.getoutput('echo xyzzy'), 'xyzzy') + self.assertEqual(commands.getstatusoutput('echo xyzzy'), (0, 'xyzzy')) # we use mkdtemp in the next line to create an empty directory # under our exclusive control; from that, we can invent a pathname @@ -36,7 +36,7 @@ name = os.path.join(dir, "foo") status, output = commands.getstatusoutput('cat ' + name) - self.assertNotEquals(status, 0) + self.assertNotEqual(status, 0) finally: if dir is not None: os.rmdir(dir) diff --git a/lib-python/2.7.0/test/test_functools.py b/lib-python/2.7.0/test/test_functools.py --- a/lib-python/2.7.0/test/test_functools.py +++ b/lib-python/2.7.0/test/test_functools.py @@ -361,12 +361,12 @@ self.value = value def __lt__(self, other): return self.value < other.value - self.assert_(A(1) < A(2)) - self.assert_(A(2) > A(1)) - self.assert_(A(1) <= A(2)) - self.assert_(A(2) >= A(1)) - self.assert_(A(2) <= A(2)) - self.assert_(A(2) >= A(2)) + self.assertTrue(A(1) < A(2)) + self.assertTrue(A(2) > A(1)) + self.assertTrue(A(1) <= A(2)) + self.assertTrue(A(2) >= A(1)) + self.assertTrue(A(2) <= A(2)) + self.assertTrue(A(2) >= A(2)) def test_total_ordering_le(self): @functools.total_ordering @@ -375,12 +375,12 @@ self.value = value def 
__le__(self, other): return self.value <= other.value - self.assert_(A(1) < A(2)) - self.assert_(A(2) > A(1)) - self.assert_(A(1) <= A(2)) - self.assert_(A(2) >= A(1)) - self.assert_(A(2) <= A(2)) - self.assert_(A(2) >= A(2)) + self.assertTrue(A(1) < A(2)) + self.assertTrue(A(2) > A(1)) + self.assertTrue(A(1) <= A(2)) + self.assertTrue(A(2) >= A(1)) + self.assertTrue(A(2) <= A(2)) + self.assertTrue(A(2) >= A(2)) def test_total_ordering_gt(self): @functools.total_ordering @@ -389,12 +389,12 @@ self.value = value def __gt__(self, other): return self.value > other.value - self.assert_(A(1) < A(2)) - self.assert_(A(2) > A(1)) - self.assert_(A(1) <= A(2)) - self.assert_(A(2) >= A(1)) - self.assert_(A(2) <= A(2)) - self.assert_(A(2) >= A(2)) + self.assertTrue(A(1) < A(2)) + self.assertTrue(A(2) > A(1)) + self.assertTrue(A(1) <= A(2)) + self.assertTrue(A(2) >= A(1)) + self.assertTrue(A(2) <= A(2)) + self.assertTrue(A(2) >= A(2)) def test_total_ordering_ge(self): @functools.total_ordering @@ -403,24 +403,24 @@ self.value = value def __ge__(self, other): return self.value >= other.value - self.assert_(A(1) < A(2)) - self.assert_(A(2) > A(1)) - self.assert_(A(1) <= A(2)) - self.assert_(A(2) >= A(1)) - self.assert_(A(2) <= A(2)) - self.assert_(A(2) >= A(2)) + self.assertTrue(A(1) < A(2)) + self.assertTrue(A(2) > A(1)) + self.assertTrue(A(1) <= A(2)) + self.assertTrue(A(2) >= A(1)) + self.assertTrue(A(2) <= A(2)) + self.assertTrue(A(2) >= A(2)) def test_total_ordering_no_overwrite(self): # new methods should not overwrite existing @functools.total_ordering class A(str): pass - self.assert_(A("a") < A("b")) - self.assert_(A("b") > A("a")) - self.assert_(A("a") <= A("b")) - self.assert_(A("b") >= A("a")) - self.assert_(A("b") <= A("b")) - self.assert_(A("b") >= A("b")) + self.assertTrue(A("a") < A("b")) + self.assertTrue(A("b") > A("a")) + self.assertTrue(A("a") <= A("b")) + self.assertTrue(A("b") >= A("a")) + self.assertTrue(A("b") <= A("b")) + self.assertTrue(A("b") >= A("b")) 
def test_no_operations_defined(self): with self.assertRaises(ValueError): diff --git a/lib-python/2.7.0/test/test_rlcompleter.py b/lib-python/2.7.0/test/test_rlcompleter.py --- a/lib-python/2.7.0/test/test_rlcompleter.py +++ b/lib-python/2.7.0/test/test_rlcompleter.py @@ -31,9 +31,9 @@ def test_global_matches(self): # test with builtins namespace - self.assertEqual(self.stdcompleter.global_matches('di'), + self.assertEqual(sorted(self.stdcompleter.global_matches('di')), [x+'(' for x in dir(builtins) if x.startswith('di')]) - self.assertEqual(self.stdcompleter.global_matches('st'), + self.assertEqual(sorted(self.stdcompleter.global_matches('st')), [x+'(' for x in dir(builtins) if x.startswith('st')]) self.assertEqual(self.stdcompleter.global_matches('akaksajadhak'), []) diff --git a/lib-python/2.7.0/test/test_difflib.py b/lib-python/2.7.0/test/test_difflib.py --- a/lib-python/2.7.0/test/test_difflib.py +++ b/lib-python/2.7.0/test/test_difflib.py @@ -4,8 +4,47 @@ import doctest import sys + +class TestWithAscii(unittest.TestCase): + def test_one_insert(self): + sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100) + self.assertAlmostEqual(sm.ratio(), 0.995, places=3) + self.assertEqual(list(sm.get_opcodes()), + [ ('insert', 0, 0, 0, 1), + ('equal', 0, 100, 1, 101)]) + sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50) + self.assertAlmostEqual(sm.ratio(), 0.995, places=3) + self.assertEqual(list(sm.get_opcodes()), + [ ('equal', 0, 50, 0, 50), + ('insert', 50, 50, 50, 51), + ('equal', 50, 100, 51, 101)]) + + def test_one_delete(self): + sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40) + self.assertAlmostEqual(sm.ratio(), 0.994, places=3) + self.assertEqual(list(sm.get_opcodes()), + [ ('equal', 0, 40, 0, 40), + ('delete', 40, 41, 40, 40), + ('equal', 41, 81, 40, 80)]) + + +class TestAutojunk(unittest.TestCase): + """Tests for the autojunk parameter added in 2.7""" + def 
test_one_insert_homogenous_sequence(self): + # By default autojunk=True and the heuristic kicks in for a sequence + # of length 200+ + seq1 = 'b' * 200 + seq2 = 'a' + 'b' * 200 + + sm = difflib.SequenceMatcher(None, seq1, seq2) + self.assertAlmostEqual(sm.ratio(), 0, places=3) + + # Now turn the heuristic off + sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False) + self.assertAlmostEqual(sm.ratio(), 0.9975, places=3) + + class TestSFbugs(unittest.TestCase): - def test_ratio_for_null_seqn(self): # Check clearing of SF bug 763023 s = difflib.SequenceMatcher(None, [], []) @@ -184,7 +223,9 @@ def test_main(): difflib.HtmlDiff._default_prefix = 0 Doctests = doctest.DocTestSuite(difflib) - run_unittest(TestSFpatches, TestSFbugs, TestOutputFormat, Doctests) + run_unittest( + TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs, + TestOutputFormat, Doctests) if __name__ == '__main__': test_main() diff --git a/lib-python/2.7.0/test/test_codeccallbacks.py b/lib-python/2.7.0/test/test_codeccallbacks.py --- a/lib-python/2.7.0/test/test_codeccallbacks.py +++ b/lib-python/2.7.0/test/test_codeccallbacks.py @@ -186,7 +186,7 @@ charmap = dict([ (ord(c), 2*c.upper()) for c in "abcdefgh"]) sin = u"abc" sout = "AABBCC" - self.assertEquals(codecs.charmap_encode(sin, "strict", charmap)[0], sout) + self.assertEqual(codecs.charmap_encode(sin, "strict", charmap)[0], sout) sin = u"abcA" self.assertRaises(UnicodeError, codecs.charmap_encode, sin, "strict", charmap) @@ -194,7 +194,7 @@ charmap[ord("?")] = "XYZ" sin = u"abcDEF" sout = "AABBCCXYZXYZXYZ" - self.assertEquals(codecs.charmap_encode(sin, "replace", charmap)[0], sout) + self.assertEqual(codecs.charmap_encode(sin, "replace", charmap)[0], sout) charmap[ord("?")] = u"XYZ" self.assertRaises(TypeError, codecs.charmap_encode, sin, "replace", charmap) @@ -327,7 +327,7 @@ # check with the correct number and type of arguments exc = exctype(*args) - self.assertEquals(str(exc), msg) + self.assertEqual(str(exc), msg) def 
test_unicodeencodeerror(self): self.check_exceptionobjectargs( @@ -437,15 +437,15 @@ UnicodeError("ouch") ) # If the correct exception is passed in, "ignore" returns an empty replacement - self.assertEquals( + self.assertEqual( codecs.ignore_errors(UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")), (u"", 1) ) - self.assertEquals( + self.assertEqual( codecs.ignore_errors(UnicodeDecodeError("ascii", "\xff", 0, 1, "ouch")), (u"", 1) ) - self.assertEquals( + self.assertEqual( codecs.ignore_errors(UnicodeTranslateError(u"\u3042", 0, 1, "ouch")), (u"", 1) ) @@ -474,15 +474,15 @@ BadObjectUnicodeDecodeError() ) # With the correct exception, "replace" returns an "?" or u"\ufffd" replacement - self.assertEquals( + self.assertEqual( codecs.replace_errors(UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")), (u"?", 1) ) - self.assertEquals( + self.assertEqual( codecs.replace_errors(UnicodeDecodeError("ascii", "\xff", 0, 1, "ouch")), (u"\ufffd", 1) ) - self.assertEquals( + self.assertEqual( codecs.replace_errors(UnicodeTranslateError(u"\u3042", 0, 1, "ouch")), (u"\ufffd", 1) ) @@ -514,7 +514,7 @@ # Use the correct exception cs = (0, 1, 9, 10, 99, 100, 999, 1000, 9999, 10000, 0x3042) s = "".join(unichr(c) for c in cs) - self.assertEquals( + self.assertEqual( codecs.xmlcharrefreplace_errors( UnicodeEncodeError("ascii", s, 0, len(s), "ouch") ), @@ -546,32 +546,32 @@ UnicodeTranslateError(u"\u3042", 0, 1, "ouch") ) # Use the correct exception - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\u3042", 0, 1, "ouch")), (u"\\u3042", 1) ) - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\x00", 0, 1, "ouch")), (u"\\x00", 1) ) - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\xff", 0, 1, "ouch")), (u"\\xff", 1) ) - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\u0100", 0, 1, 
"ouch")), (u"\\u0100", 1) ) - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\uffff", 0, 1, "ouch")), (u"\\uffff", 1) ) if sys.maxunicode>0xffff: - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\U00010000", 0, 1, "ouch")), (u"\\U00010000", 1) ) - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors(UnicodeEncodeError("ascii", u"\U0010ffff", 0, 1, "ouch")), (u"\\U0010ffff", 1) ) @@ -603,14 +603,14 @@ ) def test_lookup(self): - self.assertEquals(codecs.strict_errors, codecs.lookup_error("strict")) - self.assertEquals(codecs.ignore_errors, codecs.lookup_error("ignore")) - self.assertEquals(codecs.strict_errors, codecs.lookup_error("strict")) - self.assertEquals( + self.assertEqual(codecs.strict_errors, codecs.lookup_error("strict")) + self.assertEqual(codecs.ignore_errors, codecs.lookup_error("ignore")) + self.assertEqual(codecs.strict_errors, codecs.lookup_error("strict")) + self.assertEqual( codecs.xmlcharrefreplace_errors, codecs.lookup_error("xmlcharrefreplace") ) - self.assertEquals( + self.assertEqual( codecs.backslashreplace_errors, codecs.lookup_error("backslashreplace") ) @@ -686,11 +686,11 @@ # Valid negative position handler.pos = -1 - self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"0") + self.assertEqual("\xff0".decode("ascii", "test.posreturn"), u"0") # Valid negative position handler.pos = -2 - self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"") + self.assertEqual("\xff0".decode("ascii", "test.posreturn"), u"") # Negative position out of bounds handler.pos = -3 @@ -698,11 +698,11 @@ # Valid positive position handler.pos = 1 - self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"0") + self.assertEqual("\xff0".decode("ascii", "test.posreturn"), u"0") # Largest valid positive position (one beyond end of input) handler.pos = 2 - self.assertEquals("\xff0".decode("ascii", "test.posreturn"), u"") + 
self.assertEqual("\xff0".decode("ascii", "test.posreturn"), u"") # Invalid positive position handler.pos = 3 @@ -710,7 +710,7 @@ # Restart at the "0" handler.pos = 6 - self.assertEquals("\\uyyyy0".decode("raw-unicode-escape", "test.posreturn"), u"0") + self.assertEqual("\\uyyyy0".decode("raw-unicode-escape", "test.posreturn"), u"0") class D(dict): def __getitem__(self, key): @@ -740,11 +740,11 @@ # Valid negative position handler.pos = -1 - self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "0") + self.assertEqual(u"\xff0".encode("ascii", "test.posreturn"), "0") # Valid negative position handler.pos = -2 - self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "") + self.assertEqual(u"\xff0".encode("ascii", "test.posreturn"), "") # Negative position out of bounds handler.pos = -3 @@ -752,11 +752,11 @@ # Valid positive position handler.pos = 1 - self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "0") + self.assertEqual(u"\xff0".encode("ascii", "test.posreturn"), "0") # Largest valid positive position (one beyond end of input handler.pos = 2 - self.assertEquals(u"\xff0".encode("ascii", "test.posreturn"), "") + self.assertEqual(u"\xff0".encode("ascii", "test.posreturn"), "") # Invalid positive position handler.pos = 3 diff --git a/lib-python/2.7.0/test/test_builtin.py b/lib-python/2.7.0/test/test_builtin.py --- a/lib-python/2.7.0/test/test_builtin.py +++ b/lib-python/2.7.0/test/test_builtin.py @@ -651,15 +651,15 @@ class X: def __hash__(self): return 2**100 - self.assertEquals(type(hash(X())), int) + self.assertEqual(type(hash(X())), int) class Y(object): def __hash__(self): return 2**100 - self.assertEquals(type(hash(Y())), int) + self.assertEqual(type(hash(Y())), int) class Z(long): def __hash__(self): return self - self.assertEquals(hash(Z(42)), hash(42L)) + self.assertEqual(hash(Z(42)), hash(42L)) def test_hex(self): self.assertEqual(hex(16), '0x10') @@ -930,7 +930,7 @@ self.assertEqual(next(it), 1) self.assertRaises(StopIteration, 
next, it) self.assertRaises(StopIteration, next, it) - self.assertEquals(next(it, 42), 42) + self.assertEqual(next(it, 42), 42) class Iter(object): def __iter__(self): @@ -939,7 +939,7 @@ raise StopIteration it = iter(Iter()) - self.assertEquals(next(it, 42), 42) + self.assertEqual(next(it, 42), 42) self.assertRaises(StopIteration, next, it) def gen(): @@ -947,9 +947,9 @@ return it = gen() - self.assertEquals(next(it), 1) + self.assertEqual(next(it), 1) self.assertRaises(StopIteration, next, it) - self.assertEquals(next(it, 42), 42) + self.assertEqual(next(it, 42), 42) def test_oct(self): self.assertEqual(oct(100), '0144') diff --git a/lib-python/2.7.0/test/test_bytes.py b/lib-python/2.7.0/test/test_bytes.py --- a/lib-python/2.7.0/test/test_bytes.py +++ b/lib-python/2.7.0/test/test_bytes.py @@ -255,11 +255,11 @@ def test_fromhex(self): self.assertRaises(TypeError, self.type2test.fromhex) self.assertRaises(TypeError, self.type2test.fromhex, 1) - self.assertEquals(self.type2test.fromhex(u''), self.type2test()) + self.assertEqual(self.type2test.fromhex(u''), self.type2test()) b = bytearray([0x1a, 0x2b, 0x30]) - self.assertEquals(self.type2test.fromhex(u'1a2B30'), b) - self.assertEquals(self.type2test.fromhex(u' 1A 2B 30 '), b) - self.assertEquals(self.type2test.fromhex(u'0000'), b'\0\0') + self.assertEqual(self.type2test.fromhex(u'1a2B30'), b) + self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 '), b) + self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0') self.assertRaises(ValueError, self.type2test.fromhex, u'a') self.assertRaises(ValueError, self.type2test.fromhex, u'rt') self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd') @@ -587,11 +587,11 @@ data.reverse() L[start:stop:step] = data b[start:stop:step] = data - self.assertEquals(b, bytearray(L)) + self.assertEqual(b, bytearray(L)) del L[start:stop:step] del b[start:stop:step] - self.assertEquals(b, bytearray(L)) + self.assertEqual(b, bytearray(L)) def test_setslice_trap(self): # This test 
verifies that we correctly handle assigning self @@ -771,25 +771,25 @@ resize(10) orig = b[:] self.assertRaises(BufferError, resize, 11) - self.assertEquals(b, orig) + self.assertEqual(b, orig) self.assertRaises(BufferError, resize, 9) - self.assertEquals(b, orig) + self.assertEqual(b, orig) self.assertRaises(BufferError, resize, 0) - self.assertEquals(b, orig) + self.assertEqual(b, orig) # Other operations implying resize self.assertRaises(BufferError, b.pop, 0) - self.assertEquals(b, orig) + self.assertEqual(b, orig) self.assertRaises(BufferError, b.remove, b[1]) - self.assertEquals(b, orig) + self.assertEqual(b, orig) def delitem(): del b[1] self.assertRaises(BufferError, delitem) - self.assertEquals(b, orig) + self.assertEqual(b, orig) # deleting a non-contiguous slice def delslice(): b[1:-1:2] = b"" self.assertRaises(BufferError, delslice) - self.assertEquals(b, orig) + self.assertEqual(b, orig) def test_empty_bytearray(self): # Issue #7561: operations on empty bytearrays could crash in many diff --git a/lib-python/2.7.0/distutils/version.py b/lib-python/2.7.0/distutils/version.py --- a/lib-python/2.7.0/distutils/version.py +++ b/lib-python/2.7.0/distutils/version.py @@ -4,7 +4,7 @@ # Implements multiple version numbering conventions for the # Python Module Distribution Utilities. 
# -# $Id: version.py 70642 2009-03-28 00:48:48Z georg.brandl $ +# $Id$ # """Provides classes to represent module version numbers (one class for diff --git a/lib-python/2.7.0/distutils/tests/test_msvc9compiler.py b/lib-python/2.7.0/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7.0/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7.0/distutils/tests/test_msvc9compiler.py @@ -103,7 +103,7 @@ import _winreg HKCU = _winreg.HKEY_CURRENT_USER keys = Reg.read_keys(HKCU, 'xxxx') - self.assertEquals(keys, None) + self.assertEqual(keys, None) keys = Reg.read_keys(HKCU, r'Control Panel') self.assertTrue('Desktop' in keys) @@ -113,20 +113,24 @@ tempdir = self.mkdtemp() manifest = os.path.join(tempdir, 'manifest') f = open(manifest, 'w') - f.write(_MANIFEST) - f.close() + try: + f.write(_MANIFEST) + finally: + f.close() compiler = MSVCCompiler() compiler._remove_visual_c_ref(manifest) # see what we got f = open(manifest) - # removing trailing spaces - content = '\n'.join([line.rstrip() for line in f.readlines()]) - f.close() + try: + # removing trailing spaces + content = '\n'.join([line.rstrip() for line in f.readlines()]) + finally: + f.close() # makes sure the manifest was properly cleaned - self.assertEquals(content, _CLEANED_MANIFEST) + self.assertEqual(content, _CLEANED_MANIFEST) def test_suite(): diff --git a/lib-python/2.7.0/difflib.py b/lib-python/2.7.0/difflib.py --- a/lib-python/2.7.0/difflib.py +++ b/lib-python/2.7.0/difflib.py @@ -151,7 +151,7 @@ Return an upper bound on ratio() very quickly. """ - def __init__(self, isjunk=None, a='', b=''): + def __init__(self, isjunk=None, a='', b='', autojunk=True): """Construct a SequenceMatcher. Optional arg isjunk is None (the default), or a one-argument @@ -169,6 +169,10 @@ Optional arg b is the second of two sequences to be compared. By default, an empty string. The elements of b must be hashable. See also .set_seqs() and .set_seq2(). 
+ + Optional arg autojunk should be set to False to disable the + "automatic junk heuristic" that treats popular elements as junk + (see module documentation for more information). """ # Members: @@ -207,11 +211,13 @@ # DOES NOT WORK for x in a! # isbpopular # for x in b, isbpopular(x) is true iff b is reasonably long - # (at least 200 elements) and x accounts for more than 1% of - # its elements. DOES NOT WORK for x in a! + # (at least 200 elements) and x accounts for more than 1 + 1% of + # its elements (when autojunk is enabled). + # DOES NOT WORK for x in a! self.isjunk = isjunk self.a = self.b = None + self.autojunk = autojunk self.set_seqs(a, b) def set_seqs(self, a, b): @@ -288,7 +294,7 @@ # from starting any matching block at a junk element ... # also creates the fast isbjunk function ... # b2j also does not contain entries for "popular" elements, meaning - # elements that account for more than 1% of the total elements, and + # elements that account for more than 1 + 1% of the total elements, and # when the sequence is reasonably large (>= 200 elements); this can # be viewed as an adaptive notion of semi-junk, and yields an enormous # speedup when, e.g., comparing program files with hundreds of @@ -309,44 +315,37 @@ # out the junk later is much cheaper than building b2j "right" # from the start. 
b = self.b + self.b2j = b2j = {} + + for i, elt in enumerate(b): + indices = b2j.setdefault(elt, []) + indices.append(i) + + # Purge junk elements + junk = set() + isjunk = self.isjunk + if isjunk: + for elt in list(b2j.keys()): # using list() since b2j is modified + if isjunk(elt): + junk.add(elt) + del b2j[elt] + + # Purge popular elements that are not junk + popular = set() n = len(b) - self.b2j = b2j = {} - populardict = {} - for i, elt in enumerate(b): - if elt in b2j: - indices = b2j[elt] - if n >= 200 and len(indices) * 100 > n: - populardict[elt] = 1 - del indices[:] - else: - indices.append(i) - else: - b2j[elt] = [i] + if self.autojunk and n >= 200: + ntest = n // 100 + 1 + for elt, idxs in list(b2j.items()): + if len(idxs) > ntest: + popular.add(elt) + del b2j[elt] - # Purge leftover indices for popular elements. - for elt in populardict: - del b2j[elt] - - # Now b2j.keys() contains elements uniquely, and especially when - # the sequence is a string, that's usually a good deal smaller - # than len(string). The difference is the number of isjunk calls - # saved. - isjunk = self.isjunk - junkdict = {} - if isjunk: - for d in populardict, b2j: - for elt in d.keys(): - if isjunk(elt): - junkdict[elt] = 1 - del d[elt] - - # Now for x in b, isjunk(x) == x in junkdict, but the - # latter is much faster. Note too that while there may be a - # lot of junk in the sequence, the number of *unique* junk - # elements is probably small. So the memory burden of keeping - # this dict alive is likely trivial compared to the size of b2j. - self.isbjunk = junkdict.__contains__ - self.isbpopular = populardict.__contains__ + # Now for x in b, isjunk(x) == x in junk, but the latter is much faster. + # Sicne the number of *unique* junk elements is probably small, the + # memory burden of keeping this set alive is likely trivial compared to + # the size of b2j. 
+ self.isbjunk = junk.__contains__ + self.isbpopular = popular.__contains__ def find_longest_match(self, alo, ahi, blo, bhi): """Find longest matching block in a[alo:ahi] and b[blo:bhi]. diff --git a/lib-python/2.7.0/distutils/command/config.py b/lib-python/2.7.0/distutils/command/config.py --- a/lib-python/2.7.0/distutils/command/config.py +++ b/lib-python/2.7.0/distutils/command/config.py @@ -9,7 +9,7 @@ this header file lives". """ -__revision__ = "$Id: config.py 77704 2010-01-23 09:23:15Z tarek.ziade $" +__revision__ = "$Id$" import os import re diff --git a/lib-python/2.7.0/test/test_mmap.py b/lib-python/2.7.0/test/test_mmap.py --- a/lib-python/2.7.0/test/test_mmap.py +++ b/lib-python/2.7.0/test/test_mmap.py @@ -417,7 +417,7 @@ data = "".join(reversed(data)) L[start:stop:step] = data m[start:stop:step] = data - self.assertEquals(m[:], "".join(L)) + self.assertEqual(m[:], "".join(L)) def make_mmap_file (self, f, halfsize): # Write 2 pages worth of data to the file @@ -512,27 +512,27 @@ f.close() # Test write_byte() for i in xrange(len(data)): - self.assertEquals(m.tell(), i) + self.assertEqual(m.tell(), i) m.write_byte(data[i]) - self.assertEquals(m.tell(), i+1) + self.assertEqual(m.tell(), i+1) self.assertRaises(ValueError, m.write_byte, "x") - self.assertEquals(m[:], data) + self.assertEqual(m[:], data) # Test read_byte() m.seek(0) for i in xrange(len(data)): - self.assertEquals(m.tell(), i) - self.assertEquals(m.read_byte(), data[i]) - self.assertEquals(m.tell(), i+1) + self.assertEqual(m.tell(), i) + self.assertEqual(m.read_byte(), data[i]) + self.assertEqual(m.tell(), i+1) self.assertRaises(ValueError, m.read_byte) # Test read() m.seek(3) - self.assertEquals(m.read(3), "345") - self.assertEquals(m.tell(), 6) + self.assertEqual(m.read(3), "345") + self.assertEqual(m.tell(), 6) # Test write() m.seek(3) m.write("bar") - self.assertEquals(m.tell(), 6) - self.assertEquals(m[:], "012bar6789") + self.assertEqual(m.tell(), 6) + self.assertEqual(m[:], "012bar6789") 
m.seek(8) self.assertRaises(ValueError, m.write, "bar") @@ -547,8 +547,8 @@ m1[:] = data1 m2 = mmap.mmap(-1, len(data2), tagname="foo") m2[:] = data2 - self.assertEquals(m1[:], data2) - self.assertEquals(m2[:], data2) + self.assertEqual(m1[:], data2) + self.assertEqual(m2[:], data2) m2.close() m1.close() @@ -557,8 +557,8 @@ m1[:] = data1 m2 = mmap.mmap(-1, len(data2), tagname="boo") m2[:] = data2 - self.assertEquals(m1[:], data1) - self.assertEquals(m2[:], data2) + self.assertEqual(m1[:], data1) + self.assertEqual(m2[:], data2) m2.close() m1.close() diff --git a/lib-python/2.7.0/test/test_pyclbr.py b/lib-python/2.7.0/test/test_pyclbr.py --- a/lib-python/2.7.0/test/test_pyclbr.py +++ b/lib-python/2.7.0/test/test_pyclbr.py @@ -97,7 +97,7 @@ self.assertIsInstance(py_item, (FunctionType, BuiltinFunctionType)) if py_item.__module__ != moduleName: continue # skip functions that came from somewhere else - self.assertEquals(py_item.__module__, value.module) + self.assertEqual(py_item.__module__, value.module) else: self.assertIsInstance(py_item, (ClassType, type)) if py_item.__module__ != moduleName: @@ -126,7 +126,7 @@ try: self.assertListEq(foundMethods, actualMethods, ignore) - self.assertEquals(py_item.__module__, value.module) + self.assertEqual(py_item.__module__, value.module) self.assertEqualsOrIgnored(py_item.__name__, value.name, ignore) diff --git a/lib-python/2.7.0/bsddb/test/test_fileid.py b/lib-python/2.7.0/bsddb/test/test_fileid.py --- a/lib-python/2.7.0/bsddb/test/test_fileid.py +++ b/lib-python/2.7.0/bsddb/test/test_fileid.py @@ -35,11 +35,11 @@ self.db1 = db.DB(self.db_env) self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=db.DB_RDONLY) - self.assertEquals(self.db1.get('spam'), 'eggs') + self.assertEqual(self.db1.get('spam'), 'eggs') self.db2 = db.DB(self.db_env) self.db2.open(self.db_path_2, dbtype=db.DB_HASH, flags=db.DB_RDONLY) - self.assertEquals(self.db2.get('spam'), 'spam') + self.assertEqual(self.db2.get('spam'), 'spam') self.db1.close() 
self.db2.close() diff --git a/lib-python/2.7.0/test/test_math.py b/lib-python/2.7.0/test/test_math.py --- a/lib-python/2.7.0/test/test_math.py +++ b/lib-python/2.7.0/test/test_math.py @@ -158,7 +158,7 @@ self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168) self.assertRaises(ValueError, math.acosh, 0) self.assertRaises(ValueError, math.acosh, -1) - self.assertEquals(math.acosh(INF), INF) + self.assertEqual(math.acosh(INF), INF) self.assertRaises(ValueError, math.acosh, NINF) self.assertTrue(math.isnan(math.acosh(NAN))) @@ -176,8 +176,8 @@ self.ftest('asinh(0)', math.asinh(0), 0) self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305) self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305) - self.assertEquals(math.asinh(INF), INF) - self.assertEquals(math.asinh(NINF), NINF) + self.assertEqual(math.asinh(INF), INF) + self.assertEqual(math.asinh(NINF), NINF) self.assertTrue(math.isnan(math.asinh(NAN))) def testAtan(self): @@ -264,17 +264,17 @@ def testCeil(self): self.assertRaises(TypeError, math.ceil) # These types will be int in py3k. 
- self.assertEquals(float, type(math.ceil(1))) - self.assertEquals(float, type(math.ceil(1L))) - self.assertEquals(float, type(math.ceil(1.0))) + self.assertEqual(float, type(math.ceil(1))) + self.assertEqual(float, type(math.ceil(1L))) + self.assertEqual(float, type(math.ceil(1.0))) self.ftest('ceil(0.5)', math.ceil(0.5), 1) self.ftest('ceil(1.0)', math.ceil(1.0), 1) self.ftest('ceil(1.5)', math.ceil(1.5), 2) self.ftest('ceil(-0.5)', math.ceil(-0.5), 0) self.ftest('ceil(-1.0)', math.ceil(-1.0), -1) self.ftest('ceil(-1.5)', math.ceil(-1.5), -1) - self.assertEquals(math.ceil(INF), INF) - self.assertEquals(math.ceil(NINF), NINF) + self.assertEqual(math.ceil(INF), INF) + self.assertEqual(math.ceil(NINF), NINF) self.assertTrue(math.isnan(math.ceil(NAN))) class TestCeil(object): @@ -300,19 +300,19 @@ self.assertRaises(TypeError, math.copysign) # copysign should let us distinguish signs of zeros - self.assertEquals(math.copysign(1., 0.), 1.) - self.assertEquals(math.copysign(1., -0.), -1.) - self.assertEquals(math.copysign(INF, 0.), INF) - self.assertEquals(math.copysign(INF, -0.), NINF) - self.assertEquals(math.copysign(NINF, 0.), INF) - self.assertEquals(math.copysign(NINF, -0.), NINF) + self.assertEqual(math.copysign(1., 0.), 1.) + self.assertEqual(math.copysign(1., -0.), -1.) + self.assertEqual(math.copysign(INF, 0.), INF) + self.assertEqual(math.copysign(INF, -0.), NINF) + self.assertEqual(math.copysign(NINF, 0.), INF) + self.assertEqual(math.copysign(NINF, -0.), NINF) # and of infinities - self.assertEquals(math.copysign(1., INF), 1.) - self.assertEquals(math.copysign(1., NINF), -1.) - self.assertEquals(math.copysign(INF, INF), INF) - self.assertEquals(math.copysign(INF, NINF), NINF) - self.assertEquals(math.copysign(NINF, INF), INF) - self.assertEquals(math.copysign(NINF, NINF), NINF) + self.assertEqual(math.copysign(1., INF), 1.) + self.assertEqual(math.copysign(1., NINF), -1.) 
+ self.assertEqual(math.copysign(INF, INF), INF) + self.assertEqual(math.copysign(INF, NINF), NINF) + self.assertEqual(math.copysign(NINF, INF), INF) + self.assertEqual(math.copysign(NINF, NINF), NINF) self.assertTrue(math.isnan(math.copysign(NAN, 1.))) self.assertTrue(math.isnan(math.copysign(NAN, INF))) self.assertTrue(math.isnan(math.copysign(NAN, NINF))) @@ -322,7 +322,7 @@ # given platform. self.assertTrue(math.isinf(math.copysign(INF, NAN))) # similarly, copysign(2., NAN) could be 2. or -2. - self.assertEquals(abs(math.copysign(2., NAN)), 2.) + self.assertEqual(abs(math.copysign(2., NAN)), 2.) def testCos(self): self.assertRaises(TypeError, math.cos) @@ -342,8 +342,8 @@ self.assertRaises(TypeError, math.cosh) self.ftest('cosh(0)', math.cosh(0), 1) self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert - self.assertEquals(math.cosh(INF), INF) - self.assertEquals(math.cosh(NINF), INF) + self.assertEqual(math.cosh(INF), INF) + self.assertEqual(math.cosh(NINF), INF) self.assertTrue(math.isnan(math.cosh(NAN))) def testDegrees(self): @@ -357,8 +357,8 @@ self.ftest('exp(-1)', math.exp(-1), 1/math.e) self.ftest('exp(0)', math.exp(0), 1) self.ftest('exp(1)', math.exp(1), math.e) - self.assertEquals(math.exp(INF), INF) - self.assertEquals(math.exp(NINF), 0.) + self.assertEqual(math.exp(INF), INF) + self.assertEqual(math.exp(NINF), 0.) self.assertTrue(math.isnan(math.exp(NAN))) def testFabs(self): @@ -384,9 +384,9 @@ def testFloor(self): self.assertRaises(TypeError, math.floor) # These types will be int in py3k. 
- self.assertEquals(float, type(math.floor(1))) - self.assertEquals(float, type(math.floor(1L))) - self.assertEquals(float, type(math.floor(1.0))) + self.assertEqual(float, type(math.floor(1))) + self.assertEqual(float, type(math.floor(1L))) + self.assertEqual(float, type(math.floor(1.0))) self.ftest('floor(0.5)', math.floor(0.5), 0) self.ftest('floor(1.0)', math.floor(1.0), 1) self.ftest('floor(1.5)', math.floor(1.5), 1) @@ -397,8 +397,8 @@ # This fails on some platforms - so check it here self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167) self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167) - self.assertEquals(math.ceil(INF), INF) - self.assertEquals(math.ceil(NINF), NINF) + self.assertEqual(math.ceil(INF), INF) + self.assertEqual(math.ceil(NINF), NINF) self.assertTrue(math.isnan(math.floor(NAN))) class TestFloor(object): @@ -429,12 +429,12 @@ self.assertRaises(ValueError, math.fmod, INF, 1.) self.assertRaises(ValueError, math.fmod, NINF, 1.) self.assertRaises(ValueError, math.fmod, INF, 0.) 
- self.assertEquals(math.fmod(3.0, INF), 3.0) - self.assertEquals(math.fmod(-3.0, INF), -3.0) - self.assertEquals(math.fmod(3.0, NINF), 3.0) - self.assertEquals(math.fmod(-3.0, NINF), -3.0) - self.assertEquals(math.fmod(0.0, 3.0), 0.0) - self.assertEquals(math.fmod(0.0, NINF), 0.0) + self.assertEqual(math.fmod(3.0, INF), 3.0) + self.assertEqual(math.fmod(-3.0, INF), -3.0) + self.assertEqual(math.fmod(3.0, NINF), 3.0) + self.assertEqual(math.fmod(-3.0, NINF), -3.0) + self.assertEqual(math.fmod(0.0, 3.0), 0.0) + self.assertEqual(math.fmod(0.0, NINF), 0.0) def testFrexp(self): self.assertRaises(TypeError, math.frexp) @@ -450,8 +450,8 @@ testfrexp('frexp(1)', math.frexp(1), (0.5, 1)) testfrexp('frexp(2)', math.frexp(2), (0.5, 2)) - self.assertEquals(math.frexp(INF)[0], INF) - self.assertEquals(math.frexp(NINF)[0], NINF) + self.assertEqual(math.frexp(INF)[0], INF) + self.assertEqual(math.frexp(NINF)[0], NINF) self.assertTrue(math.isnan(math.frexp(NAN)[0])) @requires_IEEE_754 @@ -564,28 +564,28 @@ self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2) self.assertRaises(OverflowError, math.ldexp, 1., 1000000) self.assertRaises(OverflowError, math.ldexp, -1., 1000000) - self.assertEquals(math.ldexp(1., -1000000), 0.) - self.assertEquals(math.ldexp(-1., -1000000), -0.) - self.assertEquals(math.ldexp(INF, 30), INF) - self.assertEquals(math.ldexp(NINF, -213), NINF) + self.assertEqual(math.ldexp(1., -1000000), 0.) + self.assertEqual(math.ldexp(-1., -1000000), -0.) + self.assertEqual(math.ldexp(INF, 30), INF) + self.assertEqual(math.ldexp(NINF, -213), NINF) self.assertTrue(math.isnan(math.ldexp(NAN, 0))) # large second argument for n in [10**5, 10L**5, 10**10, 10L**10, 10**20, 10**40]: - self.assertEquals(math.ldexp(INF, -n), INF) - self.assertEquals(math.ldexp(NINF, -n), NINF) - self.assertEquals(math.ldexp(1., -n), 0.) - self.assertEquals(math.ldexp(-1., -n), -0.) - self.assertEquals(math.ldexp(0., -n), 0.) - self.assertEquals(math.ldexp(-0., -n), -0.) 
+ self.assertEqual(math.ldexp(INF, -n), INF) + self.assertEqual(math.ldexp(NINF, -n), NINF) + self.assertEqual(math.ldexp(1., -n), 0.) + self.assertEqual(math.ldexp(-1., -n), -0.) + self.assertEqual(math.ldexp(0., -n), 0.) + self.assertEqual(math.ldexp(-0., -n), -0.) self.assertTrue(math.isnan(math.ldexp(NAN, -n))) self.assertRaises(OverflowError, math.ldexp, 1., n) self.assertRaises(OverflowError, math.ldexp, -1., n) - self.assertEquals(math.ldexp(0., n), 0.) - self.assertEquals(math.ldexp(-0., n), -0.) - self.assertEquals(math.ldexp(INF, n), INF) - self.assertEquals(math.ldexp(NINF, n), NINF) + self.assertEqual(math.ldexp(0., n), 0.) + self.assertEqual(math.ldexp(-0., n), -0.) + self.assertEqual(math.ldexp(INF, n), INF) + self.assertEqual(math.ldexp(NINF, n), NINF) self.assertTrue(math.isnan(math.ldexp(NAN, n))) def testLog(self): @@ -596,7 +596,7 @@ self.ftest('log(32,2)', math.log(32,2), 5) self.ftest('log(10**40, 10)', math.log(10**40, 10), 40) self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2) - self.assertEquals(math.log(INF), INF) + self.assertEqual(math.log(INF), INF) self.assertRaises(ValueError, math.log, NINF) self.assertTrue(math.isnan(math.log(NAN))) @@ -606,19 +606,19 @@ self.ftest('log1p(0)', math.log1p(0), 0) self.ftest('log1p(e-1)', math.log1p(math.e-1), 1) self.ftest('log1p(1)', math.log1p(1), math.log(2)) - self.assertEquals(math.log1p(INF), INF) + self.assertEqual(math.log1p(INF), INF) self.assertRaises(ValueError, math.log1p, NINF) self.assertTrue(math.isnan(math.log1p(NAN))) n= 2**90 - self.assertAlmostEquals(math.log1p(n), 62.383246250395075) - self.assertAlmostEquals(math.log1p(n), math.log1p(float(n))) + self.assertAlmostEqual(math.log1p(n), 62.383246250395075) + self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) def testLog10(self): self.assertRaises(TypeError, math.log10) self.ftest('log10(0.1)', math.log10(0.1), -1) self.ftest('log10(1)', math.log10(1), 0) self.ftest('log10(10)', math.log10(10), 1) - 
self.assertEquals(math.log(INF), INF) + self.assertEqual(math.log(INF), INF) self.assertRaises(ValueError, math.log10, NINF) self.assertTrue(math.isnan(math.log10(NAN))) @@ -634,8 +634,8 @@ testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0)) testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0)) - self.assertEquals(math.modf(INF), (0.0, INF)) - self.assertEquals(math.modf(NINF), (-0.0, NINF)) + self.assertEqual(math.modf(INF), (0.0, INF)) + self.assertEqual(math.modf(NINF), (-0.0, NINF)) modf_nan = math.modf(NAN) self.assertTrue(math.isnan(modf_nan[0])) @@ -814,8 +814,8 @@ self.ftest('sinh(0)', math.sinh(0), 0) self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1) self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0) - self.assertEquals(math.sinh(INF), INF) - self.assertEquals(math.sinh(NINF), NINF) + self.assertEqual(math.sinh(INF), INF) + self.assertEqual(math.sinh(NINF), NINF) self.assertTrue(math.isnan(math.sinh(NAN))) def testSqrt(self): @@ -823,7 +823,7 @@ self.ftest('sqrt(0)', math.sqrt(0), 0) self.ftest('sqrt(1)', math.sqrt(1), 1) self.ftest('sqrt(4)', math.sqrt(4), 2) - self.assertEquals(math.sqrt(INF), INF) + self.assertEqual(math.sqrt(INF), INF) self.assertRaises(ValueError, math.sqrt, NINF) self.assertTrue(math.isnan(math.sqrt(NAN))) diff --git a/lib-python/2.7.0/test/test_complex.py b/lib-python/2.7.0/test/test_complex.py --- a/lib-python/2.7.0/test/test_complex.py +++ b/lib-python/2.7.0/test/test_complex.py @@ -501,8 +501,8 @@ def test_plus_minus_0j(self): # test that -0j and 0j literals are not identified z1, z2 = 0j, -0j - self.assertEquals(atan2(z1.imag, -1.), atan2(0., -1.)) - self.assertEquals(atan2(z2.imag, -1.), atan2(-0., -1.)) + self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.)) + self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.)) @unittest.skipUnless(float.__getformat__("double").startswith("IEEE"), "test requires IEEE 754 doubles") diff --git a/lib-python/2.7.0/test/test_getargs2.py 
b/lib-python/2.7.0/test/test_getargs2.py --- a/lib-python/2.7.0/test/test_getargs2.py +++ b/lib-python/2.7.0/test/test_getargs2.py @@ -262,7 +262,7 @@ from _testcapi import getargs_tuple ret = getargs_tuple(1, (2, 3)) - self.assertEquals(ret, (1,2,3)) + self.assertEqual(ret, (1,2,3)) # make sure invalid tuple arguments are handled correctly class seq: @@ -275,25 +275,25 @@ class Keywords_TestCase(unittest.TestCase): def test_positional_args(self): # using all positional args - self.assertEquals( + self.assertEqual( getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ) def test_mixed_args(self): # positional and keyword args - self.assertEquals( + self.assertEqual( getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ) def test_keyword_args(self): # all keywords - self.assertEquals( + self.assertEqual( getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ) def test_optional_args(self): # missing optional keyword args, skipping tuples - self.assertEquals( + self.assertEqual( getargs_keywords(arg1=(1,2), arg2=3, arg5=10), (1, 2, 3, -1, -1, -1, -1, -1, -1, 10) ) @@ -302,14 +302,14 @@ try: getargs_keywords(arg1=(1,2)) except TypeError, err: - self.assertEquals(str(err), "Required argument 'arg2' (pos 2) not found") + self.assertEqual(str(err), "Required argument 'arg2' (pos 2) not found") else: self.fail('TypeError should have been raised') def test_too_many_args(self): try: getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111) except TypeError, err: - self.assertEquals(str(err), "function takes at most 5 arguments (6 given)") + self.assertEqual(str(err), "function takes at most 5 arguments (6 given)") else: self.fail('TypeError should have been raised') def test_invalid_keyword(self): @@ -317,7 +317,7 @@ try: getargs_keywords((1,2),3,arg5=10,arg666=666) except TypeError, err: - self.assertEquals(str(err), "'arg666' is an invalid keyword argument 
for this function") + self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function") else: self.fail('TypeError should have been raised') diff --git a/lib-python/2.7.0/test/test_fpformat.py b/lib-python/2.7.0/test/test_fpformat.py --- a/lib-python/2.7.0/test/test_fpformat.py +++ b/lib-python/2.7.0/test/test_fpformat.py @@ -26,7 +26,7 @@ n = repr(n) expected = "%.*f" % (digits, float(n)) - self.assertEquals(result, expected) + self.assertEqual(result, expected) def checkSci(self, n, digits): result = sci(n, digits) @@ -39,11 +39,11 @@ exp = exp[0] + "0" + exp[1:] expected = "%se%s" % (num, exp) - self.assertEquals(result, expected) + self.assertEqual(result, expected) def test_basic_cases(self): - self.assertEquals(fix(100.0/3, 3), '33.333') - self.assertEquals(sci(100.0/3, 3), '3.333e+001') + self.assertEqual(fix(100.0/3, 3), '33.333') + self.assertEqual(sci(100.0/3, 3), '3.333e+001') def test_reasonable_values(self): for d in range(7): @@ -54,12 +54,12 @@ def test_failing_values(self): # Now for 'unreasonable n and d' - self.assertEquals(fix(1.0, 1000), '1.'+('0'*1000)) - self.assertEquals(sci("1"+('0'*1000), 0), '1e+1000') + self.assertEqual(fix(1.0, 1000), '1.'+('0'*1000)) + self.assertEqual(sci("1"+('0'*1000), 0), '1e+1000') # This behavior is inconsistent. sci raises an exception; fix doesn't. 
yacht = "Throatwobbler Mangrove" - self.assertEquals(fix(yacht, 10), yacht) + self.assertEqual(fix(yacht, 10), yacht) try: sci(yacht, 10) except NotANumber: diff --git a/lib-python/2.7.0/test/test_urllib2.py b/lib-python/2.7.0/test/test_urllib2.py --- a/lib-python/2.7.0/test/test_urllib2.py +++ b/lib-python/2.7.0/test/test_urllib2.py @@ -45,7 +45,7 @@ ('a, b, "c", "d", "e,f", g, h', ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']), ('a="b\\"c", d="e\\,f", g="h\\\\i"', ['a="b"c"', 'd="e,f"', 'g="h\\i"'])] for string, list in tests: - self.assertEquals(urllib2.parse_http_list(string), list) + self.assertEqual(urllib2.parse_http_list(string), list) def test_request_headers_dict(): @@ -622,22 +622,32 @@ h = NullFTPHandler(data) o = h.parent = MockOpener() - for url, host, port, type_, dirs, filename, mimetype in [ + for url, host, port, user, passwd, type_, dirs, filename, mimetype in [ ("ftp://localhost/foo/bar/baz.html", - "localhost", ftplib.FTP_PORT, "I", + "localhost", ftplib.FTP_PORT, "", "", "I", + ["foo", "bar"], "baz.html", "text/html"), + ("ftp://parrot at localhost/foo/bar/baz.html", + "localhost", ftplib.FTP_PORT, "parrot", "", "I", + ["foo", "bar"], "baz.html", "text/html"), + ("ftp://%25parrot at localhost/foo/bar/baz.html", + "localhost", ftplib.FTP_PORT, "%parrot", "", "I", + ["foo", "bar"], "baz.html", "text/html"), + ("ftp://%2542parrot at localhost/foo/bar/baz.html", + "localhost", ftplib.FTP_PORT, "%42parrot", "", "I", ["foo", "bar"], "baz.html", "text/html"), ("ftp://localhost:80/foo/bar/", - "localhost", 80, "D", + "localhost", 80, "", "", "D", ["foo", "bar"], "", None), ("ftp://localhost/baz.gif;type=a", - "localhost", ftplib.FTP_PORT, "A", + "localhost", ftplib.FTP_PORT, "", "", "A", [], "baz.gif", None), # XXX really this should guess image/gif ]: req = Request(url) req.timeout = None r = h.ftp_open(req) # ftp authentication not yet implemented by FTPHandler - self.assertTrue(h.user == h.passwd == "") + self.assertEqual(h.user, user) + 
self.assertEqual(h.passwd, passwd) self.assertEqual(h.host, socket.gethostbyname(host)) self.assertEqual(h.port, port) self.assertEqual(h.dirs, dirs) @@ -828,6 +838,25 @@ p_ds_req = h.do_request_(ds_req) self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com") + def test_fixpath_in_weirdurls(self): + # Issue4493: urllib2 to supply '/' when to urls where path does not + # start with'/' + + h = urllib2.AbstractHTTPHandler() + o = h.parent = MockOpener() + + weird_url = 'http://www.python.org?getspam' + req = Request(weird_url) + newreq = h.do_request_(req) + self.assertEqual(newreq.get_host(),'www.python.org') + self.assertEqual(newreq.get_selector(),'/?getspam') + + url_without_path = 'http://www.python.org' + req = Request(url_without_path) + newreq = h.do_request_(req) + self.assertEqual(newreq.get_host(),'www.python.org') + self.assertEqual(newreq.get_selector(),'') + def test_errors(self): h = urllib2.HTTPErrorProcessor() o = h.parent = MockOpener() @@ -862,7 +891,7 @@ r = MockResponse(200, "OK", {}, "") newreq = h.http_request(req) self.assertTrue(cj.ach_req is req is newreq) - self.assertEquals(req.get_origin_req_host(), "example.com") + self.assertEqual(req.get_origin_req_host(), "example.com") self.assertTrue(not req.is_unverifiable()) newr = h.http_response(req, r) self.assertTrue(cj.ec_req is req) diff --git a/lib-python/2.7.0/test/test_os.py b/lib-python/2.7.0/test/test_os.py --- a/lib-python/2.7.0/test/test_os.py +++ b/lib-python/2.7.0/test/test_os.py @@ -187,8 +187,8 @@ result = os.stat(self.fname) # Make sure direct access works - self.assertEquals(result[stat.ST_SIZE], 3) - self.assertEquals(result.st_size, 3) + self.assertEqual(result[stat.ST_SIZE], 3) + self.assertEqual(result.st_size, 3) # Make sure all the attributes are there members = dir(result) @@ -199,8 +199,8 @@ def trunc(x): return int(x) else: def trunc(x): return x - self.assertEquals(trunc(getattr(result, attr)), - result[getattr(stat, name)]) + 
self.assertEqual(trunc(getattr(result, attr)), + result[getattr(stat, name)]) self.assertIn(attr, members) try: @@ -254,13 +254,13 @@ return # Make sure direct access works - self.assertEquals(result.f_bfree, result[3]) + self.assertEqual(result.f_bfree, result[3]) # Make sure all the attributes are there. members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files', 'ffree', 'favail', 'flag', 'namemax') for value, member in enumerate(members): - self.assertEquals(getattr(result, 'f_' + member), result[value]) + self.assertEqual(getattr(result, 'f_' + member), result[value]) # Make sure that assignment really fails try: @@ -295,7 +295,7 @@ # time stamps in stat, but not in utime. os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta))) st2 = os.stat(test_support.TESTFN) - self.assertEquals(st2.st_mtime, int(st.st_mtime-delta)) + self.assertEqual(st2.st_mtime, int(st.st_mtime-delta)) # Restrict test to Win32, since there is no guarantee other # systems support centiseconds @@ -312,7 +312,7 @@ def test_1565150(self): t1 = 1159195039.25 os.utime(self.fname, (t1, t1)) - self.assertEquals(os.stat(self.fname).st_mtime, t1) + self.assertEqual(os.stat(self.fname).st_mtime, t1) def test_1686475(self): # Verify that an open file can be stat'ed @@ -346,7 +346,7 @@ os.environ.update(HELLO="World") with os.popen("/bin/sh -c 'echo $HELLO'") as popen: value = popen.read().strip() - self.assertEquals(value, "World") + self.assertEqual(value, "World") class WalkTests(unittest.TestCase): """Tests for os.walk().""" diff --git a/lib-python/2.7.0/json/tests/test_default.py b/lib-python/2.7.0/json/tests/test_default.py --- a/lib-python/2.7.0/json/tests/test_default.py +++ b/lib-python/2.7.0/json/tests/test_default.py @@ -4,6 +4,6 @@ class TestDefault(TestCase): def test_default(self): - self.assertEquals( + self.assertEqual( json.dumps(type, default=repr), json.dumps(repr(type))) diff --git a/lib-python/2.7.0/json/tests/test_pass3.py 
b/lib-python/2.7.0/json/tests/test_pass3.py --- a/lib-python/2.7.0/json/tests/test_pass3.py +++ b/lib-python/2.7.0/json/tests/test_pass3.py @@ -17,4 +17,4 @@ # test in/out equivalence and parsing res = json.loads(JSON) out = json.dumps(res) - self.assertEquals(res, json.loads(out)) + self.assertEqual(res, json.loads(out)) diff --git a/lib-python/2.7.0/curses/panel.py b/lib-python/2.7.0/curses/panel.py --- a/lib-python/2.7.0/curses/panel.py +++ b/lib-python/2.7.0/curses/panel.py @@ -3,6 +3,6 @@ Module for using panels with curses. """ -__revision__ = "$Id: panel.py 36560 2004-07-18 06:16:08Z tim_one $" +__revision__ = "$Id$" from _curses_panel import * diff --git a/lib-python/2.7.0/test/test_copy_reg.py b/lib-python/2.7.0/test/test_copy_reg.py --- a/lib-python/2.7.0/test/test_copy_reg.py +++ b/lib-python/2.7.0/test/test_copy_reg.py @@ -40,7 +40,7 @@ def test_bool(self): import copy - self.assertEquals(True, copy.copy(True)) + self.assertEqual(True, copy.copy(True)) def test_extension_registry(self): mod, func, code = 'junk1 ', ' junk2', 0xabcd @@ -101,16 +101,16 @@ mod, func, code) def test_slotnames(self): - self.assertEquals(copy_reg._slotnames(WithoutSlots), []) - self.assertEquals(copy_reg._slotnames(WithWeakref), []) + self.assertEqual(copy_reg._slotnames(WithoutSlots), []) + self.assertEqual(copy_reg._slotnames(WithWeakref), []) expected = ['_WithPrivate__spam'] - self.assertEquals(copy_reg._slotnames(WithPrivate), expected) - self.assertEquals(copy_reg._slotnames(WithSingleString), ['spam']) + self.assertEqual(copy_reg._slotnames(WithPrivate), expected) + self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam']) expected = ['eggs', 'spam'] expected.sort() result = copy_reg._slotnames(WithInherited) result.sort() - self.assertEquals(result, expected) + self.assertEqual(result, expected) def test_main(): diff --git a/lib-python/2.7.0/test/test_site.py b/lib-python/2.7.0/test/test_site.py --- a/lib-python/2.7.0/test/test_site.py +++ 
b/lib-python/2.7.0/test/test_site.py @@ -139,7 +139,7 @@ user_base = site.getuserbase() # the call sets site.USER_BASE - self.assertEquals(site.USER_BASE, user_base) + self.assertEqual(site.USER_BASE, user_base) # let's set PYTHONUSERBASE and see if it uses it site.USER_BASE = None @@ -157,7 +157,7 @@ user_site = site.getusersitepackages() # the call sets USER_BASE *and* USER_SITE - self.assertEquals(site.USER_SITE, user_site) + self.assertEqual(site.USER_SITE, user_site) self.assertTrue(user_site.startswith(site.USER_BASE), user_site) def test_getsitepackages(self): @@ -167,19 +167,19 @@ if sys.platform in ('os2emx', 'riscos'): self.assertEqual(len(dirs), 1) wanted = os.path.join('xoxo', 'Lib', 'site-packages') - self.assertEquals(dirs[0], wanted) + self.assertEqual(dirs[0], wanted) elif os.sep == '/': self.assertEqual(len(dirs), 2) wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3], 'site-packages') - self.assertEquals(dirs[0], wanted) + self.assertEqual(dirs[0], wanted) wanted = os.path.join('xoxo', 'lib', 'site-python') - self.assertEquals(dirs[1], wanted) + self.assertEqual(dirs[1], wanted) else: self.assertEqual(len(dirs), 2) - self.assertEquals(dirs[0], 'xoxo') + self.assertEqual(dirs[0], 'xoxo') wanted = os.path.join('xoxo', 'lib', 'site-packages') - self.assertEquals(dirs[1], wanted) + self.assertEqual(dirs[1], wanted) # let's try the specific Apple location if (sys.platform == "darwin" and @@ -189,10 +189,10 @@ self.assertEqual(len(dirs), 4) wanted = os.path.join('~', 'Library', 'Python', sys.version[:3], 'site-packages') - self.assertEquals(dirs[2], os.path.expanduser(wanted)) + self.assertEqual(dirs[2], os.path.expanduser(wanted)) wanted = os.path.join('/Library', 'Python', sys.version[:3], 'site-packages') - self.assertEquals(dirs[3], wanted) + self.assertEqual(dirs[3], wanted) class PthFile(object): """Helper class for handling testing of .pth files""" diff --git a/lib-python/2.7.0/distutils/dep_util.py 
b/lib-python/2.7.0/distutils/dep_util.py --- a/lib-python/2.7.0/distutils/dep_util.py +++ b/lib-python/2.7.0/distutils/dep_util.py @@ -4,7 +4,7 @@ and groups of files; also, function based entirely on such timestamp dependency analysis.""" -__revision__ = "$Id: dep_util.py 76746 2009-12-10 15:29:03Z tarek.ziade $" +__revision__ = "$Id$" import os from distutils.errors import DistutilsFileError diff --git a/lib-python/2.7.0/test/test_memoryio.py b/lib-python/2.7.0/test/test_memoryio.py --- a/lib-python/2.7.0/test/test_memoryio.py +++ b/lib-python/2.7.0/test/test_memoryio.py @@ -23,17 +23,17 @@ buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) - self.assertEquals(buf[:1], bytesIo.read(1)) - self.assertEquals(buf[1:5], bytesIo.read(4)) - self.assertEquals(buf[5:], bytesIo.read(900)) - self.assertEquals(self.EOF, bytesIo.read()) + self.assertEqual(buf[:1], bytesIo.read(1)) + self.assertEqual(buf[1:5], bytesIo.read(4)) + self.assertEqual(buf[5:], bytesIo.read(900)) + self.assertEqual(self.EOF, bytesIo.read()) def testReadNoArgs(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) - self.assertEquals(buf, bytesIo.read()) - self.assertEquals(self.EOF, bytesIo.read()) + self.assertEqual(buf, bytesIo.read()) + self.assertEqual(self.EOF, bytesIo.read()) def testSeek(self): buf = self.buftype("1234567890") @@ -41,21 +41,21 @@ bytesIo.read(5) bytesIo.seek(0) - self.assertEquals(buf, bytesIo.read()) + self.assertEqual(buf, bytesIo.read()) bytesIo.seek(3) - self.assertEquals(buf[3:], bytesIo.read()) + self.assertEqual(buf[3:], bytesIo.read()) self.assertRaises(TypeError, bytesIo.seek, 0.0) def testTell(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) - self.assertEquals(0, bytesIo.tell()) + self.assertEqual(0, bytesIo.tell()) bytesIo.seek(5) - self.assertEquals(5, bytesIo.tell()) + self.assertEqual(5, bytesIo.tell()) bytesIo.seek(10000) - self.assertEquals(10000, bytesIo.tell()) + self.assertEqual(10000, bytesIo.tell()) class 
MemoryTestMixin: @@ -438,6 +438,11 @@ self.assertEqual(a.tostring(), b"1234567890d") memio.close() self.assertRaises(ValueError, memio.readinto, b) + memio = self.ioclass(b"123") + b = bytearray() + memio.seek(42) + memio.readinto(b) + self.assertEqual(b, b"") def test_relative_seek(self): buf = self.buftype("1234567890") @@ -613,7 +618,7 @@ self.assertEqual(len(state), 3) bytearray(state[0]) # Check if state[0] supports the buffer interface. self.assertIsInstance(state[1], int) - self.assert_(isinstance(state[2], dict) or state[2] is None) + self.assertTrue(isinstance(state[2], dict) or state[2] is None) memio.close() self.assertRaises(ValueError, memio.__getstate__) @@ -659,7 +664,7 @@ self.assertIsInstance(state[0], unicode) self.assertIsInstance(state[1], str) self.assertIsInstance(state[2], int) - self.assert_(isinstance(state[3], dict) or state[3] is None) + self.assertTrue(isinstance(state[3], dict) or state[3] is None) memio.close() self.assertRaises(ValueError, memio.__getstate__) diff --git a/lib-python/2.7.0/distutils/tests/test_register.py b/lib-python/2.7.0/distutils/tests/test_register.py --- a/lib-python/2.7.0/distutils/tests/test_register.py +++ b/lib-python/2.7.0/distutils/tests/test_register.py @@ -119,8 +119,12 @@ self.assertTrue(os.path.exists(self.rc)) # with the content similar to WANTED_PYPIRC - content = open(self.rc).read() - self.assertEquals(content, WANTED_PYPIRC) + f = open(self.rc) + try: + content = f.read() + self.assertEqual(content, WANTED_PYPIRC) + finally: + f.close() # now let's make sure the .pypirc file generated # really works : we shouldn't be asked anything @@ -137,7 +141,7 @@ self.assertTrue(self.conn.reqs, 2) req1 = dict(self.conn.reqs[0].headers) req2 = dict(self.conn.reqs[1].headers) - self.assertEquals(req2['Content-length'], req1['Content-length']) + self.assertEqual(req2['Content-length'], req1['Content-length']) self.assertTrue('xxx' in self.conn.reqs[1].data) def test_password_not_in_file(self): @@ -150,7 +154,7 @@ 
# dist.password should be set # therefore used afterwards by other commands - self.assertEquals(cmd.distribution.password, 'password') + self.assertEqual(cmd.distribution.password, 'password') def test_registering(self): # this test runs choice 2 @@ -167,7 +171,7 @@ self.assertTrue(self.conn.reqs, 1) req = self.conn.reqs[0] headers = dict(req.headers) - self.assertEquals(headers['Content-length'], '608') + self.assertEqual(headers['Content-length'], '608') self.assertTrue('tarek' in req.data) def test_password_reset(self): @@ -185,7 +189,7 @@ self.assertTrue(self.conn.reqs, 1) req = self.conn.reqs[0] headers = dict(req.headers) - self.assertEquals(headers['Content-length'], '290') + self.assertEqual(headers['Content-length'], '290') self.assertTrue('tarek' in req.data) def test_strict(self): @@ -248,7 +252,7 @@ with check_warnings() as w: warnings.simplefilter("always") cmd.check_metadata() - self.assertEquals(len(w.warnings), 1) + self.assertEqual(len(w.warnings), 1) def test_suite(): return unittest.makeSuite(RegisterTestCase) diff --git a/lib-python/2.7.0/test/test_operator.py b/lib-python/2.7.0/test/test_operator.py --- a/lib-python/2.7.0/test/test_operator.py +++ b/lib-python/2.7.0/test/test_operator.py @@ -455,12 +455,12 @@ f = operator.methodcaller('foo') self.assertRaises(IndexError, f, a) f = operator.methodcaller('foo', 1, 2) - self.assertEquals(f(a), 3) + self.assertEqual(f(a), 3) f = operator.methodcaller('bar') - self.assertEquals(f(a), 42) + self.assertEqual(f(a), 42) self.assertRaises(TypeError, f, a, a) f = operator.methodcaller('bar', f=5) - self.assertEquals(f(a), 5) + self.assertEqual(f(a), 5) def test_inplace(self): class C(object): diff --git a/lib-python/2.7.0/distutils/README b/lib-python/2.7.0/distutils/README --- a/lib-python/2.7.0/distutils/README +++ b/lib-python/2.7.0/distutils/README @@ -10,4 +10,4 @@ WARNING : Distutils must remain compatible with 2.3 -$Id: README 70017 2009-02-27 12:53:34Z tarek.ziade $ +$Id$ diff --git 
a/lib-python/2.7.0/pstats.py b/lib-python/2.7.0/pstats.py --- a/lib-python/2.7.0/pstats.py +++ b/lib-python/2.7.0/pstats.py @@ -359,7 +359,7 @@ print >> self.stream, indent, self.total_calls, "function calls", if self.total_calls != self.prim_calls: print >> self.stream, "(%d primitive calls)" % self.prim_calls, - print >> self.stream, "in %.3f CPU seconds" % self.total_tt + print >> self.stream, "in %.3f seconds" % self.total_tt print >> self.stream width, list = self.get_print_list(amount) if list: diff --git a/lib-python/2.7.0/test/test_cfgparser.py b/lib-python/2.7.0/test/test_cfgparser.py --- a/lib-python/2.7.0/test/test_cfgparser.py +++ b/lib-python/2.7.0/test/test_cfgparser.py @@ -572,14 +572,14 @@ "k=v\n") output = StringIO.StringIO() self.cf.write(output) - self.assertEquals(output.getvalue(), - "[a]\n" - "k = v\n\n" - "[b]\n" - "o1 = 4\n" - "o2 = 3\n" - "o3 = 2\n" - "o4 = 1\n\n") + self.assertEqual(output.getvalue(), + "[a]\n" + "k = v\n\n" + "[b]\n" + "o1 = 4\n" + "o2 = 3\n" + "o3 = 2\n" + "o4 = 1\n\n") def test_main(): diff --git a/lib-python/2.7.0/distutils/tests/test_version.py b/lib-python/2.7.0/distutils/tests/test_version.py --- a/lib-python/2.7.0/distutils/tests/test_version.py +++ b/lib-python/2.7.0/distutils/tests/test_version.py @@ -7,12 +7,12 @@ def test_prerelease(self): version = StrictVersion('1.2.3a1') - self.assertEquals(version.version, (1, 2, 3)) - self.assertEquals(version.prerelease, ('a', 1)) - self.assertEquals(str(version), '1.2.3a1') + self.assertEqual(version.version, (1, 2, 3)) + self.assertEqual(version.prerelease, ('a', 1)) + self.assertEqual(str(version), '1.2.3a1') version = StrictVersion('1.2.0') - self.assertEquals(str(version), '1.2') + self.assertEqual(str(version), '1.2') def test_cmp_strict(self): versions = (('1.5.1', '1.5.2b2', -1), @@ -41,9 +41,9 @@ raise AssertionError(("cmp(%s, %s) " "shouldn't raise ValueError") % (v1, v2)) - self.assertEquals(res, wanted, - 'cmp(%s, %s) should be %s, got %s' % - (v1, v2, wanted, 
res)) + self.assertEqual(res, wanted, + 'cmp(%s, %s) should be %s, got %s' % + (v1, v2, wanted, res)) def test_cmp(self): @@ -59,9 +59,9 @@ for v1, v2, wanted in versions: res = LooseVersion(v1).__cmp__(LooseVersion(v2)) - self.assertEquals(res, wanted, - 'cmp(%s, %s) should be %s, got %s' % - (v1, v2, wanted, res)) + self.assertEqual(res, wanted, + 'cmp(%s, %s) should be %s, got %s' % + (v1, v2, wanted, res)) def test_suite(): return unittest.makeSuite(VersionTestCase) diff --git a/lib-python/2.7.0/test/string_tests.py b/lib-python/2.7.0/test/string_tests.py --- a/lib-python/2.7.0/test/string_tests.py +++ b/lib-python/2.7.0/test/string_tests.py @@ -62,7 +62,7 @@ pass object = subtype(object) realresult = getattr(object, methodname)(*args) - self.assert_(object is not realresult) + self.assertTrue(object is not realresult) # check that object.method(*args) raises exc def checkraises(self, exc, object, methodname, *args): @@ -1243,34 +1243,34 @@ pass s1 = subclass("abcd") s2 = t().join([s1]) - self.assert_(s1 is not s2) - self.assert_(type(s2) is t) + self.assertTrue(s1 is not s2) + self.assertTrue(type(s2) is t) s1 = t("abcd") s2 = t().join([s1]) - self.assert_(s1 is s2) + self.assertTrue(s1 is s2) # Should also test mixed-type join. if t is unicode: s1 = subclass("abcd") s2 = "".join([s1]) - self.assert_(s1 is not s2) - self.assert_(type(s2) is t) + self.assertTrue(s1 is not s2) + self.assertTrue(type(s2) is t) s1 = t("abcd") s2 = "".join([s1]) - self.assert_(s1 is s2) + self.assertTrue(s1 is s2) elif t is str: s1 = subclass("abcd") s2 = u"".join([s1]) - self.assert_(s1 is not s2) - self.assert_(type(s2) is unicode) # promotes! + self.assertTrue(s1 is not s2) + self.assertTrue(type(s2) is unicode) # promotes! s1 = t("abcd") s2 = u"".join([s1]) - self.assert_(s1 is not s2) - self.assert_(type(s2) is unicode) # promotes! + self.assertTrue(s1 is not s2) + self.assertTrue(type(s2) is unicode) # promotes! 
else: self.fail("unexpected type for MixinStrUnicodeTest %r" % t) diff --git a/lib-python/2.7.0/test/test_platform.py b/lib-python/2.7.0/test/test_platform.py --- a/lib-python/2.7.0/test/test_platform.py +++ b/lib-python/2.7.0/test/test_platform.py @@ -183,17 +183,17 @@ # On Snow Leopard, sw_vers reports 10.6.0 as 10.6 if len_diff > 0: expect_list.extend(['0'] * len_diff) - self.assertEquals(result_list, expect_list) + self.assertEqual(result_list, expect_list) # res[1] claims to contain # (version, dev_stage, non_release_version) # That information is no longer available - self.assertEquals(res[1], ('', '', '')) + self.assertEqual(res[1], ('', '', '')) if sys.byteorder == 'little': - self.assertEquals(res[2], 'i386') + self.assertEqual(res[2], 'i386') else: - self.assertEquals(res[2], 'PowerPC') + self.assertEqual(res[2], 'PowerPC') @unittest.skipUnless(sys.platform == 'darwin', "OSX only test") @@ -211,8 +211,8 @@ else: # parent cpid, sts = os.waitpid(pid, 0) - self.assertEquals(cpid, pid) - self.assertEquals(sts, 0) + self.assertEqual(cpid, pid) + self.assertEqual(sts, 0) def test_dist(self): res = platform.dist() diff --git a/lib-python/2.7.0/bsddb/test/test_replication.py b/lib-python/2.7.0/bsddb/test/test_replication.py --- a/lib-python/2.7.0/bsddb/test/test_replication.py +++ b/lib-python/2.7.0/bsddb/test/test_replication.py @@ -88,23 +88,23 @@ self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123) self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321) - self.assertEquals(self.dbenvMaster.rep_get_timeout( + self.assertEqual(self.dbenvMaster.rep_get_timeout( db.DB_REP_CONNECTION_RETRY), 100123) - self.assertEquals(self.dbenvClient.rep_get_timeout( + self.assertEqual(self.dbenvClient.rep_get_timeout( db.DB_REP_CONNECTION_RETRY), 100321) self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234) self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432) - self.assertEquals(self.dbenvMaster.rep_get_timeout( + 
self.assertEqual(self.dbenvMaster.rep_get_timeout( db.DB_REP_ELECTION_TIMEOUT), 100234) - self.assertEquals(self.dbenvClient.rep_get_timeout( + self.assertEqual(self.dbenvClient.rep_get_timeout( db.DB_REP_ELECTION_TIMEOUT), 100432) self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345) self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543) - self.assertEquals(self.dbenvMaster.rep_get_timeout( + self.assertEqual(self.dbenvMaster.rep_get_timeout( db.DB_REP_ELECTION_RETRY), 100345) - self.assertEquals(self.dbenvClient.rep_get_timeout( + self.assertEqual(self.dbenvClient.rep_get_timeout( db.DB_REP_ELECTION_RETRY), 100543) self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL) @@ -113,13 +113,13 @@ self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER); self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT); - self.assertEquals(self.dbenvMaster.rep_get_nsites(),2) - self.assertEquals(self.dbenvClient.rep_get_nsites(),2) - self.assertEquals(self.dbenvMaster.rep_get_priority(),10) - self.assertEquals(self.dbenvClient.rep_get_priority(),0) - self.assertEquals(self.dbenvMaster.repmgr_get_ack_policy(), + self.assertEqual(self.dbenvMaster.rep_get_nsites(),2) + self.assertEqual(self.dbenvClient.rep_get_nsites(),2) + self.assertEqual(self.dbenvMaster.rep_get_priority(),10) + self.assertEqual(self.dbenvClient.rep_get_priority(),0) + self.assertEqual(self.dbenvMaster.repmgr_get_ack_policy(), db.DB_REPMGR_ACKS_ALL) - self.assertEquals(self.dbenvClient.repmgr_get_ack_policy(), + self.assertEqual(self.dbenvClient.repmgr_get_ack_policy(), db.DB_REPMGR_ACKS_ALL) # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE @@ -143,16 +143,16 @@ startup_timeout = True d = self.dbenvMaster.repmgr_site_list() - self.assertEquals(len(d), 1) - self.assertEquals(d[0][0], "127.0.0.1") - self.assertEquals(d[0][1], client_port) + self.assertEqual(len(d), 1) + self.assertEqual(d[0][0], "127.0.0.1") + self.assertEqual(d[0][1], client_port) 
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \ (d[0][2]==db.DB_REPMGR_DISCONNECTED)) d = self.dbenvClient.repmgr_site_list() - self.assertEquals(len(d), 1) - self.assertEquals(d[0][0], "127.0.0.1") - self.assertEquals(d[0][1], master_port) + self.assertEqual(len(d), 1) + self.assertEqual(d[0][0], "127.0.0.1") + self.assertEqual(d[0][1], master_port) self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \ (d[0][2]==db.DB_REPMGR_DISCONNECTED)) @@ -207,7 +207,7 @@ self.skipTest("replication test skipped due to random failure, " "see issue 3892") self.assertTrue(time.time()= (4,7) : def test02_test_request(self) : diff --git a/lib-python/2.7.0/distutils/fancy_getopt.py b/lib-python/2.7.0/distutils/fancy_getopt.py --- a/lib-python/2.7.0/distutils/fancy_getopt.py +++ b/lib-python/2.7.0/distutils/fancy_getopt.py @@ -8,7 +8,7 @@ * options set attributes of a passed-in object """ -__revision__ = "$Id: fancy_getopt.py 76956 2009-12-21 01:22:46Z tarek.ziade $" +__revision__ = "$Id$" import sys import string diff --git a/lib-python/2.7.0/test/test_bigmem.py b/lib-python/2.7.0/test/test_bigmem.py --- a/lib-python/2.7.0/test/test_bigmem.py +++ b/lib-python/2.7.0/test/test_bigmem.py @@ -13,7 +13,7 @@ # doesn't release the old 's' (if it exists) until well after its new # value has been created. Use 'del s' before the create_largestring call. # -# - Do *not* compare large objects using assertEquals or similar. It's a +# - Do *not* compare large objects using assertEqual or similar. It's a # lengty operation and the errormessage will be utterly useless due to # its size. To make sure whether a result has the right contents, better # to use the strip or count methods, or compare meaningful slices. 
@@ -39,20 +39,20 @@ SUBSTR = ' abc def ghi' s = '-' * size + SUBSTR caps = s.capitalize() - self.assertEquals(caps[-len(SUBSTR):], + self.assertEqual(caps[-len(SUBSTR):], SUBSTR.capitalize()) - self.assertEquals(caps.lstrip('-'), SUBSTR) + self.assertEqual(caps.lstrip('-'), SUBSTR) @bigmemtest(minsize=_2G + 10, memuse=1) def test_center(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.center(size) - self.assertEquals(len(s), size) + self.assertEqual(len(s), size) lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2 if len(s) % 2: lpadsize += 1 - self.assertEquals(s[lpadsize:-rpadsize], SUBSTR) - self.assertEquals(s.strip(), SUBSTR.strip()) + self.assertEqual(s[lpadsize:-rpadsize], SUBSTR) + self.assertEqual(s.strip(), SUBSTR.strip()) @precisionbigmemtest(size=_2G - 1, memuse=1) def test_center_unicode(self, size): @@ -62,36 +62,36 @@ except OverflowError: pass # acceptable on 32-bit else: - self.assertEquals(len(s), size) + self.assertEqual(len(s), size) lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2 if len(s) % 2: lpadsize += 1 - self.assertEquals(s[lpadsize:-rpadsize], SUBSTR) - self.assertEquals(s.strip(), SUBSTR.strip()) + self.assertEqual(s[lpadsize:-rpadsize], SUBSTR) + self.assertEqual(s.strip(), SUBSTR.strip()) del s @bigmemtest(minsize=_2G, memuse=2) def test_count(self, size): SUBSTR = ' abc def ghi' s = '.' * size + SUBSTR - self.assertEquals(s.count('.'), size) + self.assertEqual(s.count('.'), size) s += '.' - self.assertEquals(s.count('.'), size + 1) - self.assertEquals(s.count(' '), 3) - self.assertEquals(s.count('i'), 1) - self.assertEquals(s.count('j'), 0) + self.assertEqual(s.count('.'), size + 1) + self.assertEqual(s.count(' '), 3) + self.assertEqual(s.count('i'), 1) + self.assertEqual(s.count('j'), 0) @bigmemtest(minsize=_2G + 2, memuse=3) def test_decode(self, size): s = '.' 
* size - self.assertEquals(len(s.decode('utf-8')), size) + self.assertEqual(len(s.decode('utf-8')), size) def basic_encode_test(self, size, enc, c=u'.', expectedsize=None): if expectedsize is None: expectedsize = size s = c * size - self.assertEquals(len(s.encode(enc)), expectedsize) + self.assertEqual(len(s.encode(enc)), expectedsize) @bigmemtest(minsize=_2G + 2, memuse=3) def test_encode(self, size): @@ -147,43 +147,43 @@ def test_expandtabs(self, size): s = '-' * size tabsize = 8 - self.assertEquals(s.expandtabs(), s) + self.assertEqual(s.expandtabs(), s) del s slen, remainder = divmod(size, tabsize) s = ' \t' * slen s = s.expandtabs(tabsize) - self.assertEquals(len(s), size - remainder) - self.assertEquals(len(s.strip(' ')), 0) + self.assertEqual(len(s), size - remainder) + self.assertEqual(len(s.strip(' ')), 0) @bigmemtest(minsize=_2G, memuse=2) def test_find(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) - self.assertEquals(s.find(' '), 0) - self.assertEquals(s.find(SUBSTR), 0) - self.assertEquals(s.find(' ', sublen), sublen + size) - self.assertEquals(s.find(SUBSTR, len(SUBSTR)), sublen + size) - self.assertEquals(s.find('i'), SUBSTR.find('i')) - self.assertEquals(s.find('i', sublen), + self.assertEqual(s.find(' '), 0) + self.assertEqual(s.find(SUBSTR), 0) + self.assertEqual(s.find(' ', sublen), sublen + size) + self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size) + self.assertEqual(s.find('i'), SUBSTR.find('i')) + self.assertEqual(s.find('i', sublen), sublen + size + SUBSTR.find('i')) - self.assertEquals(s.find('i', size), + self.assertEqual(s.find('i', size), sublen + size + SUBSTR.find('i')) - self.assertEquals(s.find('j'), -1) + self.assertEqual(s.find('j'), -1) @bigmemtest(minsize=_2G, memuse=2) def test_index(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) - self.assertEquals(s.index(' '), 0) - self.assertEquals(s.index(SUBSTR), 0) - 
self.assertEquals(s.index(' ', sublen), sublen + size) - self.assertEquals(s.index(SUBSTR, sublen), sublen + size) - self.assertEquals(s.index('i'), SUBSTR.index('i')) - self.assertEquals(s.index('i', sublen), + self.assertEqual(s.index(' '), 0) + self.assertEqual(s.index(SUBSTR), 0) + self.assertEqual(s.index(' ', sublen), sublen + size) + self.assertEqual(s.index(SUBSTR, sublen), sublen + size) + self.assertEqual(s.index('i'), SUBSTR.index('i')) + self.assertEqual(s.index('i', sublen), sublen + size + SUBSTR.index('i')) - self.assertEquals(s.index('i', size), + self.assertEqual(s.index('i', size), sublen + size + SUBSTR.index('i')) self.assertRaises(ValueError, s.index, 'j') @@ -252,8 +252,8 @@ def test_join(self, size): s = 'A' * size x = s.join(['aaaaa', 'bbbbb']) - self.assertEquals(x.count('a'), 5) - self.assertEquals(x.count('b'), 5) + self.assertEqual(x.count('a'), 5) + self.assertEqual(x.count('b'), 5) self.assertTrue(x.startswith('aaaaaA')) self.assertTrue(x.endswith('Abbbbb')) @@ -262,25 +262,25 @@ SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.assertTrue(s.startswith(SUBSTR + ' ')) - self.assertEquals(len(s), size) - self.assertEquals(s.strip(), SUBSTR.strip()) + self.assertEqual(len(s), size) + self.assertEqual(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G + 10, memuse=2) def test_lower(self, size): s = 'A' * size s = s.lower() - self.assertEquals(len(s), size) - self.assertEquals(s.count('a'), size) + self.assertEqual(len(s), size) + self.assertEqual(s.count('a'), size) @bigmemtest(minsize=_2G + 10, memuse=1) def test_lstrip(self, size): SUBSTR = 'abc def ghi' s = SUBSTR.rjust(size) - self.assertEquals(len(s), size) - self.assertEquals(s.lstrip(), SUBSTR.lstrip()) + self.assertEqual(len(s), size) + self.assertEqual(s.lstrip(), SUBSTR.lstrip()) del s s = SUBSTR.ljust(size) - self.assertEquals(len(s), size) + self.assertEqual(len(s), size) stripped = s.lstrip() self.assertTrue(stripped is s) @@ -289,44 +289,44 @@ replacement = 'a' s = ' ' * 
size s = s.replace(' ', replacement) - self.assertEquals(len(s), size) - self.assertEquals(s.count(replacement), size) + self.assertEqual(len(s), size) + self.assertEqual(s.count(replacement), size) s = s.replace(replacement, ' ', size - 4) - self.assertEquals(len(s), size) - self.assertEquals(s.count(replacement), 4) - self.assertEquals(s[-10:], ' aaaa') + self.assertEqual(len(s), size) + self.assertEqual(s.count(replacement), 4) + self.assertEqual(s[-10:], ' aaaa') @bigmemtest(minsize=_2G, memuse=2) def test_rfind(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) - self.assertEquals(s.rfind(' '), sublen + size + SUBSTR.rfind(' ')) - self.assertEquals(s.rfind(SUBSTR), sublen + size) - self.assertEquals(s.rfind(' ', 0, size), SUBSTR.rfind(' ')) - self.assertEquals(s.rfind(SUBSTR, 0, sublen + size), 0) - self.assertEquals(s.rfind('i'), sublen + size + SUBSTR.rfind('i')) - self.assertEquals(s.rfind('i', 0, sublen), SUBSTR.rfind('i')) - self.assertEquals(s.rfind('i', 0, sublen + size), - SUBSTR.rfind('i')) - self.assertEquals(s.rfind('j'), -1) + self.assertEqual(s.rfind(' '), sublen + size + SUBSTR.rfind(' ')) + self.assertEqual(s.rfind(SUBSTR), sublen + size) + self.assertEqual(s.rfind(' ', 0, size), SUBSTR.rfind(' ')) + self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0) + self.assertEqual(s.rfind('i'), sublen + size + SUBSTR.rfind('i')) + self.assertEqual(s.rfind('i', 0, sublen), SUBSTR.rfind('i')) + self.assertEqual(s.rfind('i', 0, sublen + size), + SUBSTR.rfind('i')) + self.assertEqual(s.rfind('j'), -1) @bigmemtest(minsize=_2G, memuse=2) def test_rindex(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) - self.assertEquals(s.rindex(' '), + self.assertEqual(s.rindex(' '), sublen + size + SUBSTR.rindex(' ')) - self.assertEquals(s.rindex(SUBSTR), sublen + size) - self.assertEquals(s.rindex(' ', 0, sublen + size - 1), - SUBSTR.rindex(' ')) - 
self.assertEquals(s.rindex(SUBSTR, 0, sublen + size), 0) - self.assertEquals(s.rindex('i'), - sublen + size + SUBSTR.rindex('i')) - self.assertEquals(s.rindex('i', 0, sublen), SUBSTR.rindex('i')) - self.assertEquals(s.rindex('i', 0, sublen + size), - SUBSTR.rindex('i')) + self.assertEqual(s.rindex(SUBSTR), sublen + size) + self.assertEqual(s.rindex(' ', 0, sublen + size - 1), + SUBSTR.rindex(' ')) + self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0) + self.assertEqual(s.rindex('i'), + sublen + size + SUBSTR.rindex('i')) + self.assertEqual(s.rindex('i', 0, sublen), SUBSTR.rindex('i')) + self.assertEqual(s.rindex('i', 0, sublen + size), + SUBSTR.rindex('i')) self.assertRaises(ValueError, s.rindex, 'j') @bigmemtest(minsize=_2G + 10, memuse=1) @@ -334,18 +334,18 @@ SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.assertTrue(s.startswith(SUBSTR + ' ')) - self.assertEquals(len(s), size) - self.assertEquals(s.strip(), SUBSTR.strip()) + self.assertEqual(len(s), size) + self.assertEqual(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G + 10, memuse=1) def test_rstrip(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) - self.assertEquals(len(s), size) - self.assertEquals(s.rstrip(), SUBSTR.rstrip()) + self.assertEqual(len(s), size) + self.assertEqual(s.rstrip(), SUBSTR.rstrip()) del s s = SUBSTR.rjust(size) - self.assertEquals(len(s), size) + self.assertEqual(len(s), size) stripped = s.rstrip() self.assertTrue(stripped is s) @@ -360,12 +360,12 @@ SUBSTR = 'a' + ' ' * chunksize s = SUBSTR * chunksize l = s.split() - self.assertEquals(len(l), chunksize) - self.assertEquals(set(l), set(['a'])) + self.assertEqual(len(l), chunksize) + self.assertEqual(set(l), set(['a'])) del l l = s.split('a') - self.assertEquals(len(l), chunksize + 1) - self.assertEquals(set(l), set(['', ' ' * chunksize])) + self.assertEqual(len(l), chunksize + 1) + self.assertEqual(set(l), set(['', ' ' * chunksize])) # Allocates a string of twice size (and briefly two) and a list of # size. 
Because of internal affairs, the s.split() call produces a @@ -377,12 +377,12 @@ def test_split_large(self, size): s = ' a' * size + ' ' l = s.split() - self.assertEquals(len(l), size) - self.assertEquals(set(l), set(['a'])) + self.assertEqual(len(l), size) + self.assertEqual(set(l), set(['a'])) del l l = s.split('a') - self.assertEquals(len(l), size + 1) - self.assertEquals(set(l), set([' '])) + self.assertEqual(len(l), size + 1) + self.assertEqual(set(l), set([' '])) @bigmemtest(minsize=_2G, memuse=2.1) def test_splitlines(self, size): @@ -392,8 +392,8 @@ SUBSTR = ' ' * chunksize + '\n' + ' ' * chunksize + '\r\n' s = SUBSTR * chunksize l = s.splitlines() - self.assertEquals(len(l), chunksize * 2) - self.assertEquals(set(l), set([' ' * chunksize])) + self.assertEqual(len(l), chunksize * 2) + self.assertEqual(set(l), set([' ' * chunksize])) @bigmemtest(minsize=_2G, memuse=2) def test_startswith(self, size): @@ -407,12 +407,12 @@ def test_strip(self, size): SUBSTR = ' abc def ghi ' s = SUBSTR.rjust(size) - self.assertEquals(len(s), size) - self.assertEquals(s.strip(), SUBSTR.strip()) + self.assertEqual(len(s), size) + self.assertEqual(s.strip(), SUBSTR.strip()) del s s = SUBSTR.ljust(size) - self.assertEquals(len(s), size) - self.assertEquals(s.strip(), SUBSTR.strip()) + self.assertEqual(len(s), size) + self.assertEqual(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G, memuse=2) def test_swapcase(self, size): @@ -421,9 +421,9 @@ repeats = size // sublen + 2 s = SUBSTR * repeats s = s.swapcase() - self.assertEquals(len(s), sublen * repeats) - self.assertEquals(s[:sublen * 3], SUBSTR.swapcase() * 3) - self.assertEquals(s[-sublen * 3:], SUBSTR.swapcase() * 3) + self.assertEqual(len(s), sublen * repeats) + self.assertEqual(s[:sublen * 3], SUBSTR.swapcase() * 3) + self.assertEqual(s[-sublen * 3:], SUBSTR.swapcase() * 3) @bigmemtest(minsize=_2G, memuse=2) def test_title(self, size): @@ -441,19 +441,19 @@ repeats = size // sublen + 2 s = SUBSTR * repeats s = 
s.translate(trans) - self.assertEquals(len(s), repeats * sublen) - self.assertEquals(s[:sublen], SUBSTR.translate(trans)) - self.assertEquals(s[-sublen:], SUBSTR.translate(trans)) - self.assertEquals(s.count('.'), 0) - self.assertEquals(s.count('!'), repeats * 2) - self.assertEquals(s.count('z'), repeats * 3) + self.assertEqual(len(s), repeats * sublen) + self.assertEqual(s[:sublen], SUBSTR.translate(trans)) + self.assertEqual(s[-sublen:], SUBSTR.translate(trans)) + self.assertEqual(s.count('.'), 0) + self.assertEqual(s.count('!'), repeats * 2) + self.assertEqual(s.count('z'), repeats * 3) @bigmemtest(minsize=_2G + 5, memuse=2) def test_upper(self, size): s = 'a' * size s = s.upper() - self.assertEquals(len(s), size) - self.assertEquals(s.count('A'), size) + self.assertEqual(len(s), size) + self.assertEqual(s.count('A'), size) @bigmemtest(minsize=_2G + 20, memuse=1) def test_zfill(self, size): @@ -461,8 +461,8 @@ s = SUBSTR.zfill(size) self.assertTrue(s.endswith('0' + SUBSTR[1:])) self.assertTrue(s.startswith('-0')) - self.assertEquals(len(s), size) - self.assertEquals(s.count('0'), size - len(SUBSTR)) + self.assertEqual(len(s), size) + self.assertEqual(s.count('0'), size - len(SUBSTR)) @bigmemtest(minsize=_2G + 10, memuse=2) def test_format(self, size): @@ -471,7 +471,7 @@ self.assertTrue(s == sf) del sf sf = '..%s..' % (s,) - self.assertEquals(len(sf), len(s) + 4) + self.assertEqual(len(sf), len(s) + 4) self.assertTrue(sf.startswith('..-')) self.assertTrue(sf.endswith('-..')) del s, sf @@ -481,18 +481,18 @@ s = ''.join([edge, '%s', edge]) del edge s = s % '...' 
- self.assertEquals(len(s), size * 2 + 3) - self.assertEquals(s.count('.'), 3) - self.assertEquals(s.count('-'), size * 2) + self.assertEqual(len(s), size * 2 + 3) + self.assertEqual(s.count('.'), 3) + self.assertEqual(s.count('-'), size * 2) @bigmemtest(minsize=_2G + 10, memuse=2) def test_repr_small(self, size): s = '-' * size s = repr(s) - self.assertEquals(len(s), size + 2) - self.assertEquals(s[0], "'") - self.assertEquals(s[-1], "'") - self.assertEquals(s.count('-'), size) + self.assertEqual(len(s), size + 2) + self.assertEqual(s[0], "'") + self.assertEqual(s[-1], "'") + self.assertEqual(s.count('-'), size) del s # repr() will create a string four times as large as this 'binary # string', but we don't want to allocate much more than twice @@ -500,21 +500,21 @@ size = size // 5 * 2 s = '\x00' * size s = repr(s) - self.assertEquals(len(s), size * 4 + 2) - self.assertEquals(s[0], "'") - self.assertEquals(s[-1], "'") - self.assertEquals(s.count('\\'), size) - self.assertEquals(s.count('0'), size * 2) + self.assertEqual(len(s), size * 4 + 2) + self.assertEqual(s[0], "'") + self.assertEqual(s[-1], "'") + self.assertEqual(s.count('\\'), size) + self.assertEqual(s.count('0'), size * 2) @bigmemtest(minsize=_2G + 10, memuse=5) def test_repr_large(self, size): s = '\x00' * size s = repr(s) - self.assertEquals(len(s), size * 4 + 2) - self.assertEquals(s[0], "'") - self.assertEquals(s[-1], "'") - self.assertEquals(s.count('\\'), size) - self.assertEquals(s.count('0'), size * 2) + self.assertEqual(len(s), size * 4 + 2) + self.assertEqual(s[0], "'") + self.assertEqual(s[-1], "'") + self.assertEqual(s.count('\\'), size) + self.assertEqual(s.count('0'), size * 2) @bigmemtest(minsize=2**32 // 5, memuse=6+2) def test_unicode_repr(self, size): @@ -526,20 +526,20 @@ @bigmemtest(minsize=_1G + 2, memuse=3) def test_concat(self, size): s = '.' 
* size - self.assertEquals(len(s), size) + self.assertEqual(len(s), size) s = s + s - self.assertEquals(len(s), size * 2) - self.assertEquals(s.count('.'), size * 2) + self.assertEqual(len(s), size * 2) + self.assertEqual(s.count('.'), size * 2) # This test is meaningful even with size < 2G, as long as the # repeated string is > 2G (but it tests more if both are > 2G :) @bigmemtest(minsize=_1G + 2, memuse=3) def test_repeat(self, size): s = '.' * size - self.assertEquals(len(s), size) + self.assertEqual(len(s), size) s = s * 2 - self.assertEquals(len(s), size * 2) - self.assertEquals(s.count('.'), size * 2) + self.assertEqual(len(s), size * 2) + self.assertEqual(s.count('.'), size * 2) @bigmemtest(minsize=_2G + 20, memuse=1) def test_slice_and_getitem(self, size): @@ -549,26 +549,26 @@ stepsize = len(s) // 100 stepsize = stepsize - (stepsize % sublen) for i in range(0, len(s) - stepsize, stepsize): - self.assertEquals(s[i], SUBSTR[0]) - self.assertEquals(s[i:i + sublen], SUBSTR) - self.assertEquals(s[i:i + sublen:2], SUBSTR[::2]) + self.assertEqual(s[i], SUBSTR[0]) + self.assertEqual(s[i:i + sublen], SUBSTR) + self.assertEqual(s[i:i + sublen:2], SUBSTR[::2]) if i > 0: - self.assertEquals(s[i + sublen - 1:i - 1:-3], - SUBSTR[sublen::-3]) + self.assertEqual(s[i + sublen - 1:i - 1:-3], + SUBSTR[sublen::-3]) # Make sure we do some slicing and indexing near the end of the # string, too. 
- self.assertEquals(s[len(s) - 1], SUBSTR[-1]) - self.assertEquals(s[-1], SUBSTR[-1]) - self.assertEquals(s[len(s) - 10], SUBSTR[0]) - self.assertEquals(s[-sublen], SUBSTR[0]) - self.assertEquals(s[len(s):], '') - self.assertEquals(s[len(s) - 1:], SUBSTR[-1]) - self.assertEquals(s[-1:], SUBSTR[-1]) - self.assertEquals(s[len(s) - sublen:], SUBSTR) - self.assertEquals(s[-sublen:], SUBSTR) - self.assertEquals(len(s[:]), len(s)) - self.assertEquals(len(s[:len(s) - 5]), len(s) - 5) - self.assertEquals(len(s[5:-5]), len(s) - 10) + self.assertEqual(s[len(s) - 1], SUBSTR[-1]) + self.assertEqual(s[-1], SUBSTR[-1]) + self.assertEqual(s[len(s) - 10], SUBSTR[0]) + self.assertEqual(s[-sublen], SUBSTR[0]) + self.assertEqual(s[len(s):], '') + self.assertEqual(s[len(s) - 1:], SUBSTR[-1]) + self.assertEqual(s[-1:], SUBSTR[-1]) + self.assertEqual(s[len(s) - sublen:], SUBSTR) + self.assertEqual(s[-sublen:], SUBSTR) + self.assertEqual(len(s[:]), len(s)) + self.assertEqual(len(s[:len(s) - 5]), len(s) - 5) + self.assertEqual(len(s[5:-5]), len(s) - 10) self.assertRaises(IndexError, operator.getitem, s, len(s)) self.assertRaises(IndexError, operator.getitem, s, len(s) + 1) @@ -643,9 +643,9 @@ # skipped, in verbose mode.) 
def basic_concat_test(self, size): t = ((),) * size - self.assertEquals(len(t), size) + self.assertEqual(len(t), size) t = t + t - self.assertEquals(len(t), size * 2) + self.assertEqual(len(t), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_concat_small(self, size): @@ -658,7 +658,7 @@ @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5) def test_contains(self, size): t = (1, 2, 3, 4, 5) * size - self.assertEquals(len(t), size * 5) + self.assertEqual(len(t), size * 5) self.assertIn(5, t) self.assertNotIn((1, 2, 3, 4, 5), t) self.assertNotIn(0, t) @@ -674,27 +674,27 @@ @bigmemtest(minsize=_2G + 10, memuse=8) def test_index_and_slice(self, size): t = (None,) * size - self.assertEquals(len(t), size) - self.assertEquals(t[-1], None) - self.assertEquals(t[5], None) - self.assertEquals(t[size - 1], None) + self.assertEqual(len(t), size) + self.assertEqual(t[-1], None) + self.assertEqual(t[5], None) + self.assertEqual(t[size - 1], None) self.assertRaises(IndexError, operator.getitem, t, size) - self.assertEquals(t[:5], (None,) * 5) - self.assertEquals(t[-5:], (None,) * 5) - self.assertEquals(t[20:25], (None,) * 5) - self.assertEquals(t[-25:-20], (None,) * 5) - self.assertEquals(t[size - 5:], (None,) * 5) - self.assertEquals(t[size - 5:size], (None,) * 5) - self.assertEquals(t[size - 6:size - 2], (None,) * 4) - self.assertEquals(t[size:size], ()) - self.assertEquals(t[size:size+5], ()) + self.assertEqual(t[:5], (None,) * 5) + self.assertEqual(t[-5:], (None,) * 5) + self.assertEqual(t[20:25], (None,) * 5) + self.assertEqual(t[-25:-20], (None,) * 5) + self.assertEqual(t[size - 5:], (None,) * 5) + self.assertEqual(t[size - 5:size], (None,) * 5) + self.assertEqual(t[size - 6:size - 2], (None,) * 4) + self.assertEqual(t[size:size], ()) + self.assertEqual(t[size:size+5], ()) # Like test_concat, split in two. 
def basic_test_repeat(self, size): t = ('',) * size - self.assertEquals(len(t), size) + self.assertEqual(len(t), size) t = t * 2 - self.assertEquals(len(t), size * 2) + self.assertEqual(len(t), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_repeat_small(self, size): @@ -717,9 +717,9 @@ else: count = 0 for item in t: - self.assertEquals(item, count) + self.assertEqual(item, count) count += 1 - self.assertEquals(count, size) + self.assertEqual(count, size) @precisionbigmemtest(size=_1G - 25, memuse=9) def test_from_almost_2G_generator(self, size): @@ -727,9 +727,9 @@ t = tuple(xrange(size)) count = 0 for item in t: - self.assertEquals(item, count) + self.assertEqual(item, count) count += 1 - self.assertEquals(count, size) + self.assertEqual(count, size) except MemoryError: pass # acceptable, expected on 32-bit @@ -738,10 +738,10 @@ t = (0,) * size s = repr(t) # The repr of a tuple of 0's is exactly three times the tuple length. - self.assertEquals(len(s), size * 3) - self.assertEquals(s[:5], '(0, 0') - self.assertEquals(s[-5:], '0, 0)') - self.assertEquals(s.count('0'), size) + self.assertEqual(len(s), size * 3) + self.assertEqual(s[:5], '(0, 0') + self.assertEqual(s[-5:], '0, 0)') + self.assertEqual(s.count('0'), size) @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3) def test_repr_small(self, size): @@ -777,9 +777,9 @@ # skipped, in verbose mode.) 
def basic_test_concat(self, size): l = [[]] * size - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) l = l + l - self.assertEquals(len(l), size * 2) + self.assertEqual(len(l), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_concat_small(self, size): @@ -792,7 +792,7 @@ def basic_test_inplace_concat(self, size): l = [sys.stdout] * size l += l - self.assertEquals(len(l), size * 2) + self.assertEqual(len(l), size * 2) self.assertTrue(l[0] is l[-1]) self.assertTrue(l[size - 1] is l[size + 1]) @@ -807,7 +807,7 @@ @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5) def test_contains(self, size): l = [1, 2, 3, 4, 5] * size - self.assertEquals(len(l), size * 5) + self.assertEqual(len(l), size * 5) self.assertIn(5, l) self.assertNotIn([1, 2, 3, 4, 5], l) self.assertNotIn(0, l) @@ -820,66 +820,66 @@ @bigmemtest(minsize=_2G + 10, memuse=8) def test_index_and_slice(self, size): l = [None] * size - self.assertEquals(len(l), size) - self.assertEquals(l[-1], None) - self.assertEquals(l[5], None) - self.assertEquals(l[size - 1], None) + self.assertEqual(len(l), size) + self.assertEqual(l[-1], None) + self.assertEqual(l[5], None) + self.assertEqual(l[size - 1], None) self.assertRaises(IndexError, operator.getitem, l, size) - self.assertEquals(l[:5], [None] * 5) - self.assertEquals(l[-5:], [None] * 5) - self.assertEquals(l[20:25], [None] * 5) - self.assertEquals(l[-25:-20], [None] * 5) - self.assertEquals(l[size - 5:], [None] * 5) - self.assertEquals(l[size - 5:size], [None] * 5) - self.assertEquals(l[size - 6:size - 2], [None] * 4) - self.assertEquals(l[size:size], []) - self.assertEquals(l[size:size+5], []) + self.assertEqual(l[:5], [None] * 5) + self.assertEqual(l[-5:], [None] * 5) + self.assertEqual(l[20:25], [None] * 5) + self.assertEqual(l[-25:-20], [None] * 5) + self.assertEqual(l[size - 5:], [None] * 5) + self.assertEqual(l[size - 5:size], [None] * 5) + self.assertEqual(l[size - 6:size - 2], [None] * 4) + self.assertEqual(l[size:size], []) + 
self.assertEqual(l[size:size+5], []) l[size - 2] = 5 - self.assertEquals(len(l), size) - self.assertEquals(l[-3:], [None, 5, None]) - self.assertEquals(l.count(5), 1) + self.assertEqual(len(l), size) + self.assertEqual(l[-3:], [None, 5, None]) + self.assertEqual(l.count(5), 1) self.assertRaises(IndexError, operator.setitem, l, size, 6) - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) l[size - 7:] = [1, 2, 3, 4, 5] size -= 2 - self.assertEquals(len(l), size) - self.assertEquals(l[-7:], [None, None, 1, 2, 3, 4, 5]) + self.assertEqual(len(l), size) + self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5]) l[:7] = [1, 2, 3, 4, 5] size -= 2 - self.assertEquals(len(l), size) - self.assertEquals(l[:7], [1, 2, 3, 4, 5, None, None]) + self.assertEqual(len(l), size) + self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None]) del l[size - 1] size -= 1 - self.assertEquals(len(l), size) - self.assertEquals(l[-1], 4) + self.assertEqual(len(l), size) + self.assertEqual(l[-1], 4) del l[-2:] size -= 2 - self.assertEquals(len(l), size) - self.assertEquals(l[-1], 2) + self.assertEqual(len(l), size) + self.assertEqual(l[-1], 2) del l[0] size -= 1 - self.assertEquals(len(l), size) - self.assertEquals(l[0], 2) + self.assertEqual(len(l), size) + self.assertEqual(l[0], 2) del l[:2] size -= 2 - self.assertEquals(len(l), size) - self.assertEquals(l[0], 4) + self.assertEqual(len(l), size) + self.assertEqual(l[0], 4) # Like test_concat, split in two. 
def basic_test_repeat(self, size): l = [] * size self.assertFalse(l) l = [''] * size - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) l = l * 2 - self.assertEquals(len(l), size * 2) + self.assertEqual(len(l), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_repeat_small(self, size): @@ -892,13 +892,13 @@ def basic_test_inplace_repeat(self, size): l = [''] l *= size - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) self.assertTrue(l[0] is l[-1]) del l l = [''] * size l *= 2 - self.assertEquals(len(l), size * 2) + self.assertEqual(len(l), size * 2) self.assertTrue(l[size - 1] is l[-1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=16) @@ -913,10 +913,10 @@ l = [0] * size s = repr(l) # The repr of a list of 0's is exactly three times the list length. - self.assertEquals(len(s), size * 3) - self.assertEquals(s[:5], '[0, 0') - self.assertEquals(s[-5:], '0, 0]') - self.assertEquals(s.count('0'), size) + self.assertEqual(len(s), size * 3) + self.assertEqual(s[:5], '[0, 0') + self.assertEqual(s[-5:], '0, 0]') + self.assertEqual(s.count('0'), size) @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3) def test_repr_small(self, size): @@ -932,20 +932,20 @@ def test_append(self, size): l = [object()] * size l.append(object()) - self.assertEquals(len(l), size+1) + self.assertEqual(len(l), size+1) self.assertTrue(l[-3] is l[-2]) self.assertFalse(l[-2] is l[-1]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_count(self, size): l = [1, 2, 3, 4, 5] * size - self.assertEquals(l.count(1), size) - self.assertEquals(l.count("1"), 0) + self.assertEqual(l.count(1), size) + self.assertEqual(l.count("1"), 0) def basic_test_extend(self, size): l = [file] * size l.extend(l) - self.assertEquals(len(l), size * 2) + self.assertEqual(len(l), size * 2) self.assertTrue(l[0] is l[-1]) self.assertTrue(l[size - 1] is l[size + 1]) @@ -961,9 +961,9 @@ def test_index(self, size): l = [1L, 2L, 3L, 4L, 5L] * size size *= 5 - self.assertEquals(l.index(1), 
0) - self.assertEquals(l.index(5, size - 5), size - 1) - self.assertEquals(l.index(5, size - 5, size), size - 1) + self.assertEqual(l.index(1), 0) + self.assertEqual(l.index(5, size - 5), size - 1) + self.assertEqual(l.index(5, size - 5, size), size - 1) self.assertRaises(ValueError, l.index, 1, size - 4, size) self.assertRaises(ValueError, l.index, 6L) @@ -973,80 +973,80 @@ l = [1.0] * size l.insert(size - 1, "A") size += 1 - self.assertEquals(len(l), size) - self.assertEquals(l[-3:], [1.0, "A", 1.0]) + self.assertEqual(len(l), size) + self.assertEqual(l[-3:], [1.0, "A", 1.0]) l.insert(size + 1, "B") size += 1 - self.assertEquals(len(l), size) - self.assertEquals(l[-3:], ["A", 1.0, "B"]) + self.assertEqual(len(l), size) + self.assertEqual(l[-3:], ["A", 1.0, "B"]) l.insert(1, "C") size += 1 - self.assertEquals(len(l), size) - self.assertEquals(l[:3], [1.0, "C", 1.0]) - self.assertEquals(l[size - 3:], ["A", 1.0, "B"]) + self.assertEqual(len(l), size) + self.assertEqual(l[:3], [1.0, "C", 1.0]) + self.assertEqual(l[size - 3:], ["A", 1.0, "B"]) @bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5) def test_pop(self, size): l = [u"a", u"b", u"c", u"d", u"e"] * size size *= 5 - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) item = l.pop() size -= 1 - self.assertEquals(len(l), size) - self.assertEquals(item, u"e") - self.assertEquals(l[-2:], [u"c", u"d"]) + self.assertEqual(len(l), size) + self.assertEqual(item, u"e") + self.assertEqual(l[-2:], [u"c", u"d"]) item = l.pop(0) size -= 1 - self.assertEquals(len(l), size) - self.assertEquals(item, u"a") - self.assertEquals(l[:2], [u"b", u"c"]) + self.assertEqual(len(l), size) + self.assertEqual(item, u"a") + self.assertEqual(l[:2], [u"b", u"c"]) item = l.pop(size - 2) size -= 1 - self.assertEquals(len(l), size) - self.assertEquals(item, u"c") - self.assertEquals(l[-2:], [u"b", u"d"]) + self.assertEqual(len(l), size) + self.assertEqual(item, u"c") + self.assertEqual(l[-2:], [u"b", u"d"]) @bigmemtest(minsize=_2G + 
10, memuse=8) def test_remove(self, size): l = [10] * size - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) l.remove(10) size -= 1 - self.assertEquals(len(l), size) + self.assertEqual(len(l), size) # Because of the earlier l.remove(), this append doesn't trigger # a resize. l.append(5) size += 1 - self.assertEquals(len(l), size) - self.assertEquals(l[-2:], [10, 5]) + self.assertEqual(len(l), size) + self.assertEqual(l[-2:], [10, 5]) l.remove(5) size -= 1 - self.assertEquals(len(l), size) - self.assertEquals(l[-2:], [10, 10]) + self.assertEqual(len(l), size) + self.assertEqual(l[-2:], [10, 10]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_reverse(self, size): l = [1, 2, 3, 4, 5] * size l.reverse() - self.assertEquals(len(l), size * 5) - self.assertEquals(l[-5:], [5, 4, 3, 2, 1]) - self.assertEquals(l[:5], [5, 4, 3, 2, 1]) + self.assertEqual(len(l), size * 5) + self.assertEqual(l[-5:], [5, 4, 3, 2, 1]) + self.assertEqual(l[:5], [5, 4, 3, 2, 1]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_sort(self, size): l = [1, 2, 3, 4, 5] * size l.sort() - self.assertEquals(len(l), size * 5) - self.assertEquals(l.count(1), size) - self.assertEquals(l[:10], [1] * 10) - self.assertEquals(l[-10:], [5] * 10) + self.assertEqual(len(l), size * 5) + self.assertEqual(l.count(1), size) + self.assertEqual(l[:10], [1] * 10) + self.assertEqual(l[-10:], [5] * 10) class BufferTest(unittest.TestCase): @@ -1060,9 +1060,9 @@ else: count = 0 for c in b: - self.assertEquals(c, 'A') + self.assertEqual(c, 'A') count += 1 - self.assertEquals(count, size*4) + self.assertEqual(count, size*4) def test_main(): test_support.run_unittest(StrTest, TupleTest, ListTest, BufferTest) diff --git a/lib-python/2.7.0/test/test_time.py b/lib-python/2.7.0/test/test_time.py --- a/lib-python/2.7.0/test/test_time.py +++ b/lib-python/2.7.0/test/test_time.py @@ -94,7 +94,7 @@ # based on its value. 
expected = "2000 01 01 00 00 00 1 001" result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) - self.assertEquals(expected, result) + self.assertEqual(expected, result) def test_strptime(self): # Should be able to go round-trip from strftime to strptime without diff --git a/lib-python/2.7.0/imaplib.py b/lib-python/2.7.0/imaplib.py --- a/lib-python/2.7.0/imaplib.py +++ b/lib-python/2.7.0/imaplib.py @@ -22,7 +22,7 @@ __version__ = "2.58" -import binascii, random, re, socket, subprocess, sys, time +import binascii, errno, random, re, socket, subprocess, sys, time __all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", "Int2AP", "ParseFlags", "Time2Internaldate"] @@ -248,7 +248,14 @@ def shutdown(self): """Close I/O established in "open".""" self.file.close() - self.sock.close() + try: + self.sock.shutdown(socket.SHUT_RDWR) + except socket.error as e: + # The server might already have closed the connection + if e.errno != errno.ENOTCONN: + raise + finally: + self.sock.close() def socket(self): @@ -883,14 +890,17 @@ def _command_complete(self, name, tag): - self._check_bye() + # BYE is expected after LOGOUT + if name != 'LOGOUT': + self._check_bye() try: typ, data = self._get_tagged_response(tag) except self.abort, val: raise self.abort('command: %s => %s' % (name, val)) except self.error, val: raise self.error('command: %s => %s' % (name, val)) - self._check_bye() + if name != 'LOGOUT': + self._check_bye() if typ == 'BAD': raise self.error('%s command error: %s %s' % (name, typ, data)) return typ, data diff --git a/lib-python/2.7.0/test/test_ast.py b/lib-python/2.7.0/test/test_ast.py --- a/lib-python/2.7.0/test/test_ast.py +++ b/lib-python/2.7.0/test/test_ast.py @@ -147,7 +147,7 @@ (eval_tests, eval_results, "eval")): for i, o in itertools.izip(input, output): ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST) - self.assertEquals(to_tuple(ast_tree), o) + self.assertEqual(to_tuple(ast_tree), o) self._assertTrueorder(ast_tree, (0, 0)) def test_slice(self): @@ 
-171,20 +171,20 @@ def test_nodeclasses(self): x = ast.BinOp(1, 2, 3, lineno=0) - self.assertEquals(x.left, 1) - self.assertEquals(x.op, 2) - self.assertEquals(x.right, 3) - self.assertEquals(x.lineno, 0) + self.assertEqual(x.left, 1) + self.assertEqual(x.op, 2) + self.assertEqual(x.right, 3) + self.assertEqual(x.lineno, 0) # node raises exception when not given enough arguments self.assertRaises(TypeError, ast.BinOp, 1, 2) # can set attributes through kwargs too x = ast.BinOp(left=1, op=2, right=3, lineno=0) - self.assertEquals(x.left, 1) - self.assertEquals(x.op, 2) - self.assertEquals(x.right, 3) - self.assertEquals(x.lineno, 0) + self.assertEqual(x.left, 1) + self.assertEqual(x.op, 2) + self.assertEqual(x.right, 3) + self.assertEqual(x.lineno, 0) # this used to fail because Sub._fields was None x = ast.Sub() @@ -202,7 +202,7 @@ for protocol in protocols: for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests): ast2 = mod.loads(mod.dumps(ast, protocol)) - self.assertEquals(to_tuple(ast2), to_tuple(ast)) + self.assertEqual(to_tuple(ast2), to_tuple(ast)) class ASTHelpers_Test(unittest.TestCase): diff --git a/lib-python/2.7.0/test/test_dircache.py b/lib-python/2.7.0/test/test_dircache.py --- a/lib-python/2.7.0/test/test_dircache.py +++ b/lib-python/2.7.0/test/test_dircache.py @@ -35,7 +35,7 @@ def test_listdir(self): ## SUCCESSFUL CASES entries = dircache.listdir(self.tempdir) - self.assertEquals(entries, []) + self.assertEqual(entries, []) # Check that cache is actually caching, not just passing through. 
self.assertTrue(dircache.listdir(self.tempdir) is entries) @@ -52,7 +52,7 @@ time.sleep(1) self.writeTemp("test1") entries = dircache.listdir(self.tempdir) - self.assertEquals(entries, ['test1']) + self.assertEqual(entries, ['test1']) self.assertTrue(dircache.listdir(self.tempdir) is entries) ## UNSUCCESSFUL CASES @@ -63,7 +63,7 @@ self.mkdirTemp("A") lst = ['A', 'test2', 'test_nonexistent'] dircache.annotate(self.tempdir, lst) - self.assertEquals(lst, ['A/', 'test2', 'test_nonexistent']) + self.assertEqual(lst, ['A/', 'test2', 'test_nonexistent']) def test_main(): From commits-noreply at bitbucket.org Wed Apr 27 12:33:19 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 12:33:19 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: kill this old and forgotten file Message-ID: <20110427103319.0995A282BEC@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43659:44dcd6301611 Date: 2011-04-27 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/44dcd6301611/ Log: kill this old and forgotten file diff --git a/pypy/doc/discussion/cli-optimizations.rst b/pypy/doc/discussion/cli-optimizations.rst deleted file mode 100644 --- a/pypy/doc/discussion/cli-optimizations.rst +++ /dev/null @@ -1,233 +0,0 @@ -Possible optimizations for the CLI backend -========================================== - -Stack push/pop optimization ---------------------------- - -The CLI's VM is a stack based machine: this fact doesn't play nicely -with the SSI form the flowgraphs are generated in. At the moment -gencli does a literal translation of the SSI statements, allocating a -new local variable for each variable of the flowgraph. 
- -For example, consider the following RPython code and the corresponding -flowgraph:: - - def bar(x, y): - foo(x+y, x-y) - - - inputargs: x_0 y_0 - v0 = int_add(x_0, y_0) - v1 = int_sub(x_0, y_0) - v2 = directcall((sm foo), v0, v1) - -This is the IL code generated by the CLI backend:: - - .locals init (int32 v0, int32 v1, int32 v2) - - block0: - ldarg 'x_0' - ldarg 'y_0' - add - stloc 'v0' - ldarg 'x_0' - ldarg 'y_0' - sub - stloc 'v1' - ldloc 'v0' - ldloc 'v1' - call int32 foo(int32, int32) - stloc 'v2' - -As you can see, the results of 'add' and 'sub' are stored in v0 and -v1, respectively, then v0 and v1 are reloaded onto stack. These -store/load is redundant, since the code would work nicely even without -them:: - - .locals init (int32 v2) - - block0: - ldarg 'x_0' - ldarg 'y_0' - add - ldarg 'x_0' - ldarg 'y_0' - sub - call int32 foo(int32, int32) - stloc 'v2' - -I've checked the native code generated by the Mono Jit on x86 and I've -seen that it does not optimize it. I haven't checked the native code -generated by Microsoft CLR, yet. - -Thus, we might consider to optimize it manually; it should not be so -difficult, but it is not trivial because we have to make sure that the -dropped locals are used only once. - - -Mapping RPython exceptions to native CLI exceptions ---------------------------------------------------- - -Both RPython and CLI have its own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. 
- -For now I've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by standard operations. The currently -implemented solution is to do an exception translation on-the-fly; for -example, the 'ind_add_ovf' is translated into the following IL code:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class exceptions.OverflowError::.ctor() - dup - ldsfld class Object_meta pypy.runtime.Constants::exceptions_OverflowError_meta - stfld class Object_meta Object::meta - throw - } - -I.e., it catches the builtin OverflowException and raises a RPython -OverflowError. - -I haven't measured timings yet, but I guess that this machinery brings -to some performance penalties even in the non-overflow case; a -possible optimization is to do the on-the-fly translation only when it -is strictly necessary, i.e. only when the except clause catches an -exception class whose subclass hierarchy is compatible with the -builtin one. As an example, consider the following RPython code:: - - try: - return mylist[0] - except IndexError: - return -1 - -Given that IndexError has no subclasses, we can map it to -IndexOutOfBoundException and directly catch this one:: - - try - { - ldloc 'mylist' - ldc.i4 0 - call int32 getitem(MyListType, int32) - ... - } - catch [mscorlib]System.IndexOutOfBoundException - { - // return -1 - ... 
- } - -By contrast we can't do so if the except clause catches classes that -don't directly map to any builtin class, such as LookupError:: - - try: - return mylist[0] - except LookupError: - return -1 - -Has to be translated in the old way:: - - .try - { - ldloc 'mylist' - ldc.i4 0 - - .try - { - call int32 getitem(MyListType, int32) - } - catch [mscorlib]System.IndexOutOfBoundException - { - // translate IndexOutOfBoundException into IndexError - newobj instance void class exceptions.IndexError::.ctor() - dup - ldsfld class Object_meta pypy.runtime.Constants::exceptions_IndexError_meta - stfld class Object_meta Object::meta - throw - } - ... - } - .catch exceptions.LookupError - { - // return -1 - ... - } - - -Specializing methods of List ----------------------------- - -Most methods of RPython lists are implemented by ll_* helpers placed -in rpython/rlist.py. For some of those we have a direct correspondent -already implemented in .NET List<>; we could use the oopspec attribute -for doing an on-the-fly replacement of these low level helpers with -their builtin correspondent. As an example the 'append' method is -already mapped to pypylib.List.append. Thanks to Armin Rigo for the -idea of using oopspec. - - -Doing some caching on Dict --------------------------- - -The current implementations of ll_dict_getitem and ll_dict_get in -ootypesystem.rdict do two consecutive lookups (calling ll_contains and -ll_get) on the same key. We might cache the result of -pypylib.Dict.ll_contains so that the successive ll_get don't need a -lookup. Btw, we need some profiling before choosing the best way. Or -we could directly refactor ootypesystem.rdict for doing a single -lookup. - -XXX -I tried it on revision 32917 and performance are slower! I don't know -why, but pypy.net pystone.py is slower by 17%, and pypy.net -richards.py is slower by 71% (!!!). I don't know why, need to be -investigated further. 
- - -Optimize StaticMethod ---------------------- - -:: - - 2006-10-02, 13:41 - - antocuni: do you try to not wrap static methods that are just called and not passed around - no - I think I don't know how to detect them - antocuni: you should try to render them just as static methods not as instances when possible - you need to track what appears only in direct_calls vs other places - - -Optimize Unicode ----------------- - -We should try to use native .NET unicode facilities instead of our -own. These should save both time (especially startup time) and memory. - -On 2006-10-02 I got these benchmarks: - -Pypy.NET Startup time Memory used -with unicodedata ~12 sec 112508 Kb -without unicodedata ~6 sec 79004 Kb - -The version without unicodedata is buggy, of course. - -Unfortunately it seems that .NET doesn't expose all the things we -need, so we will still need some data. For example there is no way to -get the unicode name of a char. From commits-noreply at bitbucket.org Wed Apr 27 12:33:20 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 12:33:20 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: keep this file; it will probably never be implemented, but it contains useful ideas Message-ID: <20110427103320.4565B282BEC@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43660:d9a0889a1e12 Date: 2011-04-27 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/d9a0889a1e12/ Log: keep this file; it will probably never be implemented, but it contains useful ideas diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst --- a/pypy/doc/discussion/VM-integration.rst +++ b/pypy/doc/discussion/VM-integration.rst @@ -1,5 +1,3 @@ -.. XXX anto, do we still need this? 
- ============================================== Integration of PyPy with host Virtual Machines ============================================== From commits-noreply at bitbucket.org Wed Apr 27 12:33:22 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 12:33:22 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: this file is still useful in case someone wants to implement rpython-jvm bindings; make it up to date by removing all references to the new deatch js backend Message-ID: <20110427103322.748CE282BEC@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43661:ba54e7d7fbe6 Date: 2011-04-27 12:33 +0200 http://bitbucket.org/pypy/pypy/changeset/ba54e7d7fbe6/ Log: this file is still useful in case someone wants to implement rpython-jvm bindings; make it up to date by removing all references to the new deatch js backend diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst --- a/pypy/doc/discussion/outline-external-ootype.rst +++ b/pypy/doc/discussion/outline-external-ootype.rst @@ -1,24 +1,10 @@ -.. XXX, anto, can this be killed? - Some discussion about external objects in ootype ================================================ -Current approaches: - -* BasicExternal, used for js backend +Current approach: * SomeCliXxx for .NET backend -BasicExternal -------------- - -* Is using types to make rpython happy (ie, every single method or field - is hardcoded) - -* Supports callbacks by SomeGenericCallable - -* Supports fields, also with callable fields - SomeCliXxx ---------- @@ -28,11 +14,11 @@ * Supports static methods -Would be extremely cool to have just one approach instead of two, -so here are some notes: +Would be extremely cool to generalize the approach to be useful also for the +JVM backend. Here are some notes: * There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, js, jvm for now). 
+ to support any possible backend (cli, jvm for now). * This approach might be eventually extended by a backend itself, but as much as possible code should be factored out. @@ -48,24 +34,22 @@ ================================ The goal of the task is to let RPython program access "external -objects" which are available in the target platform; these include: +entities" which are available in the target platform; these include: - external classes (e.g. for .NET: System.Collections.ArrayList) - - external instances (e.g. for js: window, window.document) + - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - - external functions? (they are not needed for .NET and JVM, maybe - for js?) - -External objects should behave as much as possible as "internal -objects". +External entities should behave as much as possible as "internal +entities". Moreover, we want to preserve the possibility of *testing* RPython programs on top of CPython if possible. For example, it should be possible to RPython programs using .NET external objects using -PythonNet; probably there is something similar for JVM, but not for -JS as I know. +PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: +.. _JPype: http://jpype.sourceforge.net/ +.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm How to represent types ---------------------- @@ -126,11 +110,6 @@ and JVM the job can be easily automatized, since the objects have got precise signatures. -For JS, signatures must be written by hand, so we must provide a -convenient syntax for it; I think it should be possible to use the -current syntax and write a tool which translates it to low-level -types. - RPython interface ----------------- @@ -148,9 +127,8 @@ - access to static methods: return an object which will be annotated as SomeExternalStaticMeth. -Instances are annotated as SomeExternalInstance. 
Prebuilt external -objects (such as JS's window.document) are annotated as -SomeExternalInstance(const=...). +Instances are annotated as SomeExternalInstance. Prebuilt external objects are +annotated as SomeExternalInstance(const=...). Open issues ----------- @@ -181,18 +159,12 @@ It would be nice to allow programmers to inherit from an external class. Not sure about the implications, though. -Callbacks -~~~~~~~~~ - -I know that they are an issue for JS, but I don't know how they are -currently implemented. - Special methods/properties ~~~~~~~~~~~~~~~~~~~~~~~~~~ In .NET there are special methods that can be accessed using a special syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#. +RPython the same syntax as C#, although we can live without that. Implementation details From commits-noreply at bitbucket.org Wed Apr 27 12:40:33 2011 From: commits-noreply at bitbucket.org (lac) Date: Wed, 27 Apr 2011 12:40:33 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac) revise the faq Message-ID: <20110427104033.586FB282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43662:d38e306667dd Date: 2011-04-27 12:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d38e306667dd/ Log: (cfbolz, lac) revise the faq diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -43,67 +43,57 @@ complete and well tested, so if your project does not use many extension modules there is a good chance that it will work with PyPy. -We list the differences we know about in `cpython_differences`_. +We list the differences we know about in `cpython differences`_. -There is also an experimental support for CPython extension modules, so -they'll run without change (from current observation, rather with little -change) on trunk. It has been a part of 1.4 release, but support is still -in alpha phase. 
+-------------------------------------------- +Do CPython Extension modules work with PyPy? +-------------------------------------------- + +We have experimental support for CPython extension modules, so +they run with minor changes. This has been a part of pypy since +the 1.4 release, but support is still in beta phase. CPython +extension modules in PyPy are often much slower than in CPython due to +the need to emulate refcounting. It is often faster to take out your +CPython extension and replace it with a pure python version that the +JIT can see. + +We fully support ctypes-based extensions. + +For information on which third party extensions work (or do not work) +with PyPy see the `compatibility wiki`_. + .. _`extension modules`: cpython_differences.html#extension-modules -.. _`cpython_differences`: cpython_differences.html +.. _`cpython differences`: cpython_differences.html +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home --------------------------------- -On what platforms does PyPy run? --------------------------------- +--------------------------------- +On which platforms does PyPy run? +--------------------------------- PyPy is regularly and extensively tested on Linux machines and on Mac OS X and mostly works under Windows too (but is tested there less extensively). PyPy needs a CPython running on the target platform to bootstrap, as cross compilation is not really meant to work yet. -At the moment you need CPython 2.4 (with ctypes) or CPython 2.5 or 2.6 +At the moment you need CPython 2.5 - 2.7 for the translation process. PyPy's JIT requires an x86 or x86_64 CPU. - ------------------------------------------------ Which Python version (2.x?) does PyPy implement? ------------------------------------------------ -PyPy currently aims to be fully compatible with Python 2.5. That means that -it contains the standard library of Python 2.5 and that it supports 2.5 -features (such as the with statement). 
+PyPy currently aims to be fully compatible with Python 2.7. That means that +it contains the standard library of Python 2.7 and that it supports 2.7 +features (such as set comprehensions). .. _threading: ------------------------------------------------- -Do threads work? What are the modules that work? +Does PyPy have a GIL? Why? ------------------------------------------------- -Operating system-level threads basically work. If you enable the ``thread`` -module then PyPy will get support for GIL based threading. -Note that PyPy also fully supports `stackless-like -microthreads`_ (although both cannot be mixed yet). - -All pure-python modules should work, unless they rely on ugly -cpython implementation details, in which case it's their fault. -There is an increasing number of compatible CPython extensions working, -including things like wxPython or PIL. This is an ongoing development effort -to bring as many CPython extension modules working as possible. - -.. _`stackless-like microthreads`: stackless.html - - ------------------------------------- -Can I use CPython extension modules? ------------------------------------- - -Yes, but the feature is in alpha state and is available only on trunk -(not in the 1.2 release). However, we'll only ever support well-behaving -CPython extensions. Please consult PyPy developers on IRC or mailing list -for explanations if your favorite module works and how you can help to make -it happen in case it does not. - -We fully support ctypes-based extensions, however. +Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem +is that our garbage collectors are not re-entrant. ------------------------------------------ How do I write extension modules for PyPy? @@ -113,44 +103,27 @@ .. __: extending.html - -.. _`slower than CPython`: -.. _`how fast is pypy`: - ----------------- How fast is PyPy? ----------------- +This really depends on your code. +For pure Python algorithmic code, it is very fast. 
For more typical +Python programs we generally are 3 times the speed of Cpython 2.6 . +You might be interested in our `benchmarking site`_ and our +`jit documentation`_. -.. _whysoslow: +.. _`benchmarking site`: http://speed.pypy.org -In three words, PyPy is "kind of fast". In more than three -words, the answer to this question is hard to give as a single -number. The fastest PyPy available so far is clearly PyPy -`with a JIT included`_, optimized and translated to C. This -version of PyPy is "kind of fast" in the sense that there are -numerous examples of Python code that run *much faster* than -CPython, up to a large number of times faster. And there are -also examples of code that are just as slow as without the -JIT. A PyPy that does not include a JIT has performance that -is more predictable: it runs generally somewhere between 1 and -2 times slower than CPython, in the worst case up to 4 times -slower. - -Obtaining good measurements for the performance when run on -the CLI or JVM is difficult, but the JIT on the CLI `seems to -work nicely`__ too. - -.. __: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf -.. _`with a JIT included`: jit/index.html +.. _`jit documentation`: jit/index.html .. _`prolog and javascript`: ----------------------------------------------------------------- -Can PyPy support interpreters for other languages beyond Python? ----------------------------------------------------------------- +-------------------------------------------------------------------------- +Can I use PyPy's translation toolchain for other languages besides Python? +-------------------------------------------------------------------------- -The toolsuite that translates the PyPy interpreter is quite +Yes. The toolsuite that translates the PyPy interpreter is quite general and can be used to create optimized versions of interpreters for any language, not just Python. 
Of course, these interpreters can make use of the same features that PyPy brings to Python: @@ -161,12 +134,12 @@ Currently, we have preliminary versions of a JavaScript interpreter (Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_ (Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_ -(produced during a sprint). On the `PyPy "user" main page`_ there are also a -Scheme and Io implementation, all of these are unfinished at the moment. +(produced during a sprint). On the `PyPy bitbucket page`_ there is also a +Scheme and an Io implementation; both of these are unfinished at the moment. .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 -.. _`PyPy "user" main page`: https://bitbucket.org/pypy/ +.. _`PyPy bitbucket page`: https://bitbucket.org/pypy/ Development @@ -176,32 +149,21 @@ How do I get into PyPy development? Can I come to sprints? ----------------------------------------------------------- -Sure you can come to sprints! We always welcome newcomers and try to help them -get started in the project as much as possible (e.g. by providing tutorials and -pairing them with experienced PyPy developers). Newcomers should have some -Python experience and read some of the PyPy documentation before coming to a -sprint. +Certainly you can come to sprints! We always welcome newcomers and try +to help them as much as possible to get started with the project. We +provide tutorials and pair them with experienced PyPy +developers. Newcomers should have some Python experience and read some +of the PyPy documentation before coming to a sprint. -Coming to a sprint is usually also the best way to get into PyPy development. -If you get stuck or need advice, `contact us`_. Usually IRC is +Coming to a sprint is usually the best way to get into PyPy development. +If you get stuck or need advice, `contact us`_. 
IRC is the most immediate way to get feedback (at least during some parts of the day; -many PyPy developers are in Europe) and the `mailing list`_ is better for long +most PyPy developers are in Europe) and the `mailing list`_ is better for long discussions. .. _`contact us`: index.html .. _`mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev ----------------------------------------------------------------------- -I am getting strange errors while playing with PyPy, what should I do? ----------------------------------------------------------------------- - -It seems that a lot of strange, unexplainable problems can be magically -solved by removing all the \*.pyc files from the PyPy source tree -(the script py.cleanup from pypy/tool will do that for you). -Another thing you can do is removing the directory pypy/_cache -completely. If the error is persistent and still annoys you after this -treatment please send us a bug report (or even better, a fix :-) - ------------------------------------------------------------- OSError: ... cannot restore segment prot after reloc... Help? ------------------------------------------------------------- @@ -218,12 +180,12 @@ Be sure to enable it again if you need it! -PyPy translation tool chain -=========================== +The PyPy translation tool chain +=============================== ----------------------------------------- -Can PyPy compile normal Python programs? ----------------------------------------- +--------------------------------------------- +Can PyPy compile normal Python programs to C? +--------------------------------------------- No, PyPy is not a Python compiler. @@ -231,37 +193,14 @@ that a program will manipulate by doing a static analysis. It should be clear if you are familiar with Python, but if in doubt see [BRETT]_. -What could be attempted is static "soft typing", where you would use a -whole bunch of heuristics to guess what types are probably going to show -up where. 
In this way, you could compile the program into two copies of -itself: a "fast" version and a "slow" version. The former would contain -many guards that allow it to fall back to the latter if needed. That -would be a wholly different project than PyPy, though. (As far as we -understand it, this is the approach that the LLVM__ group would like to -see LLVM used for, so if you feel like working very hard and attempting -something like this, check with them.) +If you want a fast Python program, please use our JIT_ instead. -.. __: http://llvm.org/ - -What PyPy contains is, on the one hand, an non-soft static type -inferencer for RPython, which is a sublanguage that we defined just so -that it's possible and not too hard to do that; and on the other hand, -for the full Python language, we have an interpreter, and a JIT -generator which can produce a Just-In-Time Compiler from the -interpreter. The resulting JIT works for the full Python language in a -way that doesn't need type inference at all. - -For more motivation and details about our approach see also [D05.1]_, -section 3. +.. _JIT: jit/index.html .. [BRETT] Brett Cannon, Localized Type Inference of Atomic Types in Python, http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.90.3231 -.. [D05.1] Compiling Dynamic Language Implementations, - Report from the PyPy project to the E.U., - https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf - .. _`PyPy's RPython`: ------------------------------ @@ -269,30 +208,26 @@ ------------------------------ RPython is a restricted subset of the Python language. It is used for -implementing dynamic language interpreters within the PyPy framework. The -restrictions are to ensure that type inference (and so, ultimately, translation -to other languages) of RPython programs is possible. These restrictions only -apply after the full import happens, so at import time arbitrary Python code can -be executed. 
+implementing dynamic language interpreters within the PyPy toolchain. The +restrictions ensure that type inference (and so, ultimately, translation +to other languages) of RPython programs is possible. The property of "being RPython" always applies to a full program, not to single -functions or modules (the translation tool chain does a full program analysis). -"Full program" in the context of "being RPython" is all the code reachable from -an "entry point" function. The translation toolchain follows all calls -recursively and discovers what belongs to the program and what not. +functions or modules (the translation toolchain does a full program analysis). +The translation toolchain follows all calls +recursively and discovers what belongs to the program and what does not. -The restrictions that apply to programs to be RPython mostly limit the ability -of mixing types in arbitrary ways. RPython does not allow the usage of two +RPython program restrictions mostly limit the ability +to mix types in arbitrary ways. RPython does not allow the binding of two different types in the same variable. In this respect (and in some others) it -feels a bit like Java. Other features not allowed in RPython are the usage of +feels a bit like Java. Other features not allowed in RPython are the use of special methods (``__xxx__``) except ``__init__`` and ``__del__``, and the -usage of reflection capabilities (e.g. ``__dict__``). +use of reflection capabilities (e.g. ``__dict__``). -Most existing standard library modules are not RPython, except for -some functions in ``os``, ``math`` and ``time`` that are natively -supported. In general it is quite unlikely that an existing Python -program is by chance RPython; it is most likely that it would have to be -heavily rewritten. +You cannot use most existing standard library modules from RPython. The +exceptions are +some functions in ``os``, ``math`` and ``time`` that have native support. 
+ To read more about the RPython limitations read the `RPython description`_. .. _`RPython description`: coding-guide.html#restricted-python @@ -309,29 +244,6 @@ .. _`sandboxed Python Interpreter`: sandbox.html .. _`Zope's RestrictedPython`: http://pypi.python.org/pypi/RestrictedPython -------------------------------------------------------------------------- -Can I use PyPy and RPython to compile smaller parts of my Python program? -------------------------------------------------------------------------- - -No. That would be possible, and we played with early attempts in that -direction, but there are many delicate issues: for example, how the -compiled and the non-compiled parts exchange data. Supporting this in a -nice way would be a lot of work. - -PyPy is certainly a good starting point for someone that would like to -work in that direction. Early attempts were dropped because they -conflicted with refactorings that we needed in order to progress on the -rest of PyPy; the currently active developers of PyPy have different -priorities. If someone wants to start working in that direction I -imagine that he might get a (very little) bit of support from us, -though. - -Alternatively, it's possible to write a mixed-module, i.e. an extension -module for PyPy in RPython, which you can then import from your Python -program when it runs on top of PyPy. This is similar to writing a C -extension module for CPython in term of investment of effort (without -all the INCREF/DECREF mess, though). - ------------------------------------------------------ What's the ``"NOT_RPYTHON"`` I see in some docstrings? ------------------------------------------------------ @@ -347,7 +259,7 @@ ------------------------------------------------------------------- It's not necessarily nonsense, but it's not really The PyPy Way. 
It's -pretty hard, without some kind of type inference, to translate, say this +pretty hard, without some kind of type inference, to translate this Python:: a + b @@ -366,16 +278,16 @@ Do I have to rewrite my programs in RPython? -------------------------------------------- -No. PyPy always runs your code in its own interpreter, which is a -full and compliant Python 2.5 interpreter. RPython_ is only the +No. And you shouldn't try. PyPy always runs your code in its own interpreter, which is a +full and compliant Python 2.7 interpreter. RPython is only the language in which parts of PyPy itself are written and extension -modules for it. The answer to whether something needs to be written as -an extension module, apart from the "gluing to external libraries" reason, will -change over time as speed for normal Python code improves. +modules for it. Not only is it not necessary for you to rewrite your +code in RPython, it probably won't give you any speed improvements if you +try. -------------------------- -Which backends are there? -------------------------- +--------------------------------------------------- +Which backends are there for the RPython toolchain? +--------------------------------------------------- Currently, there are backends for C_, the CLI_, and the JVM_. All of these can translate the entire PyPy interpreter. @@ -392,31 +304,22 @@ See the `getting-started`_ guide. +.. _`getting-started`: getting-started-python.html + .. _`how do I compile my own interpreters`: ------------------------------------- How do I compile my own interpreters? ------------------------------------- +Begin by reading `Andrew Brown's tutorial`_ . -Start from the example of -`pypy/translator/goal/targetnopstandalone.py`_, which you compile by -typing:: - - python translate.py targetnopstandalone - -You can have a look at intermediate C source code, which is (at the -moment) put in ``/tmp/usession-*/testing_1/testing_1.c``. 
Of course, -all the functions and stuff used directly and indirectly by your -``entry_point()`` function has to be RPython_. - - -.. _`RPython`: coding-guide.html#rpython -.. _`getting-started`: getting-started.html - -.. include:: _ref.rst +.. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html ---------------------------------------------------------- Why does PyPy draw a Mandelbrot fractal while translating? ---------------------------------------------------------- Because it's fun. + +.. include:: _ref.rst + From commits-noreply at bitbucket.org Wed Apr 27 12:44:38 2011 From: commits-noreply at bitbucket.org (lac) Date: Wed, 27 Apr 2011 12:44:38 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge Message-ID: <20110427104438.2BBBE282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43663:505c263eb524 Date: 2011-04-27 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/505c263eb524/ Log: merge diff --git a/pypy/doc/config/objspace.std.optimized_int_add.rst b/pypy/doc/config/objspace.std.optimized_int_add.txt copy from pypy/doc/config/objspace.std.optimized_int_add.rst copy to pypy/doc/config/objspace.std.optimized_int_add.txt diff --git a/pypy/doc/config/objspace.std.withcelldict.rst b/pypy/doc/config/objspace.std.withcelldict.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withcelldict.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable cell-dicts. This optimization is not helpful without the JIT. In the -presence of the JIT, it greatly helps looking up globals. diff --git a/pypy/doc/config/objspace.std.withprebuiltint.rst b/pypy/doc/config/objspace.std.withprebuiltint.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withprebuiltint.rst +++ /dev/null @@ -1,5 +0,0 @@ -This option enables the caching of small integer objects (similar to what -CPython does). 
The range of which integers are cached can be influenced with -the :config:`objspace.std.prebuiltintfrom` and -:config:`objspace.std.prebuiltintto` options. - diff --git a/pypy/doc/config/objspace.honor__builtins__.rst b/pypy/doc/config/objspace.honor__builtins__.txt copy from pypy/doc/config/objspace.honor__builtins__.rst copy to pypy/doc/config/objspace.honor__builtins__.txt diff --git a/pypy/doc/config/objspace.std.optimized_comparison_op.rst b/pypy/doc/config/objspace.std.optimized_comparison_op.txt copy from pypy/doc/config/objspace.std.optimized_comparison_op.rst copy to pypy/doc/config/objspace.std.optimized_comparison_op.txt diff --git a/pypy/doc/config/translation.list_comprehension_operations.rst b/pypy/doc/config/translation.list_comprehension_operations.rst deleted file mode 100644 --- a/pypy/doc/config/translation.list_comprehension_operations.rst +++ /dev/null @@ -1,2 +0,0 @@ -Experimental optimization for list comprehensions in RPython. - diff --git a/pypy/doc/config/objspace.soabi.rst b/pypy/doc/config/objspace.soabi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.soabi.rst +++ /dev/null @@ -1,14 +0,0 @@ -This option controls the tag included into extension module file names. The -default is something like `pypy-14`, which means that `import foo` will look for -a file named `foo.pypy-14.so` (or `foo.pypy-14.pyd` on Windows). - -This is an implementation of PEP3149_, with two differences: - - * the filename without tag `foo.so` is not considered. - * the feature is also available on Windows. - -When set to the empty string (with `--soabi=`), the interpreter will only look -for a file named `foo.so`, and will crash if this file was compiled for another -Python interpreter. - -.. 
_PEP3149: http://www.python.org/dev/peps/pep-3149/ diff --git a/pypy/doc/config/objspace.name.rst b/pypy/doc/config/objspace.name.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.name.rst +++ /dev/null @@ -1,16 +0,0 @@ -Determine which `Object Space`_ to use. The `Standard Object Space`_ gives the -normal Python semantics, the others are `Object Space Proxies`_ giving -additional features (except the Flow Object Space which is not intended -for normal usage): - - * thunk_: The thunk object space adds lazy evaluation to PyPy. - * taint_: The taint object space adds soft security features. - * dump_: Using this object spaces results in the dumpimp of all operations - to a log. - -.. _`Object Space`: ../objspace.html -.. _`Object Space Proxies`: ../objspace-proxies.html -.. _`Standard Object Space`: ../objspace.html#standard-object-space -.. _thunk: ../objspace-proxies.html#thunk -.. _taint: ../objspace-proxies.html#taint -.. _dump: ../objspace-proxies.html#dump diff --git a/pypy/doc/config/translation.ootype.mangle.rst b/pypy/doc/config/translation.ootype.mangle.rst deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.rst +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/config/test/test_makerestdoc.py b/pypy/config/test/test_makerestdoc.py --- a/pypy/config/test/test_makerestdoc.py +++ b/pypy/config/test/test_makerestdoc.py @@ -20,14 +20,14 @@ config = Config(descr) txt = descr.make_rest_doc().text() - result = {"": checkrest(txt, descr._name + ".txt")} + result = {"": txt} for path in config.getpaths(include_groups=True): subconf, step = config._cfgimpl_get_home_by_path(path) fullpath = (descr._name + "." 
+ path) prefix = fullpath.rsplit(".", 1)[0] txt = getattr(subconf._cfgimpl_descr, step).make_rest_doc( prefix).text() - result[path] = checkrest(txt, fullpath + ".txt") + result[path] = txt return result def test_simple(): @@ -68,7 +68,6 @@ ChoiceOption("bar", "more doc", ["a", "b", "c"], default="a")]) result = generate_html(descr) - assert "more doc" in result[""] def test_cmdline_overview(): descr = OptionDescription("foo", "doc", [ diff --git a/pypy/doc/config/objspace.usemodules._warnings.rst b/pypy/doc/config/objspace.usemodules._warnings.txt copy from pypy/doc/config/objspace.usemodules._warnings.rst copy to pypy/doc/config/objspace.usemodules._warnings.txt diff --git a/pypy/doc/config/objspace.usemodules._random.rst b/pypy/doc/config/objspace.usemodules._random.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._random.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_random' module. It is necessary to use the module "random" from the standard library. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.rst b/pypy/doc/config/objspace.usemodules.pyexpat.txt copy from pypy/doc/config/objspace.usemodules.pyexpat.rst copy to pypy/doc/config/objspace.usemodules.pyexpat.txt diff --git a/pypy/doc/config/objspace.std.withmapdict.rst b/pypy/doc/config/objspace.std.withmapdict.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.rst +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. 
_`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/translation.stackless.rst b/pypy/doc/config/translation.stackless.rst deleted file mode 100644 --- a/pypy/doc/config/translation.stackless.rst +++ /dev/null @@ -1,5 +0,0 @@ -Run the `stackless transform`_ on each generated graph, which enables the use -of coroutines at RPython level and the "stackless" module when translating -PyPy. - -.. _`stackless transform`: ../stackless.html diff --git a/pypy/doc/config/translation.backendopt.none.rst b/pypy/doc/config/translation.backendopt.none.txt copy from pypy/doc/config/translation.backendopt.none.rst copy to pypy/doc/config/translation.backendopt.none.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.rst b/pypy/doc/config/objspace.std.withprebuiltchar.rst deleted file mode 100644 diff --git a/pypy/doc/config/translation.backend.rst b/pypy/doc/config/translation.backend.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backend.rst +++ /dev/null @@ -1,3 +0,0 @@ -Which backend to use when translating, see `translation documentation`_. - -.. _`translation documentation`: ../translation.html diff --git a/pypy/doc/config/objspace.usemodules.token.rst b/pypy/doc/config/objspace.usemodules.token.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.token.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'token' module. -This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/objspace.usemodules._winreg.rst b/pypy/doc/config/objspace.usemodules._winreg.txt copy from pypy/doc/config/objspace.usemodules._winreg.rst copy to pypy/doc/config/objspace.usemodules._winreg.txt diff --git a/pypy/doc/config/translation.debug.rst b/pypy/doc/config/translation.debug.rst deleted file mode 100644 --- a/pypy/doc/config/translation.debug.rst +++ /dev/null @@ -1,2 +0,0 @@ -Record extra debugging information during annotation. This leads to slightly -less obscure error messages. diff --git a/pypy/doc/config/objspace.usemodules.math.rst b/pypy/doc/config/objspace.usemodules.math.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.math.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'math' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.backendopt.rst b/pypy/doc/config/translation.backendopt.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.rst +++ /dev/null @@ -1,5 +0,0 @@ -This group contains options about various backend optimization passes. Most of -them are described in the `EU report about optimization`_ - -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf - diff --git a/pypy/doc/config/translation.jit.rst b/pypy/doc/config/translation.jit.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable the JIT generator, for targets that have JIT support. -Experimental so far. diff --git a/pypy/doc/config/objspace.std.builtinshortcut.rst b/pypy/doc/config/objspace.std.builtinshortcut.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.builtinshortcut.rst +++ /dev/null @@ -1,5 +0,0 @@ -A shortcut speeding up primitive operations between built-in types. 
- -This is a space-time trade-off: at the moment, this option makes a -translated pypy-c executable bigger by about 1.7 MB. (This can probably -be improved with careful analysis.) diff --git a/pypy/doc/config/translation.cc.rst b/pypy/doc/config/translation.cc.txt copy from pypy/doc/config/translation.cc.rst copy to pypy/doc/config/translation.cc.txt diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.rst b/pypy/doc/config/objspace.std.prebuiltintfrom.txt copy from pypy/doc/config/objspace.std.prebuiltintfrom.rst copy to pypy/doc/config/objspace.std.prebuiltintfrom.txt diff --git a/pypy/doc/config/objspace.usemodules.operator.rst b/pypy/doc/config/objspace.usemodules.operator.txt copy from pypy/doc/config/objspace.usemodules.operator.rst copy to pypy/doc/config/objspace.usemodules.operator.txt diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.rst b/pypy/doc/config/objspace.usemodules.__pypy__.txt copy from pypy/doc/config/objspace.usemodules.__pypy__.rst copy to pypy/doc/config/objspace.usemodules.__pypy__.txt diff --git a/pypy/doc/config/objspace.honor__builtins__.rst b/pypy/doc/config/objspace.honor__builtins__.rst deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.multimethods.rst b/pypy/doc/config/objspace.std.multimethods.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.multimethods.rst +++ /dev/null @@ -1,8 +0,0 @@ -Choose the multimethod implementation. - -* ``doubledispatch`` turns - a multimethod call into a sequence of normal method calls. - -* ``mrd`` uses a technique known as Multiple Row Displacement - which precomputes a few compact tables of numbers and - function pointers. diff --git a/pypy/doc/config/translation.linkerflags.rst b/pypy/doc/config/translation.linkerflags.rst deleted file mode 100644 --- a/pypy/doc/config/translation.linkerflags.rst +++ /dev/null @@ -1,1 +0,0 @@ -Experimental. Specify extra flags to pass to the linker. 
diff --git a/pypy/doc/config/objspace.std.withmethodcache.rst b/pypy/doc/config/objspace.std.withmethodcache.txt copy from pypy/doc/config/objspace.std.withmethodcache.rst copy to pypy/doc/config/objspace.std.withmethodcache.txt diff --git a/pypy/doc/config/objspace.usemodules._random.rst b/pypy/doc/config/objspace.usemodules._random.txt copy from pypy/doc/config/objspace.usemodules._random.rst copy to pypy/doc/config/objspace.usemodules._random.txt diff --git a/pypy/doc/config/objspace.usemodules.mmap.rst b/pypy/doc/config/objspace.usemodules.mmap.txt copy from pypy/doc/config/objspace.usemodules.mmap.rst copy to pypy/doc/config/objspace.usemodules.mmap.txt diff --git a/pypy/doc/config/translation.simplifying.rst b/pypy/doc/config/translation.simplifying.txt copy from pypy/doc/config/translation.simplifying.rst copy to pypy/doc/config/translation.simplifying.txt diff --git a/pypy/doc/config/objspace.std.withmethodcache.rst b/pypy/doc/config/objspace.std.withmethodcache.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. diff --git a/pypy/doc/config/translation.jit_backend.rst b/pypy/doc/config/translation.jit_backend.txt copy from pypy/doc/config/translation.jit_backend.rst copy to pypy/doc/config/translation.jit_backend.txt diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.rst b/pypy/doc/config/objspace.usemodules.__pypy__.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.__pypy__.rst +++ /dev/null @@ -1,9 +0,0 @@ -Use the '__pypy__' module. -This module is expected to be working and is included by default. -It contains special PyPy-specific functionality. -For example most of the special functions described in the `object space proxies` -document are in the module. -See the `__pypy__ module documentation`_ for more details. 
- -.. _`object space proxy`: ../objspace-proxies.html -.. _`__pypy__ module documentation`: ../__pypy__-module.html diff --git a/pypy/doc/config/objspace.usemodules._hashlib.rst b/pypy/doc/config/objspace.usemodules._hashlib.txt copy from pypy/doc/config/objspace.usemodules._hashlib.rst copy to pypy/doc/config/objspace.usemodules._hashlib.txt diff --git a/pypy/doc/config/objspace.usemodules.posix.rst b/pypy/doc/config/objspace.usemodules.posix.txt copy from pypy/doc/config/objspace.usemodules.posix.rst copy to pypy/doc/config/objspace.usemodules.posix.txt diff --git a/pypy/doc/config/objspace.std.sharesmallstr.rst b/pypy/doc/config/objspace.std.sharesmallstr.rst deleted file mode 100644 diff --git a/pypy/doc/config/objspace.geninterp.rst b/pypy/doc/config/objspace.geninterp.txt copy from pypy/doc/config/objspace.geninterp.rst copy to pypy/doc/config/objspace.geninterp.txt diff --git a/pypy/doc/config/translation.cli.exception_transformer.rst b/pypy/doc/config/translation.cli.exception_transformer.txt copy from pypy/doc/config/translation.cli.exception_transformer.rst copy to pypy/doc/config/translation.cli.exception_transformer.txt diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.rst b/pypy/doc/config/translation.backendopt.stack_optimization.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.stack_optimization.rst +++ /dev/null @@ -1,1 +0,0 @@ -Enable the optimized code generation for stack based machine, if the backend support it diff --git a/pypy/doc/config/objspace.usemodules._codecs.rst b/pypy/doc/config/objspace.usemodules._codecs.txt copy from pypy/doc/config/objspace.usemodules._codecs.rst copy to pypy/doc/config/objspace.usemodules._codecs.txt diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.rst b/pypy/doc/config/objspace.usemodules.unicodedata.txt copy from pypy/doc/config/objspace.usemodules.unicodedata.rst copy to pypy/doc/config/objspace.usemodules.unicodedata.txt diff --git 
a/pypy/doc/config/translation.rst b/pypy/doc/config/translation.rst deleted file mode 100644 --- a/pypy/doc/config/translation.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/translation.jit_backend.rst b/pypy/doc/config/translation.jit_backend.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_backend.rst +++ /dev/null @@ -1,2 +0,0 @@ -Choose the backend to use for the JIT. -By default, this is the best backend for the current platform. diff --git a/pypy/doc/config/objspace.std.newshortcut.rst b/pypy/doc/config/objspace.std.newshortcut.txt copy from pypy/doc/config/objspace.std.newshortcut.rst copy to pypy/doc/config/objspace.std.newshortcut.txt diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.rst b/pypy/doc/config/objspace.std.methodcachesizeexp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.methodcachesizeexp.rst +++ /dev/null @@ -1,1 +0,0 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. diff --git a/pypy/doc/config/objspace.std.rst b/pypy/doc/config/objspace.std.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.extmodules.rst +++ /dev/null @@ -1,12 +0,0 @@ -You can pass a comma-separated list of third-party builtin modules -which should be translated along with the standard modules within -``pypy.module``. - -The module names need to be fully qualified (i.e. have a ``.`` in them), -be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g. -``mypkg.somemod``. 
- -Once translated, the module will be accessible with a simple:: - - import somemod - diff --git a/pypy/doc/config/objspace.usemodules._ast.rst b/pypy/doc/config/objspace.usemodules._ast.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ast.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_ast' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.sys.rst b/pypy/doc/config/objspace.usemodules.sys.txt copy from pypy/doc/config/objspace.usemodules.sys.rst copy to pypy/doc/config/objspace.usemodules.sys.txt diff --git a/pypy/doc/config/translation.ootype.rst b/pypy/doc/config/translation.ootype.txt copy from pypy/doc/config/translation.ootype.rst copy to pypy/doc/config/translation.ootype.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.rst b/pypy/doc/config/translation.backendopt.profile_based_inline.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline.rst +++ /dev/null @@ -1,10 +0,0 @@ -Inline flowgraphs only for call-sites for which there was a minimal -number of calls during an instrumented run of the program. Callee -flowgraphs are considered candidates based on a weight heuristic like -for basic inlining. (see :config:`translation.backendopt.inline`, -:config:`translation.backendopt.profile_based_inline_threshold` ). - -The option takes as value a string which is the arguments to pass to -the program for the instrumented run. - -This optimization is not used by default. \ No newline at end of file diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -31,32 +31,38 @@ -rm -rf $(BUILDDIR)/* html: + python config/generate.py $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
dirhtml: + python config/generate.py $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: + python config/generate.py $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: + python config/generate.py $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: + python config/generate.py $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: + python config/generate.py $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -66,6 +72,7 @@ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" latex: + python config/generate.py $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @@ -73,17 +80,20 @@ "run these through (pdf)latex." changes: + python config/generate.py $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: + python config/generate.py $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: + python config/generate.py $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/pypy/doc/config/objspace.usemodules._md5.rst b/pypy/doc/config/objspace.usemodules._md5.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._md5.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in '_md5' module. -This module is expected to be working and is included by default. -There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. diff --git a/pypy/doc/config/objspace.std.builtinshortcut.rst b/pypy/doc/config/objspace.std.builtinshortcut.txt copy from pypy/doc/config/objspace.std.builtinshortcut.rst copy to pypy/doc/config/objspace.std.builtinshortcut.txt diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.rst b/pypy/doc/config/objspace.std.optimized_list_getitem.txt copy from pypy/doc/config/objspace.std.optimized_list_getitem.rst copy to pypy/doc/config/objspace.std.optimized_list_getitem.txt diff --git a/pypy/doc/config/objspace.std.withropeunicode.rst b/pypy/doc/config/objspace.std.withropeunicode.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withropeunicode.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use ropes to implement unicode strings (and also normal strings). - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#ropes - - diff --git a/pypy/doc/config/objspace.std.optimized_int_add.rst b/pypy/doc/config/objspace.std.optimized_int_add.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_int_add.rst +++ /dev/null @@ -1,2 +0,0 @@ -Optimize the addition of two integers a bit. Enabling this option gives small -speedups. 
diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.rst b/pypy/doc/config/objspace.usemodules._multiprocessing.txt copy from pypy/doc/config/objspace.usemodules._multiprocessing.rst copy to pypy/doc/config/objspace.usemodules._multiprocessing.txt diff --git a/pypy/doc/config/objspace.std.withrope.rst b/pypy/doc/config/objspace.std.withrope.txt copy from pypy/doc/config/objspace.std.withrope.rst copy to pypy/doc/config/objspace.std.withrope.txt diff --git a/pypy/doc/config/objspace.usemodules.operator.rst b/pypy/doc/config/objspace.usemodules.operator.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.operator.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'operator' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.name.rst b/pypy/doc/config/objspace.name.txt copy from pypy/doc/config/objspace.name.rst copy to pypy/doc/config/objspace.name.txt diff --git a/pypy/doc/config/translation.stackless.rst b/pypy/doc/config/translation.stackless.txt copy from pypy/doc/config/translation.stackless.rst copy to pypy/doc/config/translation.stackless.txt diff --git a/pypy/doc/config/objspace.usemodules.termios.rst b/pypy/doc/config/objspace.usemodules.termios.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.termios.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'termios' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.timing.rst b/pypy/doc/config/objspace.timing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.timing.rst +++ /dev/null @@ -1,1 +0,0 @@ -timing of various parts of the interpreter (simple profiling) diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst b/pypy/doc/config/translation.builtins_can_raise_exceptions.rst deleted file mode 100644 --- a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/config/objspace.usepycfiles.rst b/pypy/doc/config/objspace.usepycfiles.txt copy from pypy/doc/config/objspace.usepycfiles.rst copy to pypy/doc/config/objspace.usepycfiles.txt diff --git a/pypy/doc/config/translation.gcremovetypeptr.rst b/pypy/doc/config/translation.gcremovetypeptr.txt copy from pypy/doc/config/translation.gcremovetypeptr.rst copy to pypy/doc/config/translation.gcremovetypeptr.txt diff --git a/pypy/doc/config/objspace.timing.rst b/pypy/doc/config/objspace.timing.txt copy from pypy/doc/config/objspace.timing.rst copy to pypy/doc/config/objspace.timing.txt diff --git a/pypy/doc/config/translation.shared.rst b/pypy/doc/config/translation.shared.rst deleted file mode 100644 --- a/pypy/doc/config/translation.shared.rst +++ /dev/null @@ -1,2 +0,0 @@ -Build pypy as a shared library or a DLL, with a small executable to run it. -This is necessary on Windows to expose the C API provided by the cpyext module. diff --git a/pypy/doc/config/objspace.std.sharesmallstr.rst b/pypy/doc/config/objspace.std.sharesmallstr.txt copy from pypy/doc/config/objspace.std.sharesmallstr.rst copy to pypy/doc/config/objspace.std.sharesmallstr.txt diff --git a/pypy/doc/config/objspace.usemodules.select.rst b/pypy/doc/config/objspace.usemodules.select.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.select.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'select' module. -This module is expected to be fully working. 
diff --git a/pypy/doc/config/objspace.usemodules.select.rst b/pypy/doc/config/objspace.usemodules.select.txt copy from pypy/doc/config/objspace.usemodules.select.rst copy to pypy/doc/config/objspace.usemodules.select.txt diff --git a/pypy/doc/config/objspace.usemodules._md5.rst b/pypy/doc/config/objspace.usemodules._md5.txt copy from pypy/doc/config/objspace.usemodules._md5.rst copy to pypy/doc/config/objspace.usemodules._md5.txt diff --git a/pypy/doc/config/translation.platform.rst b/pypy/doc/config/translation.platform.txt copy from pypy/doc/config/translation.platform.rst copy to pypy/doc/config/translation.platform.txt diff --git a/pypy/doc/config/objspace.usemodules.signal.rst b/pypy/doc/config/objspace.usemodules.signal.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.signal.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'signal' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.rst b/pypy/doc/config/objspace.std.methodcachesizeexp.txt copy from pypy/doc/config/objspace.std.methodcachesizeexp.rst copy to pypy/doc/config/objspace.std.methodcachesizeexp.txt diff --git a/pypy/doc/config/translation.backendopt.constfold.rst b/pypy/doc/config/translation.backendopt.constfold.txt copy from pypy/doc/config/translation.backendopt.constfold.rst copy to pypy/doc/config/translation.backendopt.constfold.txt diff --git a/pypy/doc/config/objspace.std.withrangelist.rst b/pypy/doc/config/objspace.std.withrangelist.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.rst +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. 
This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/translation.gcrootfinder.rst b/pypy/doc/config/translation.gcrootfinder.txt copy from pypy/doc/config/translation.gcrootfinder.rst copy to pypy/doc/config/translation.gcrootfinder.txt diff --git a/pypy/doc/config/translation.force_make.rst b/pypy/doc/config/translation.force_make.rst deleted file mode 100644 --- a/pypy/doc/config/translation.force_make.rst +++ /dev/null @@ -1,1 +0,0 @@ -Force executing makefile instead of using platform. diff --git a/pypy/doc/config/objspace.std.withrangelist.rst b/pypy/doc/config/objspace.std.withrangelist.txt copy from pypy/doc/config/objspace.std.withrangelist.rst copy to pypy/doc/config/objspace.std.withrangelist.txt diff --git a/pypy/doc/config/objspace.soabi.rst b/pypy/doc/config/objspace.soabi.txt copy from pypy/doc/config/objspace.soabi.rst copy to pypy/doc/config/objspace.soabi.txt diff --git a/pypy/doc/config/objspace.std.newshortcut.rst b/pypy/doc/config/objspace.std.newshortcut.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.newshortcut.rst +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: cache and shortcut calling __new__ from builtin types diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.rst b/pypy/doc/config/objspace.std.optimized_list_getitem.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_list_getitem.rst +++ /dev/null @@ -1,1 +0,0 @@ -Optimized list[int] a bit. 
diff --git a/pypy/doc/config/objspace.std.prebuiltintto.rst b/pypy/doc/config/objspace.std.prebuiltintto.txt copy from pypy/doc/config/objspace.std.prebuiltintto.rst copy to pypy/doc/config/objspace.std.prebuiltintto.txt diff --git a/pypy/doc/config/objspace.usemodules._ast.rst b/pypy/doc/config/objspace.usemodules._ast.txt copy from pypy/doc/config/objspace.usemodules._ast.rst copy to pypy/doc/config/objspace.usemodules._ast.txt diff --git a/pypy/doc/config/objspace.usemodules.parser.rst b/pypy/doc/config/objspace.usemodules.parser.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.parser.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the 'parser' module. -This is PyPy implementation of the standard library 'parser' module (e.g. if -this option is enabled and you say ``import parser`` you get this module). -It is enabled by default. diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.rst b/pypy/doc/config/objspace.disable_call_speedhacks.txt copy from pypy/doc/config/objspace.disable_call_speedhacks.rst copy to pypy/doc/config/objspace.disable_call_speedhacks.txt diff --git a/pypy/doc/config/translation.jit_profiler.rst b/pypy/doc/config/translation.jit_profiler.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_profiler.rst +++ /dev/null @@ -1,1 +0,0 @@ -Integrate profiler support into the JIT diff --git a/pypy/doc/config/translation.rst b/pypy/doc/config/translation.txt copy from pypy/doc/config/translation.rst copy to pypy/doc/config/translation.txt diff --git a/pypy/doc/config/objspace.std.withcelldict.rst b/pypy/doc/config/objspace.std.withcelldict.txt copy from pypy/doc/config/objspace.std.withcelldict.rst copy to pypy/doc/config/objspace.std.withcelldict.txt diff --git a/pypy/doc/config/objspace.usemodules.signal.rst b/pypy/doc/config/objspace.usemodules.signal.txt copy from pypy/doc/config/objspace.usemodules.signal.rst copy to pypy/doc/config/objspace.usemodules.signal.txt diff --git 
a/pypy/doc/config/translation.output.rst b/pypy/doc/config/translation.output.rst deleted file mode 100644 --- a/pypy/doc/config/translation.output.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify file name that the produced executable gets. diff --git a/pypy/doc/config/objspace.allworkingmodules.rst b/pypy/doc/config/objspace.allworkingmodules.txt copy from pypy/doc/config/objspace.allworkingmodules.rst copy to pypy/doc/config/objspace.allworkingmodules.txt diff --git a/pypy/doc/config/objspace.usemodules.fcntl.rst b/pypy/doc/config/objspace.usemodules.fcntl.txt copy from pypy/doc/config/objspace.usemodules.fcntl.rst copy to pypy/doc/config/objspace.usemodules.fcntl.txt diff --git a/pypy/doc/config/objspace.rst b/pypy/doc/config/objspace.txt copy from pypy/doc/config/objspace.rst copy to pypy/doc/config/objspace.txt diff --git a/pypy/doc/config/objspace.usemodules._weakref.rst b/pypy/doc/config/objspace.usemodules._weakref.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._weakref.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_weakref' module, necessary for the standard lib 'weakref' module. -PyPy's weakref implementation is not completely stable yet. The first -difference to CPython is that weak references only go away after the next -garbage collection, not immediately. The other problem seems to be that under -certain circumstances (that we have not determined) weak references keep the -object alive. 
diff --git a/pypy/doc/config/objspace.usemodules.array.rst b/pypy/doc/config/objspace.usemodules.array.txt copy from pypy/doc/config/objspace.usemodules.array.rst copy to pypy/doc/config/objspace.usemodules.array.txt diff --git a/pypy/doc/config/translation.make_jobs.rst b/pypy/doc/config/translation.make_jobs.txt copy from pypy/doc/config/translation.make_jobs.rst copy to pypy/doc/config/translation.make_jobs.txt diff --git a/pypy/doc/config/objspace.std.prebuiltintto.rst b/pypy/doc/config/objspace.std.prebuiltintto.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.prebuiltintto.rst +++ /dev/null @@ -1,1 +0,0 @@ -See :config:`objspace.std.withprebuiltint`. diff --git a/pypy/doc/config/translation.vanilla.rst b/pypy/doc/config/translation.vanilla.rst deleted file mode 100644 --- a/pypy/doc/config/translation.vanilla.rst +++ /dev/null @@ -1,2 +0,0 @@ -Try to make the resulting compiled program as portable (=movable to another -machine) as possible. Which is not much. diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -135,7 +135,6 @@ implementations for various purposes (see below). This is now the default implementation of dictionaries in the Python interpreter. -option. Sharing Dicts +++++++++++++ @@ -206,28 +205,11 @@ User Class Optimizations ------------------------ -Shadow Tracking -+++++++++++++++ - -Shadow tracking is a general optimization that speeds up method calls for user -classes (that don't have special meta-class). For this a special dict -representation is used together with multidicts. This dict representation is -used only for instance dictionaries. The instance dictionary tracks whether an -instance attribute shadows an attribute of its class. 
This makes method calls -slightly faster in the following way: When calling a method the first thing that -is checked is the class dictionary to find descriptors. Normally, when a method -is found, the instance dictionary is then checked for instance attributes -shadowing the class attribute. If we know that there is no shadowing (since -instance dict tells us that) we can save this lookup on the instance dictionary. - -*This was deprecated and is no longer available.* - Method Caching ++++++++++++++ -Shadow tracking is also an important building block for the method caching -optimization. A method cache is introduced where the result of a method lookup +A method cache is introduced where the result of a method lookup is stored (which involves potentially many lookups in the base classes of a class). Entries in the method cache are stored using a hash computed from the name being looked up, the call site (i.e. the bytecode object and @@ -345,14 +327,12 @@ improving results by anything from 15-40 per cent. Another optimization, or rather set of optimizations, that has a uniformly good -effect is the set of three 'method optimizations', i.e. shadow tracking, the +effect are the two 'method optimizations', i.e. the method cache and the LOOKUP_METHOD and CALL_METHOD opcodes. On a heavily object-oriented benchmark (richards) they combine to give a speed-up of nearly 50%, and even on the extremely un-object-oriented pystone benchmark, the improvement is over 20%. -.. waffles about ropes - When building pypy, all generally useful optimizations are turned on by default unless you explicitly lower the translation optimization level with the ``--opt`` option. diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.rst b/pypy/doc/config/objspace.usemodules._multiprocessing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._multiprocessing.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_multiprocessing' module. 
-Used by the 'multiprocessing' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.oracle.rst b/pypy/doc/config/objspace.usemodules.oracle.txt copy from pypy/doc/config/objspace.usemodules.oracle.rst copy to pypy/doc/config/objspace.usemodules.oracle.txt diff --git a/pypy/doc/config/objspace.usemodules.errno.rst b/pypy/doc/config/objspace.usemodules.errno.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.errno.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'errno' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules.posix.rst b/pypy/doc/config/objspace.usemodules.posix.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.posix.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the essential 'posix' module. -This module is essential, included by default and cannot be removed (even when -specified explicitly, the option gets overridden later). diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.rst b/pypy/doc/config/objspace.std.getattributeshortcut.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.rst +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. 
diff --git a/pypy/doc/config/objspace.usemodules.cpyext.rst b/pypy/doc/config/objspace.usemodules.cpyext.txt copy from pypy/doc/config/objspace.usemodules.cpyext.rst copy to pypy/doc/config/objspace.usemodules.cpyext.txt diff --git a/pypy/doc/config/translation.platform.rst b/pypy/doc/config/translation.platform.rst deleted file mode 100644 --- a/pypy/doc/config/translation.platform.rst +++ /dev/null @@ -1,1 +0,0 @@ -select the target platform, in case of cross-compilation diff --git a/pypy/doc/config/translation.instrumentctl.rst b/pypy/doc/config/translation.instrumentctl.txt copy from pypy/doc/config/translation.instrumentctl.rst copy to pypy/doc/config/translation.instrumentctl.txt diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst --- a/pypy/doc/discussion/VM-integration.rst +++ b/pypy/doc/discussion/VM-integration.rst @@ -1,5 +1,3 @@ -.. XXX anto, do we still need this? - ============================================== Integration of PyPy with host Virtual Machines ============================================== diff --git a/pypy/doc/config/translation.fork_before.rst b/pypy/doc/config/translation.fork_before.rst deleted file mode 100644 --- a/pypy/doc/config/translation.fork_before.rst +++ /dev/null @@ -1,4 +0,0 @@ -This is an option mostly useful when working on the PyPy toolchain. If you use -it, translate.py will fork before the specified phase. If the translation -crashes after that fork, you can fix the bug in the toolchain, and continue -translation at the fork-point. 
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.rst b/pypy/doc/config/objspace.std.withmethodcachecounter.txt copy from pypy/doc/config/objspace.std.withmethodcachecounter.rst copy to pypy/doc/config/objspace.std.withmethodcachecounter.txt diff --git a/pypy/doc/config/translation.fork_before.rst b/pypy/doc/config/translation.fork_before.txt copy from pypy/doc/config/translation.fork_before.rst copy to pypy/doc/config/translation.fork_before.txt diff --git a/pypy/doc/config/translation.gcremovetypeptr.rst b/pypy/doc/config/translation.gcremovetypeptr.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gcremovetypeptr.rst +++ /dev/null @@ -1,1 +0,0 @@ -If set, save one word in every object. Framework GC only. diff --git a/pypy/doc/config/objspace.usemodules._lsprof.rst b/pypy/doc/config/objspace.usemodules._lsprof.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._lsprof.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the '_lsprof' module. diff --git a/pypy/doc/config/translation.jit_profiler.rst b/pypy/doc/config/translation.jit_profiler.txt copy from pypy/doc/config/translation.jit_profiler.rst copy to pypy/doc/config/translation.jit_profiler.txt diff --git a/pypy/doc/config/objspace.usemodules._sha.rst b/pypy/doc/config/objspace.usemodules._sha.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._sha.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in _'sha' module. -This module is expected to be working and is included by default. -There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. 
diff --git a/pypy/doc/config/translation.force_make.rst b/pypy/doc/config/translation.force_make.txt copy from pypy/doc/config/translation.force_make.rst copy to pypy/doc/config/translation.force_make.txt diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -28,9 +28,6 @@ fullpath = get_fullpath(self, path) result = Rest( Title(fullpath, abovechar="=", belowchar="="), - Directive("contents"), - Paragraph(Link("back to parent", path + ".html")), - Title("Basic Option Information"), ListItem(Strong("name:"), self._name), ListItem(Strong("description:"), self.doc)) if self.cmdline is not None: @@ -132,36 +129,18 @@ def make_rest_doc(self, path=""): fullpath = get_fullpath(self, path) content = Rest( - Title(fullpath, abovechar="=", belowchar="="), - Directive("contents")) - if path: - content.add( - Paragraph(Link("back to parent", path + ".html"))) + Title(fullpath, abovechar="=", belowchar="=")) + toctree = [] + for child in self._children: + subpath = fullpath + "." + child._name + toctree.append(subpath) + content.add(Directive("toctree", *toctree, maxdepth=4)) content.join( - Title("Basic Option Information"), ListItem(Strong("name:"), self._name), - ListItem(Strong("description:"), self.doc), - Title("Sub-Options")) + ListItem(Strong("description:"), self.doc)) stack = [] - prefix = fullpath curr = content config = Config(self) - for ending in self.getpaths(include_groups=True): - subpath = fullpath + "." 
+ ending - while not (subpath.startswith(prefix) and - subpath[len(prefix)] == "."): - curr, prefix = stack.pop() - print subpath, fullpath, ending, curr - sub, step = config._cfgimpl_get_home_by_path(ending) - doc = getattr(sub._cfgimpl_descr, step).doc - if doc: - new = curr.add(ListItem(Link(subpath + ":", subpath + ".html"), - Em(doc))) - else: - new = curr.add(ListItem(Link(subpath + ":", subpath + ".html"))) - stack.append((curr, prefix)) - prefix = subpath - curr = new return content diff --git a/pypy/doc/config/translation.instrumentctl.rst b/pypy/doc/config/translation.instrumentctl.rst deleted file mode 100644 --- a/pypy/doc/config/translation.instrumentctl.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/discussion/cli-optimizations.rst b/pypy/doc/discussion/cli-optimizations.rst deleted file mode 100644 --- a/pypy/doc/discussion/cli-optimizations.rst +++ /dev/null @@ -1,233 +0,0 @@ -Possible optimizations for the CLI backend -========================================== - -Stack push/pop optimization ---------------------------- - -The CLI's VM is a stack based machine: this fact doesn't play nicely -with the SSI form the flowgraphs are generated in. At the moment -gencli does a literal translation of the SSI statements, allocating a -new local variable for each variable of the flowgraph. 
- -For example, consider the following RPython code and the corresponding -flowgraph:: - - def bar(x, y): - foo(x+y, x-y) - - - inputargs: x_0 y_0 - v0 = int_add(x_0, y_0) - v1 = int_sub(x_0, y_0) - v2 = directcall((sm foo), v0, v1) - -This is the IL code generated by the CLI backend:: - - .locals init (int32 v0, int32 v1, int32 v2) - - block0: - ldarg 'x_0' - ldarg 'y_0' - add - stloc 'v0' - ldarg 'x_0' - ldarg 'y_0' - sub - stloc 'v1' - ldloc 'v0' - ldloc 'v1' - call int32 foo(int32, int32) - stloc 'v2' - -As you can see, the results of 'add' and 'sub' are stored in v0 and -v1, respectively, then v0 and v1 are reloaded onto stack. These -store/load is redundant, since the code would work nicely even without -them:: - - .locals init (int32 v2) - - block0: - ldarg 'x_0' - ldarg 'y_0' - add - ldarg 'x_0' - ldarg 'y_0' - sub - call int32 foo(int32, int32) - stloc 'v2' - -I've checked the native code generated by the Mono Jit on x86 and I've -seen that it does not optimize it. I haven't checked the native code -generated by Microsoft CLR, yet. - -Thus, we might consider to optimize it manually; it should not be so -difficult, but it is not trivial because we have to make sure that the -dropped locals are used only once. - - -Mapping RPython exceptions to native CLI exceptions ---------------------------------------------------- - -Both RPython and CLI have its own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. 
- -For now I've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by standard operations. The currently -implemented solution is to do an exception translation on-the-fly; for -example, the 'ind_add_ovf' is translated into the following IL code:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class exceptions.OverflowError::.ctor() - dup - ldsfld class Object_meta pypy.runtime.Constants::exceptions_OverflowError_meta - stfld class Object_meta Object::meta - throw - } - -I.e., it catches the builtin OverflowException and raises a RPython -OverflowError. - -I haven't measured timings yet, but I guess that this machinery brings -to some performance penalties even in the non-overflow case; a -possible optimization is to do the on-the-fly translation only when it -is strictly necessary, i.e. only when the except clause catches an -exception class whose subclass hierarchy is compatible with the -builtin one. As an example, consider the following RPython code:: - - try: - return mylist[0] - except IndexError: - return -1 - -Given that IndexError has no subclasses, we can map it to -IndexOutOfBoundException and directly catch this one:: - - try - { - ldloc 'mylist' - ldc.i4 0 - call int32 getitem(MyListType, int32) - ... - } - catch [mscorlib]System.IndexOutOfBoundException - { - // return -1 - ... 
- } - -By contrast we can't do so if the except clause catches classes that -don't directly map to any builtin class, such as LookupError:: - - try: - return mylist[0] - except LookupError: - return -1 - -Has to be translated in the old way:: - - .try - { - ldloc 'mylist' - ldc.i4 0 - - .try - { - call int32 getitem(MyListType, int32) - } - catch [mscorlib]System.IndexOutOfBoundException - { - // translate IndexOutOfBoundException into IndexError - newobj instance void class exceptions.IndexError::.ctor() - dup - ldsfld class Object_meta pypy.runtime.Constants::exceptions_IndexError_meta - stfld class Object_meta Object::meta - throw - } - ... - } - .catch exceptions.LookupError - { - // return -1 - ... - } - - -Specializing methods of List ----------------------------- - -Most methods of RPython lists are implemented by ll_* helpers placed -in rpython/rlist.py. For some of those we have a direct correspondent -already implemented in .NET List<>; we could use the oopspec attribute -for doing an on-the-fly replacement of these low level helpers with -their builtin correspondent. As an example the 'append' method is -already mapped to pypylib.List.append. Thanks to Armin Rigo for the -idea of using oopspec. - - -Doing some caching on Dict --------------------------- - -The current implementations of ll_dict_getitem and ll_dict_get in -ootypesystem.rdict do two consecutive lookups (calling ll_contains and -ll_get) on the same key. We might cache the result of -pypylib.Dict.ll_contains so that the successive ll_get don't need a -lookup. Btw, we need some profiling before choosing the best way. Or -we could directly refactor ootypesystem.rdict for doing a single -lookup. - -XXX -I tried it on revision 32917 and performance are slower! I don't know -why, but pypy.net pystone.py is slower by 17%, and pypy.net -richards.py is slower by 71% (!!!). I don't know why, need to be -investigated further. 
- - -Optimize StaticMethod ---------------------- - -:: - - 2006-10-02, 13:41 - - antocuni: do you try to not wrap static methods that are just called and not passed around - no - I think I don't know how to detect them - antocuni: you should try to render them just as static methods not as instances when possible - you need to track what appears only in direct_calls vs other places - - -Optimize Unicode ----------------- - -We should try to use native .NET unicode facilities instead of our -own. These should save both time (especially startup time) and memory. - -On 2006-10-02 I got these benchmarks: - -Pypy.NET Startup time Memory used -with unicodedata ~12 sec 112508 Kb -without unicodedata ~6 sec 79004 Kb - -The version without unicodedata is buggy, of course. - -Unfortunately it seems that .NET doesn't expose all the things we -need, so we will still need some data. For example there is no way to -get the unicode name of a char. diff --git a/pypy/doc/config/translation.backendopt.constfold.rst b/pypy/doc/config/translation.backendopt.constfold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.constfold.rst +++ /dev/null @@ -1,1 +0,0 @@ -Do constant folding of operations and constant propagation on flowgraphs. diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.rst b/pypy/doc/config/objspace.usemodules.pyexpat.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.pyexpat.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use (experimental) pyexpat module written in RPython, instead of CTypes -version which is used by default. 
diff --git a/pypy/doc/config/objspace.usemodules.binascii.rst b/pypy/doc/config/objspace.usemodules.binascii.txt copy from pypy/doc/config/objspace.usemodules.binascii.rst copy to pypy/doc/config/objspace.usemodules.binascii.txt diff --git a/pypy/doc/config/translation.type_system.rst b/pypy/doc/config/translation.type_system.txt copy from pypy/doc/config/translation.type_system.rst copy to pypy/doc/config/translation.type_system.txt diff --git a/pypy/doc/config/objspace.std.withtypeversion.rst b/pypy/doc/config/objspace.std.withtypeversion.txt copy from pypy/doc/config/objspace.std.withtypeversion.rst copy to pypy/doc/config/objspace.std.withtypeversion.txt diff --git a/pypy/doc/config/objspace.usemodules._io.rst b/pypy/doc/config/objspace.usemodules._io.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._io.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_io module. -Used by the 'io' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._lsprof.rst b/pypy/doc/config/objspace.usemodules._lsprof.txt copy from pypy/doc/config/objspace.usemodules._lsprof.rst copy to pypy/doc/config/objspace.usemodules._lsprof.txt diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.rst b/pypy/doc/config/translation.backendopt.remove_asserts.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.remove_asserts.rst +++ /dev/null @@ -1,1 +0,0 @@ -Remove raising of assertions from the flowgraphs, which might give small speedups. diff --git a/pypy/doc/config/objspace.translationmodules.rst b/pypy/doc/config/objspace.translationmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.translationmodules.rst +++ /dev/null @@ -1,1 +0,0 @@ -This option enables all modules which are needed to translate PyPy using PyPy. 
diff --git a/pypy/doc/config/objspace.usemodules.array.rst b/pypy/doc/config/objspace.usemodules.array.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.array.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use interpreter-level version of array module (on by default). diff --git a/pypy/doc/config/objspace.usemodules.termios.rst b/pypy/doc/config/objspace.usemodules.termios.txt copy from pypy/doc/config/objspace.usemodules.termios.rst copy to pypy/doc/config/objspace.usemodules.termios.txt diff --git a/pypy/doc/config/translation.backendopt.mallocs.rst b/pypy/doc/config/translation.backendopt.mallocs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.mallocs.rst +++ /dev/null @@ -1,29 +0,0 @@ -This optimization enables "malloc removal", which "explodes" -allocations of structures which do not escape from the function they -are allocated in into one or more additional local variables. - -An example. Consider this rather unlikely seeming code:: - - class C: - pass - def f(y): - c = C() - c.x = y - return c.x - -Malloc removal will spot that the ``C`` object can never leave ``f`` -and replace the above with code like this:: - - def f(y): - _c__x = y - return _c__x - -It is rare for code to be directly written in a way that allows this -optimization to be useful, but inlining often results in opportunities -for its use (and indeed, this is one of the main reasons PyPy does its -own inlining rather than relying on the C compilers). - -For much more information about this and other optimizations you can -read section 4.1 of the technical report on "Massive Parallelism and -Translation Aspects" which you can find on the `Technical reports page -<../index-report.html>`__. diff --git a/pypy/doc/config/translation.rweakref.rst b/pypy/doc/config/translation.rweakref.rst deleted file mode 100644 --- a/pypy/doc/config/translation.rweakref.rst +++ /dev/null @@ -1,3 +0,0 @@ -This indicates if the backend and GC policy support RPython-level weakrefs. 
-Can be tested in an RPython program to select between two implementation -strategies. diff --git a/pypy/doc/config/objspace.opcodes.rst b/pypy/doc/config/objspace.opcodes.txt copy from pypy/doc/config/objspace.opcodes.rst copy to pypy/doc/config/objspace.opcodes.txt diff --git a/pypy/doc/config/objspace.usemodules._locale.rst b/pypy/doc/config/objspace.usemodules._locale.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._locale.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_locale' module. -This module runs _locale written in RPython (instead of ctypes version). -It's not really finished yet; it's enabled by default on Windows. diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.rst b/pypy/doc/config/objspace.usemodules.cStringIO.txt copy from pypy/doc/config/objspace.usemodules.cStringIO.rst copy to pypy/doc/config/objspace.usemodules.cStringIO.txt diff --git a/pypy/doc/config/objspace.usemodules.thread.rst b/pypy/doc/config/objspace.usemodules.thread.txt copy from pypy/doc/config/objspace.usemodules.thread.rst copy to pypy/doc/config/objspace.usemodules.thread.txt diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.rst b/pypy/doc/config/objspace.std.logspaceoptypes.txt copy from pypy/doc/config/objspace.std.logspaceoptypes.rst copy to pypy/doc/config/objspace.std.logspaceoptypes.txt diff --git a/pypy/doc/config/translation.simplifying.rst b/pypy/doc/config/translation.simplifying.rst deleted file mode 100644 --- a/pypy/doc/config/translation.simplifying.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.rst b/pypy/doc/config/objspace.usemodules.cStringIO.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cStringIO.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the built-in cStringIO module. - -If not enabled, importing cStringIO gives you the app-level -implementation from the standard library StringIO module. diff --git a/pypy/doc/config/objspace.usemodules._stackless.rst b/pypy/doc/config/objspace.usemodules._stackless.txt copy from pypy/doc/config/objspace.usemodules._stackless.rst copy to pypy/doc/config/objspace.usemodules._stackless.txt diff --git a/pypy/doc/config/objspace.usemodules.zlib.rst b/pypy/doc/config/objspace.usemodules.zlib.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.zlib.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'zlib' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.instrument.rst b/pypy/doc/config/translation.instrument.txt copy from pypy/doc/config/translation.instrument.rst copy to pypy/doc/config/translation.instrument.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for profile-based inlining (:config:`translation.backendopt.profile_based_inline`). - -.. 
internal diff --git a/pypy/doc/config/translation.insist.rst b/pypy/doc/config/translation.insist.txt copy from pypy/doc/config/translation.insist.rst copy to pypy/doc/config/translation.insist.txt diff --git a/pypy/doc/config/objspace.std.withmapdict.rst b/pypy/doc/config/objspace.std.withmapdict.txt copy from pypy/doc/config/objspace.std.withmapdict.rst copy to pypy/doc/config/objspace.std.withmapdict.txt diff --git a/pypy/doc/config/objspace.usemodules._ssl.rst b/pypy/doc/config/objspace.usemodules._ssl.txt copy from pypy/doc/config/objspace.usemodules._ssl.rst copy to pypy/doc/config/objspace.usemodules._ssl.txt diff --git a/pypy/doc/config/translation.linkerflags.rst b/pypy/doc/config/translation.linkerflags.txt copy from pypy/doc/config/translation.linkerflags.rst copy to pypy/doc/config/translation.linkerflags.txt diff --git a/pypy/doc/config/translation.withsmallfuncsets.rst b/pypy/doc/config/translation.withsmallfuncsets.rst deleted file mode 100644 --- a/pypy/doc/config/translation.withsmallfuncsets.rst +++ /dev/null @@ -1,3 +0,0 @@ -Represent function sets smaller than this option's value as an integer instead -of a function pointer. A call is then done via a switch on that integer, which -allows inlining etc. Small numbers for this can speed up PyPy (try 5). 
diff --git a/pypy/doc/config/objspace.usemodules.errno.rst b/pypy/doc/config/objspace.usemodules.errno.txt copy from pypy/doc/config/objspace.usemodules.errno.rst copy to pypy/doc/config/objspace.usemodules.errno.txt diff --git a/pypy/doc/config/objspace.usemodules.itertools.rst b/pypy/doc/config/objspace.usemodules.itertools.txt copy from pypy/doc/config/objspace.usemodules.itertools.rst copy to pypy/doc/config/objspace.usemodules.itertools.txt diff --git a/pypy/doc/config/translation.cli.exception_transformer.rst b/pypy/doc/config/translation.cli.exception_transformer.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.exception_transformer.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the exception transformer instead of the native .NET exceptions to -implement RPython exceptions. Enable this option only if you know what -you are doing. diff --git a/pypy/doc/config/objspace.usemodules.marshal.rst b/pypy/doc/config/objspace.usemodules.marshal.txt copy from pypy/doc/config/objspace.usemodules.marshal.rst copy to pypy/doc/config/objspace.usemodules.marshal.txt diff --git a/pypy/doc/config/objspace.std.withsmallint.rst b/pypy/doc/config/objspace.std.withsmallint.txt copy from pypy/doc/config/objspace.std.withsmallint.rst copy to pypy/doc/config/objspace.std.withsmallint.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt diff --git a/pypy/doc/config/objspace.usemodules._sre.rst b/pypy/doc/config/objspace.usemodules._sre.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._sre.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_sre' module. -This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/translation.backendopt.print_statistics.rst b/pypy/doc/config/translation.backendopt.print_statistics.txt copy from pypy/doc/config/translation.backendopt.print_statistics.rst copy to pypy/doc/config/translation.backendopt.print_statistics.txt diff --git a/pypy/doc/config/translation.taggedpointers.rst b/pypy/doc/config/translation.taggedpointers.rst deleted file mode 100644 --- a/pypy/doc/config/translation.taggedpointers.rst +++ /dev/null @@ -1,3 +0,0 @@ -Enable tagged pointers. This option is mostly useful for the Smalltalk and -Prolog interpreters. For the Python interpreter the option -:config:`objspace.std.withsmallint` should be used. diff --git a/pypy/doc/config/objspace.usemodules.imp.rst b/pypy/doc/config/objspace.usemodules.imp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.imp.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'imp' module. -This module is included by default. diff --git a/pypy/doc/config/objspace.usemodules.time.rst b/pypy/doc/config/objspace.usemodules.time.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.time.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the 'time' module. - -Obsolete; use :config:`objspace.usemodules.rctime` for our up-to-date version -of the application-level 'time' module. 
diff --git a/pypy/doc/config/objspace.std.withtproxy.rst b/pypy/doc/config/objspace.std.withtproxy.txt copy from pypy/doc/config/objspace.std.withtproxy.rst copy to pypy/doc/config/objspace.std.withtproxy.txt diff --git a/pypy/doc/config/translation.output.rst b/pypy/doc/config/translation.output.txt copy from pypy/doc/config/translation.output.rst copy to pypy/doc/config/translation.output.txt diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.rst b/pypy/doc/config/objspace.std.mutable_builtintypes.txt copy from pypy/doc/config/objspace.std.mutable_builtintypes.rst copy to pypy/doc/config/objspace.std.mutable_builtintypes.txt diff --git a/pypy/doc/config/translation.taggedpointers.rst b/pypy/doc/config/translation.taggedpointers.txt copy from pypy/doc/config/translation.taggedpointers.rst copy to pypy/doc/config/translation.taggedpointers.txt diff --git a/pypy/doc/config/translation.backendopt.print_statistics.rst b/pypy/doc/config/translation.backendopt.print_statistics.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.print_statistics.rst +++ /dev/null @@ -1,2 +0,0 @@ -Debugging option. Print statics about the forest of flowgraphs as they -go through the various backend optimizations. \ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules._locale.rst b/pypy/doc/config/objspace.usemodules._locale.txt copy from pypy/doc/config/objspace.usemodules._locale.rst copy to pypy/doc/config/objspace.usemodules._locale.txt diff --git a/pypy/doc/config/translation.instrument.rst b/pypy/doc/config/translation.instrument.rst deleted file mode 100644 --- a/pypy/doc/config/translation.instrument.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._warnings.rst b/pypy/doc/config/objspace.usemodules._warnings.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._warnings.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the '_warning' module. 
This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.rst b/pypy/doc/config/objspace.std.getattributeshortcut.txt copy from pypy/doc/config/objspace.std.getattributeshortcut.rst copy to pypy/doc/config/objspace.std.getattributeshortcut.txt diff --git a/pypy/doc/config/translation.countmallocs.rst b/pypy/doc/config/translation.countmallocs.txt copy from pypy/doc/config/translation.countmallocs.rst copy to pypy/doc/config/translation.countmallocs.txt diff --git a/pypy/doc/config/objspace.std.withstrjoin.rst b/pypy/doc/config/objspace.std.withstrjoin.txt copy from pypy/doc/config/objspace.std.withstrjoin.rst copy to pypy/doc/config/objspace.std.withstrjoin.txt diff --git a/pypy/doc/config/translation.debug.rst b/pypy/doc/config/translation.debug.txt copy from pypy/doc/config/translation.debug.rst copy to pypy/doc/config/translation.debug.txt diff --git a/pypy/doc/config/objspace.usemodules.token.rst b/pypy/doc/config/objspace.usemodules.token.txt copy from pypy/doc/config/objspace.usemodules.token.rst copy to pypy/doc/config/objspace.usemodules.token.txt diff --git a/pypy/doc/config/translation.backendopt.mallocs.rst b/pypy/doc/config/translation.backendopt.mallocs.txt copy from pypy/doc/config/translation.backendopt.mallocs.rst copy to pypy/doc/config/translation.backendopt.mallocs.txt diff --git a/pypy/doc/config/translation.vanilla.rst b/pypy/doc/config/translation.vanilla.txt copy from pypy/doc/config/translation.vanilla.rst copy to pypy/doc/config/translation.vanilla.txt diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.rst b/pypy/doc/config/objspace.std.withprebuiltchar.txt copy from pypy/doc/config/objspace.std.withprebuiltchar.rst copy to pypy/doc/config/objspace.std.withprebuiltchar.txt diff --git a/pypy/doc/config/translation.profopt.rst b/pypy/doc/config/translation.profopt.rst deleted file mode 100644 --- a/pypy/doc/config/translation.profopt.rst +++ /dev/null @@ -1,5 +0,0 
@@ -Use GCCs profile-guided optimizations. This option specifies the the -arguments with which to call pypy-c (and in general the translated -RPython program) to gather profile data. Example for pypy-c: "-c 'from -richards import main;main(); from test import pystone; -pystone.main()'" diff --git a/pypy/doc/config/objspace.usemodules.clr.rst b/pypy/doc/config/objspace.usemodules.clr.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/objspace.usemodules.crypt.rst b/pypy/doc/config/objspace.usemodules.crypt.txt copy from pypy/doc/config/objspace.usemodules.crypt.rst copy to pypy/doc/config/objspace.usemodules.crypt.txt diff --git a/pypy/doc/config/objspace.usemodules._ssl.rst b/pypy/doc/config/objspace.usemodules._ssl.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ssl.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the '_ssl' module, which implements SSL socket operations. diff --git a/pypy/doc/config/objspace.usemodules._socket.rst b/pypy/doc/config/objspace.usemodules._socket.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._socket.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use the '_socket' module. - -This is our implementation of '_socket', the Python builtin module -exposing socket primitives, which is wrapped and used by the standard -library 'socket.py' module. It is based on `rffi`_. - -.. _`rffi`: ../rffi.html diff --git a/pypy/doc/config/translation.backendopt.inline.rst b/pypy/doc/config/translation.backendopt.inline.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline.rst +++ /dev/null @@ -1,10 +0,0 @@ -Inline flowgraphs based on an heuristic, the default one considers -essentially the a weight for the flowgraph based on the number of -low-level operations in them (see -:config:`translation.backendopt.inline_threshold` ). 
- -Some amount of inlining in order to have RPython builtin type helpers -inlined is needed for malloc removal -(:config:`translation.backendopt.mallocs`) to be effective. - -This optimization is used by default. diff --git a/pypy/doc/config/objspace.std.withropeunicode.rst b/pypy/doc/config/objspace.std.withropeunicode.txt copy from pypy/doc/config/objspace.std.withropeunicode.rst copy to pypy/doc/config/objspace.std.withropeunicode.txt diff --git a/pypy/doc/config/objspace.std.multimethods.rst b/pypy/doc/config/objspace.std.multimethods.txt copy from pypy/doc/config/objspace.std.multimethods.rst copy to pypy/doc/config/objspace.std.multimethods.txt diff --git a/pypy/doc/config/objspace.std.withsmalllong.rst b/pypy/doc/config/objspace.std.withsmalllong.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withsmalllong.rst +++ /dev/null @@ -1,5 +0,0 @@ -Enable "small longs", an additional implementation of the Python -type "long", implemented with a C long long. It is mostly useful -on 32-bit; on 64-bit, a C long long is the same as a C long, so -its usefulness is limited to Python objects of type "long" that -would anyway fit in an "int". diff --git a/pypy/doc/config/objspace.opcodes.rst b/pypy/doc/config/objspace.opcodes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/objspace.usemodules.parser.rst b/pypy/doc/config/objspace.usemodules.parser.txt copy from pypy/doc/config/objspace.usemodules.parser.rst copy to pypy/doc/config/objspace.usemodules.parser.txt diff --git a/pypy/doc/config/objspace.std.withrope.rst b/pypy/doc/config/objspace.std.withrope.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrope.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable ropes to be the default string implementation. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. 
_`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#ropes - - diff --git a/pypy/doc/config/objspace.usemodules.crypt.rst b/pypy/doc/config/objspace.usemodules.crypt.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.crypt.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'crypt' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.rst b/pypy/doc/config/objspace.std.logspaceoptypes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.logspaceoptypes.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. internal - -Wrap "simple" bytecode implementations like BINARY_ADD with code that collects -information about which types these bytecodes receive as arguments. diff --git a/pypy/doc/config/objspace.usemodules.rst b/pypy/doc/config/objspace.usemodules.txt copy from pypy/doc/config/objspace.usemodules.rst copy to pypy/doc/config/objspace.usemodules.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for clever malloc removal (:config:`translation.backendopt.clever_malloc_removal`). - -.. 
internal diff --git a/pypy/doc/config/objspace.usemodules._demo.rst b/pypy/doc/config/objspace.usemodules._demo.txt copy from pypy/doc/config/objspace.usemodules._demo.rst copy to pypy/doc/config/objspace.usemodules._demo.txt diff --git a/pypy/doc/config/translation.noprofopt.rst b/pypy/doc/config/translation.noprofopt.txt copy from pypy/doc/config/translation.noprofopt.rst copy to pypy/doc/config/translation.noprofopt.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for clever malloc removal (:config:`translation.backendopt.clever_malloc_removal`). diff --git a/pypy/doc/config/translation.gcrootfinder.rst b/pypy/doc/config/translation.gcrootfinder.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gcrootfinder.rst +++ /dev/null @@ -1,15 +0,0 @@ -Choose method how to find roots in the GC. Boehm and refcounting have their own -methods, this is mostly only interesting for framework GCs. For those you have -a choice of various alternatives: - - - use a shadow stack (XXX link to paper), e.g. explicitly maintaining a stack - of roots - - - use stackless to find roots by unwinding the stack. Requires - :config:`translation.stackless`. Note that this turned out to - be slower than just using a shadow stack. - - - use GCC and i386 specific assembler hackery to find the roots on the stack. - This is fastest but platform specific. - - - Use LLVM's GC facilities to find the roots. 
diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst b/pypy/doc/config/translation.builtins_can_raise_exceptions.txt copy from pypy/doc/config/translation.builtins_can_raise_exceptions.rst copy to pypy/doc/config/translation.builtins_can_raise_exceptions.txt diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -17,6 +17,7 @@ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ ^pypy/doc/.+\.html$ +^pypy/doc/config/.+\.rst$ ^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ ^pypy/translator/c/src/libffi_msvc/.+\.obj$ diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst b/pypy/doc/config/translation.backendopt.raisingop2direct_call.txt copy from pypy/doc/config/translation.backendopt.raisingop2direct_call.rst copy to pypy/doc/config/translation.backendopt.raisingop2direct_call.txt diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.rst b/pypy/doc/config/objspace.usemodules._minimal_curses.txt copy from pypy/doc/config/objspace.usemodules._minimal_curses.rst copy to pypy/doc/config/objspace.usemodules._minimal_curses.txt diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.rst b/pypy/doc/config/objspace.std.withdictmeasurement.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withdictmeasurement.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/config/translation.ootype.mangle.rst b/pypy/doc/config/translation.ootype.mangle.txt copy from pypy/doc/config/translation.ootype.mangle.rst copy to pypy/doc/config/translation.ootype.mangle.txt diff --git a/pypy/doc/config/objspace.usemodules.zipimport.rst b/pypy/doc/config/objspace.usemodules.zipimport.txt copy from pypy/doc/config/objspace.usemodules.zipimport.rst copy to pypy/doc/config/objspace.usemodules.zipimport.txt diff --git a/pypy/doc/config/translation.jit_ffi.rst b/pypy/doc/config/translation.jit_ffi.txt copy from pypy/doc/config/translation.jit_ffi.rst copy to pypy/doc/config/translation.jit_ffi.txt diff --git a/pypy/doc/config/objspace.usemodules.itertools.rst b/pypy/doc/config/objspace.usemodules.itertools.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.itertools.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the interp-level 'itertools' module. -If not included, a slower app-level version of itertools is used. diff --git a/pypy/doc/config/translation.list_comprehension_operations.rst b/pypy/doc/config/translation.list_comprehension_operations.txt copy from pypy/doc/config/translation.list_comprehension_operations.rst copy to pypy/doc/config/translation.list_comprehension_operations.txt diff --git a/pypy/doc/config/objspace.usemodules.rst b/pypy/doc/config/objspace.usemodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/objspace.usemodules._rawffi.rst b/pypy/doc/config/objspace.usemodules._rawffi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._rawffi.rst +++ /dev/null @@ -1,3 +0,0 @@ -An experimental module providing very low-level interface to -C-level libraries, for use when implementing ctypes, not -intended for a direct use at all. 
\ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.rst b/pypy/doc/config/objspace.usemodules._pickle_support.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._pickle_support.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_pickle_support' module. -Internal helpers for pickling runtime builtin types (frames, cells, etc) -for `stackless`_ tasklet pickling support. -.. _`stackless`: ../stackless.html - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._demo.rst b/pypy/doc/config/objspace.usemodules._demo.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._demo.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_demo' module. - -This is the demo module for mixed modules. Not enabled by default. diff --git a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst b/pypy/doc/config/translation.backendopt.merge_if_blocks.txt copy from pypy/doc/config/translation.backendopt.merge_if_blocks.rst copy to pypy/doc/config/translation.backendopt.merge_if_blocks.txt diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.txt copy from pypy/doc/config/objspace.extmodules.rst copy to pypy/doc/config/objspace.extmodules.txt diff --git a/pypy/doc/config/objspace.usemodules._rawffi.rst b/pypy/doc/config/objspace.usemodules._rawffi.txt copy from pypy/doc/config/objspace.usemodules._rawffi.rst copy to pypy/doc/config/objspace.usemodules._rawffi.txt diff --git a/pypy/doc/config/translation.ootype.rst b/pypy/doc/config/translation.ootype.rst deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.rst +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/config/objspace.usemodules._hashlib.rst b/pypy/doc/config/objspace.usemodules._hashlib.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._hashlib.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_hashlib' module. 
-Used by the 'hashlib' standard lib module, and indirectly by the various cryptographic libs. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._socket.rst b/pypy/doc/config/objspace.usemodules._socket.txt copy from pypy/doc/config/objspace.usemodules._socket.rst copy to pypy/doc/config/objspace.usemodules._socket.txt diff --git a/pypy/doc/config/translation.cc.rst b/pypy/doc/config/translation.cc.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cc.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify which C compiler to use. diff --git a/pypy/doc/config/objspace.lonepycfiles.rst b/pypy/doc/config/objspace.lonepycfiles.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.lonepycfiles.rst +++ /dev/null @@ -1,16 +0,0 @@ -If turned on, PyPy accepts to import a module ``x`` if it finds a -file ``x.pyc`` even if there is no file ``x.py``. - -This is the way that CPython behaves, but it is disabled by -default for PyPy because it is a common cause of issues: most -typically, the ``x.py`` file is removed (manually or by a -version control system) but the ``x`` module remains -accidentally importable because the ``x.pyc`` file stays -around. - -The usual reason for wanting this feature is to distribute -non-open-source Python programs by distributing ``pyc`` files -only, but this use case is not practical for PyPy at the -moment because multiple versions of PyPy compiled with various -optimizations might be unable to load each other's ``pyc`` -files. diff --git a/pypy/doc/config/objspace.std.withtypeversion.rst b/pypy/doc/config/objspace.std.withtypeversion.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.rst +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. 
It does not make sense to -enable this option alone. - -.. internal diff --git a/pypy/doc/config/translation.thread.rst b/pypy/doc/config/translation.thread.txt copy from pypy/doc/config/translation.thread.rst copy to pypy/doc/config/translation.thread.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt diff --git a/pypy/doc/config/objspace.usemodules.fcntl.rst b/pypy/doc/config/objspace.usemodules.fcntl.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.fcntl.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'fcntl' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.rst b/pypy/doc/config/objspace.disable_call_speedhacks.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.disable_call_speedhacks.rst +++ /dev/null @@ -1,2 +0,0 @@ -disable the speed hacks that the interpreter normally does. Usually you don't -want to set this to False, but some object spaces require it. diff --git a/pypy/doc/config/objspace.usemodules.gc.rst b/pypy/doc/config/objspace.usemodules.gc.txt copy from pypy/doc/config/objspace.usemodules.gc.rst copy to pypy/doc/config/objspace.usemodules.gc.txt diff --git a/pypy/doc/config/objspace.std.withsmalllong.rst b/pypy/doc/config/objspace.std.withsmalllong.txt copy from pypy/doc/config/objspace.std.withsmalllong.rst copy to pypy/doc/config/objspace.std.withsmalllong.txt diff --git a/pypy/doc/config/objspace.nofaking.rst b/pypy/doc/config/objspace.nofaking.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.nofaking.rst +++ /dev/null @@ -1,7 +0,0 @@ -This options prevents the automagic borrowing of implementations of -modules and types not present in PyPy from CPython. 
- -As such, it is required when translating, as then there is no CPython -to borrow from. For running py.py it is useful for testing the -implementation of modules like "posix", but it makes everything even -slower than it is already. diff --git a/pypy/doc/config/translation.gctransformer.rst b/pypy/doc/config/translation.gctransformer.txt copy from pypy/doc/config/translation.gctransformer.rst copy to pypy/doc/config/translation.gctransformer.txt diff --git a/pypy/doc/config/translation.backend.rst b/pypy/doc/config/translation.backend.txt copy from pypy/doc/config/translation.backend.rst copy to pypy/doc/config/translation.backend.txt diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.rst b/pypy/doc/config/translation.backendopt.really_remove_asserts.txt copy from pypy/doc/config/translation.backendopt.really_remove_asserts.rst copy to pypy/doc/config/translation.backendopt.really_remove_asserts.txt diff --git a/pypy/doc/config/objspace.usemodules.exceptions.rst b/pypy/doc/config/objspace.usemodules.exceptions.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.exceptions.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'exceptions' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/config/objspace.std.withstrjoin.rst b/pypy/doc/config/objspace.std.withstrjoin.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrjoin.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable "string join" objects. - -See the page about `Standard Interpreter Optimizations`_ for more details. - -.. 
_`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#string-join-objects - - diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal.txt diff --git a/pypy/doc/config/objspace.usemodules.gc.rst b/pypy/doc/config/objspace.usemodules.gc.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.gc.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the 'gc' module. -This module is expected to be working and is included by default. -Note that since the gc module is highly implementation specific, it contains -only the ``collect`` function in PyPy, which forces a collection when compiled -with the framework or with Boehm. diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.rst b/pypy/doc/config/objspace.usemodules.micronumpy.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.micronumpy.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the micronumpy module. -This module provides a very basic numpy-like interface. Major use-case -is to show how jit scales for other code. diff --git a/pypy/doc/config/translation.log.rst b/pypy/doc/config/translation.log.rst deleted file mode 100644 --- a/pypy/doc/config/translation.log.rst +++ /dev/null @@ -1,5 +0,0 @@ -Include debug prints in the translation. - -These must be enabled by setting the PYPYLOG environment variable. -The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug.h. diff --git a/pypy/doc/config/objspace.usemodules.rbench.rst b/pypy/doc/config/objspace.usemodules.rbench.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rbench.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the built-in 'rbench' module. 
-This module contains geninterpreted versions of pystone and richards, -so it is useful to measure the interpretation overhead of the various -pypy-\*. diff --git a/pypy/doc/config/objspace.usemodules.__builtin__.rst b/pypy/doc/config/objspace.usemodules.__builtin__.txt copy from pypy/doc/config/objspace.usemodules.__builtin__.rst copy to pypy/doc/config/objspace.usemodules.__builtin__.txt diff --git a/pypy/doc/config/objspace.std.withstrbuf.rst b/pypy/doc/config/objspace.std.withstrbuf.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrbuf.rst +++ /dev/null @@ -1,4 +0,0 @@ -Enable "string buffer" objects. - -Similar to "string join" objects, but using a StringBuilder to represent -a string built by repeated application of ``+=``. diff --git a/pypy/doc/config/translation.compilerflags.rst b/pypy/doc/config/translation.compilerflags.txt copy from pypy/doc/config/translation.compilerflags.rst copy to pypy/doc/config/translation.compilerflags.txt diff --git a/pypy/doc/config/objspace.usemodules.cmath.rst b/pypy/doc/config/objspace.usemodules.cmath.txt copy from pypy/doc/config/objspace.usemodules.cmath.rst copy to pypy/doc/config/objspace.usemodules.cmath.txt diff --git a/pypy/doc/config/objspace.usemodules._bisect.rst b/pypy/doc/config/objspace.usemodules._bisect.txt copy from pypy/doc/config/objspace.usemodules._bisect.rst copy to pypy/doc/config/objspace.usemodules._bisect.txt diff --git a/pypy/doc/config/translation.no__thread.rst b/pypy/doc/config/translation.no__thread.txt copy from pypy/doc/config/translation.no__thread.rst copy to pypy/doc/config/translation.no__thread.txt diff --git a/pypy/doc/config/translation.noprofopt.rst b/pypy/doc/config/translation.noprofopt.rst deleted file mode 100644 diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.rst b/pypy/doc/config/objspace.usemodules._minimal_curses.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._minimal_curses.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use 
the '_curses' module. -This module is just a stub. It only implements a few functions. diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst +++ /dev/null @@ -1,12 +0,0 @@ -Introduce a new opcode called ``CALL_LIKELY_BUILTIN``. It is used when something -is called, that looks like a builtin function (but could in reality be shadowed -by a name in the module globals). For all module globals dictionaries it is -then tracked which builtin name is shadowed in this module. If the -``CALL_LIKELY_BUILTIN`` opcode is executed, it is checked whether the builtin is -shadowed. If not, the corresponding builtin is called. Otherwise the object that -is shadowing it is called instead. If no shadowing is happening, this saves two -dictionary lookups on calls to builtins. - -For more information, see the section in `Standard Interpreter Optimizations`_. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#call-likely-builtin diff --git a/pypy/doc/config/objspace.usemodules.symbol.rst b/pypy/doc/config/objspace.usemodules.symbol.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.symbol.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'symbol' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.backendopt.storesink.rst b/pypy/doc/config/translation.backendopt.storesink.txt copy from pypy/doc/config/translation.backendopt.storesink.rst copy to pypy/doc/config/translation.backendopt.storesink.txt diff --git a/pypy/doc/config/translation.cli.rst b/pypy/doc/config/translation.cli.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. 
intentionally empty diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.rst b/pypy/doc/config/translation.backendopt.remove_asserts.txt copy from pypy/doc/config/translation.backendopt.remove_asserts.rst copy to pypy/doc/config/translation.backendopt.remove_asserts.txt diff --git a/pypy/doc/config/translation.cli.rst b/pypy/doc/config/translation.cli.txt copy from pypy/doc/config/translation.cli.rst copy to pypy/doc/config/translation.cli.txt diff --git a/pypy/doc/config/translation.backendopt.none.rst b/pypy/doc/config/translation.backendopt.none.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.none.rst +++ /dev/null @@ -1,1 +0,0 @@ -Do not run any backend optimizations. diff --git a/pypy/doc/config/objspace.std.optimized_comparison_op.rst b/pypy/doc/config/objspace.std.optimized_comparison_op.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_comparison_op.rst +++ /dev/null @@ -1,1 +0,0 @@ -Optimize the comparison of two integers a bit. diff --git a/pypy/doc/config/objspace.usemodules._testing.rst b/pypy/doc/config/objspace.usemodules._testing.txt copy from pypy/doc/config/objspace.usemodules._testing.rst copy to pypy/doc/config/objspace.usemodules._testing.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.rst b/pypy/doc/config/translation.backendopt.profile_based_inline.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline.txt diff --git a/pypy/doc/config/objspace.geninterp.rst b/pypy/doc/config/objspace.geninterp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.geninterp.rst +++ /dev/null @@ -1,4 +0,0 @@ -This option enables `geninterp`_. This will usually make the PyPy interpreter -significantly faster (but also a bit bigger). - -.. 
_`geninterp`: ../geninterp.html diff --git a/pypy/doc/config/objspace.usemodules.zipimport.rst b/pypy/doc/config/objspace.usemodules.zipimport.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.zipimport.rst +++ /dev/null @@ -1,3 +0,0 @@ -This module implements zipimport mechanism described -in PEP 302. It's supposed to work and translate, so it's included -by default \ No newline at end of file diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst b/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst +++ /dev/null @@ -1,10 +0,0 @@ -Enable a pair of bytecodes that speed up method calls. -See ``pypy.interpreter.callmethod`` for a description. - -The goal is to avoid creating the bound method object in the common -case. So far, this only works for calls with no keyword, no ``*arg`` -and no ``**arg`` but it would be easy to extend. - -For more information, see the section in `Standard Interpreter Optimizations`_. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#lookup-method-call-method diff --git a/pypy/doc/config/objspace.usemodules.marshal.rst b/pypy/doc/config/objspace.usemodules.marshal.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.marshal.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'marshal' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.std.withtproxy.rst b/pypy/doc/config/objspace.std.withtproxy.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtproxy.rst +++ /dev/null @@ -1,3 +0,0 @@ -Enable `transparent proxies`_. - -.. _`transparent proxies`: ../objspace-proxies.html#tproxy diff --git a/pypy/doc/config/objspace.usemodules._codecs.rst b/pypy/doc/config/objspace.usemodules._codecs.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._codecs.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_codecs' module. 
-Used by the 'codecs' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst b/pypy/doc/config/translation.backendopt.merge_if_blocks.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst +++ /dev/null @@ -1,26 +0,0 @@ -This optimization converts parts of flow graphs that result from -chains of ifs and elifs like this into merged blocks. - -By default flow graphing this kind of code:: - - if x == 0: - f() - elif x == 1: - g() - elif x == 4: - h() - else: - j() - -will result in a chain of blocks with two exits, somewhat like this: - -.. image:: unmergedblocks.png - -(reflecting how Python would interpret this code). Running this -optimization will transform the block structure to contain a single -"choice block" with four exits: - -.. image:: mergedblocks.png - -This can then be turned into a switch by the C backend, allowing the C -compiler to produce more efficient code. diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst b/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. Transformation required by the LLVM backend. - -.. 
internal diff --git a/pypy/doc/config/translation.log.rst b/pypy/doc/config/translation.log.txt copy from pypy/doc/config/translation.log.rst copy to pypy/doc/config/translation.log.txt diff --git a/pypy/doc/config/objspace.usemodules.rbench.rst b/pypy/doc/config/objspace.usemodules.rbench.txt copy from pypy/doc/config/objspace.usemodules.rbench.rst copy to pypy/doc/config/objspace.usemodules.rbench.txt diff --git a/pypy/doc/config/objspace.usemodules._file.rst b/pypy/doc/config/objspace.usemodules._file.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._file.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the '_file' module. It is an internal module that contains helper -functionality for the builtin ``file`` type. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.rst b/pypy/doc/config/objspace.usemodules.pypyjit.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.pypyjit.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'pypyjit' module. diff --git a/pypy/doc/config/translation.secondaryentrypoints.rst b/pypy/doc/config/translation.secondaryentrypoints.rst deleted file mode 100644 --- a/pypy/doc/config/translation.secondaryentrypoints.rst +++ /dev/null @@ -1,1 +0,0 @@ -Enable secondary entrypoints support list. Needed for cpyext module. 
diff --git a/pypy/doc/config/translation.dump_static_data_info.rst b/pypy/doc/config/translation.dump_static_data_info.txt copy from pypy/doc/config/translation.dump_static_data_info.rst copy to pypy/doc/config/translation.dump_static_data_info.txt diff --git a/pypy/doc/config/objspace.usemodules.zlib.rst b/pypy/doc/config/objspace.usemodules.zlib.txt copy from pypy/doc/config/objspace.usemodules.zlib.rst copy to pypy/doc/config/objspace.usemodules.zlib.txt diff --git a/pypy/doc/config/translation.backendopt.inline_heuristic.rst b/pypy/doc/config/translation.backendopt.inline_heuristic.txt copy from pypy/doc/config/translation.backendopt.inline_heuristic.rst copy to pypy/doc/config/translation.backendopt.inline_heuristic.txt diff --git a/pypy/doc/config/objspace.usemodules.symbol.rst b/pypy/doc/config/objspace.usemodules.symbol.txt copy from pypy/doc/config/objspace.usemodules.symbol.rst copy to pypy/doc/config/objspace.usemodules.symbol.txt diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.rst b/pypy/doc/config/translation.backendopt.really_remove_asserts.rst deleted file mode 100644 diff --git a/pypy/doc/config/translation.make_jobs.rst b/pypy/doc/config/translation.make_jobs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.make_jobs.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify number of make jobs for make command. diff --git a/pypy/doc/config/objspace.rst b/pypy/doc/config/objspace.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. 
intentionally empty diff --git a/pypy/doc/config/objspace.usemodules.rctime.rst b/pypy/doc/config/objspace.usemodules.rctime.txt copy from pypy/doc/config/objspace.usemodules.rctime.rst copy to pypy/doc/config/objspace.usemodules.rctime.txt diff --git a/pypy/doc/config/objspace.usemodules._sre.rst b/pypy/doc/config/objspace.usemodules._sre.txt copy from pypy/doc/config/objspace.usemodules._sre.rst copy to pypy/doc/config/objspace.usemodules._sre.txt diff --git a/pypy/doc/config/objspace.nofaking.rst b/pypy/doc/config/objspace.nofaking.txt copy from pypy/doc/config/objspace.nofaking.rst copy to pypy/doc/config/objspace.nofaking.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst +++ /dev/null @@ -1,10 +0,0 @@ -Try to inline flowgraphs based on whether doing so would enable malloc -removal (:config:`translation.backendopt.mallocs`.) by eliminating -calls that result in escaping. This is an experimental optimization, -also right now some eager inlining is necessary for helpers doing -malloc itself to be inlined first for this to be effective. -This option enable also an extra subsequent malloc removal phase. - -Callee flowgraphs are considered candidates based on a weight heuristic like -for basic inlining. (see :config:`translation.backendopt.inline`, -:config:`translation.backendopt.clever_malloc_removal_threshold` ). diff --git a/pypy/doc/config/objspace.usemodules.sys.rst b/pypy/doc/config/objspace.usemodules.sys.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.sys.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'sys' module. -This module is essential, included by default and should not be removed. 
diff --git a/pypy/doc/config/objspace.usemodules._collections.rst b/pypy/doc/config/objspace.usemodules._collections.txt copy from pypy/doc/config/objspace.usemodules._collections.rst copy to pypy/doc/config/objspace.usemodules._collections.txt diff --git a/pypy/doc/config/translation.backendopt.inline.rst b/pypy/doc/config/translation.backendopt.inline.txt copy from pypy/doc/config/translation.backendopt.inline.rst copy to pypy/doc/config/translation.backendopt.inline.txt diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.rst b/pypy/doc/config/objspace.std.prebuiltintfrom.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.prebuiltintfrom.rst +++ /dev/null @@ -1,1 +0,0 @@ -see :config:`objspace.std.withprebuiltint`. diff --git a/pypy/doc/config/translation.countmallocs.rst b/pypy/doc/config/translation.countmallocs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.countmallocs.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal; used by some of the C backend tests to check that the number of -allocations matches the number of frees. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._io.rst b/pypy/doc/config/objspace.usemodules._io.txt copy from pypy/doc/config/objspace.usemodules._io.rst copy to pypy/doc/config/objspace.usemodules._io.txt diff --git a/pypy/doc/config/objspace.usemodules._winreg.rst b/pypy/doc/config/objspace.usemodules._winreg.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._winreg.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the built-in '_winreg' module, provides access to the Windows registry. -This module is expected to be working and is included by default on Windows. 
diff --git a/pypy/doc/config/objspace.usemodules.clr.rst b/pypy/doc/config/objspace.usemodules.clr.txt copy from pypy/doc/config/objspace.usemodules.clr.rst copy to pypy/doc/config/objspace.usemodules.clr.txt diff --git a/pypy/doc/config/translation.jit_ffi.rst b/pypy/doc/config/translation.jit_ffi.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_ffi.rst +++ /dev/null @@ -1,1 +0,0 @@ -Internal option: enable OptFfiCall in the jit optimizations. diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.rst b/pypy/doc/config/objspace.usemodules._pickle_support.txt copy from pypy/doc/config/objspace.usemodules._pickle_support.rst copy to pypy/doc/config/objspace.usemodules._pickle_support.txt diff --git a/pypy/doc/config/translation.verbose.rst b/pypy/doc/config/translation.verbose.rst deleted file mode 100644 --- a/pypy/doc/config/translation.verbose.rst +++ /dev/null @@ -1,1 +0,0 @@ -Print some more information during translation. diff --git a/pypy/doc/config/objspace.usemodules.math.rst b/pypy/doc/config/objspace.usemodules.math.txt copy from pypy/doc/config/objspace.usemodules.math.rst copy to pypy/doc/config/objspace.usemodules.math.txt diff --git a/pypy/doc/config/translation.compilerflags.rst b/pypy/doc/config/translation.compilerflags.rst deleted file mode 100644 --- a/pypy/doc/config/translation.compilerflags.rst +++ /dev/null @@ -1,1 +0,0 @@ -Experimental. Specify extra flags to pass to the C compiler. diff --git a/pypy/doc/config/objspace.std.withsmallint.rst b/pypy/doc/config/objspace.std.withsmallint.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withsmallint.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use "tagged pointers" to represent small enough integer values: Integers that -fit into 31 bits (respective 63 bits on 64 bit machines) are not represented by -boxing them in an instance of ``W_IntObject``. 
Instead they are represented as a -pointer having the lowest bit set and the rest of the bits used to store the -value of the integer. This gives a small speedup for integer operations as well -as better memory behaviour. diff --git a/pypy/tool/rest/rst.py b/pypy/tool/rest/rst.py --- a/pypy/tool/rest/rst.py +++ b/pypy/tool/rest/rst.py @@ -389,18 +389,14 @@ indent = ' ' def __init__(self, name, *args, **options): self.name = name - self.content = options.pop('content', []) - children = list(args) - super(Directive, self).__init__(*children) + self.content = args + super(Directive, self).__init__() self.options = options def text(self): # XXX not very pretty... - namechunksize = len(self.name) + 2 - self.children.insert(0, Text('X' * namechunksize)) - txt = super(Directive, self).text() - txt = '.. %s::%s' % (self.name, txt[namechunksize + 3:],) - options = '\n'.join([' :%s: %s' % (k, v) for (k, v) in + txt = '.. %s::' % (self.name,) + options = '\n'.join([' :%s: %s' % (k, v) for (k, v) in self.options.iteritems()]) if options: txt += '\n%s' % (options,) @@ -408,10 +404,7 @@ if self.content: txt += '\n' for item in self.content: - assert item.parentclass == Rest, 'only top-level items allowed' - assert not item.indent - item.indent = ' ' - txt += '\n' + item.text() + txt += '\n ' + item return txt diff --git a/pypy/doc/config/translation.sandbox.rst b/pypy/doc/config/translation.sandbox.txt copy from pypy/doc/config/translation.sandbox.rst copy to pypy/doc/config/translation.sandbox.txt diff --git a/pypy/doc/config/translation.gctransformer.rst b/pypy/doc/config/translation.gctransformer.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gctransformer.rst +++ /dev/null @@ -1,1 +0,0 @@ -internal option diff --git a/pypy/doc/config/objspace.usemodules.binascii.rst b/pypy/doc/config/objspace.usemodules.binascii.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.binascii.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the RPython 'binascii' module. 
diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.rst b/pypy/doc/config/translation.backendopt.stack_optimization.txt copy from pypy/doc/config/translation.backendopt.stack_optimization.rst copy to pypy/doc/config/translation.backendopt.stack_optimization.txt diff --git a/pypy/doc/config/objspace.std.rst b/pypy/doc/config/objspace.std.txt copy from pypy/doc/config/objspace.std.rst copy to pypy/doc/config/objspace.std.txt diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.rst b/pypy/doc/config/objspace.usemodules.micronumpy.txt copy from pypy/doc/config/objspace.usemodules.micronumpy.rst copy to pypy/doc/config/objspace.usemodules.micronumpy.txt diff --git a/pypy/doc/config/objspace.usemodules.thread.rst b/pypy/doc/config/objspace.usemodules.thread.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.thread.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'thread' module. diff --git a/pypy/doc/config/objspace.usemodules.mmap.rst b/pypy/doc/config/objspace.usemodules.mmap.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.mmap.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'mmap' module. -This module is expected to be fully working. diff --git a/pypy/doc/config/objspace.std.withstrbuf.rst b/pypy/doc/config/objspace.std.withstrbuf.txt copy from pypy/doc/config/objspace.std.withstrbuf.rst copy to pypy/doc/config/objspace.std.withstrbuf.txt diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst --- a/pypy/doc/discussion/outline-external-ootype.rst +++ b/pypy/doc/discussion/outline-external-ootype.rst @@ -1,24 +1,10 @@ -.. XXX, anto, can this be killed? 
- Some discussion about external objects in ootype ================================================ -Current approaches: - -* BasicExternal, used for js backend +Current approach: * SomeCliXxx for .NET backend -BasicExternal -------------- - -* Is using types to make rpython happy (ie, every single method or field - is hardcoded) - -* Supports callbacks by SomeGenericCallable - -* Supports fields, also with callable fields - SomeCliXxx ---------- @@ -28,11 +14,11 @@ * Supports static methods -Would be extremely cool to have just one approach instead of two, -so here are some notes: +Would be extremely cool to generalize the approach to be useful also for the +JVM backend. Here are some notes: * There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, js, jvm for now). + to support any possible backend (cli, jvm for now). * This approach might be eventually extended by a backend itself, but as much as possible code should be factored out. @@ -48,24 +34,22 @@ ================================ The goal of the task is to let RPython program access "external -objects" which are available in the target platform; these include: +entities" which are available in the target platform; these include: - external classes (e.g. for .NET: System.Collections.ArrayList) - - external instances (e.g. for js: window, window.document) + - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - - external functions? (they are not needed for .NET and JVM, maybe - for js?) - -External objects should behave as much as possible as "internal -objects". +External entities should behave as much as possible as "internal +entities". Moreover, we want to preserve the possibility of *testing* RPython programs on top of CPython if possible. For example, it should be possible to RPython programs using .NET external objects using -PythonNet; probably there is something similar for JVM, but not for -JS as I know. 
+PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: +.. _JPype: http://jpype.sourceforge.net/ +.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm How to represent types ---------------------- @@ -126,11 +110,6 @@ and JVM the job can be easily automatized, since the objects have got precise signatures. -For JS, signatures must be written by hand, so we must provide a -convenient syntax for it; I think it should be possible to use the -current syntax and write a tool which translates it to low-level -types. - RPython interface ----------------- @@ -148,9 +127,8 @@ - access to static methods: return an object which will be annotated as SomeExternalStaticMeth. -Instances are annotated as SomeExternalInstance. Prebuilt external -objects (such as JS's window.document) are annotated as -SomeExternalInstance(const=...). +Instances are annotated as SomeExternalInstance. Prebuilt external objects are +annotated as SomeExternalInstance(const=...). Open issues ----------- @@ -181,18 +159,12 @@ It would be nice to allow programmers to inherit from an external class. Not sure about the implications, though. -Callbacks -~~~~~~~~~ - -I know that they are an issue for JS, but I don't know how they are -currently implemented. - Special methods/properties ~~~~~~~~~~~~~~~~~~~~~~~~~~ In .NET there are special methods that can be accessed using a special syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#. +RPython the same syntax as C#, although we can live without that. 
Implementation details diff --git a/pypy/doc/config/translation.backendopt.rst b/pypy/doc/config/translation.backendopt.txt copy from pypy/doc/config/translation.backendopt.rst copy to pypy/doc/config/translation.backendopt.txt diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.rst b/pypy/doc/config/objspace.std.withmethodcachecounter.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcachecounter.rst +++ /dev/null @@ -1,1 +0,0 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. diff --git a/pypy/doc/config/objspace.usemodules.rctime.rst b/pypy/doc/config/objspace.usemodules.rctime.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rctime.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use the 'rctime' module. - -'rctime' is our `rffi`_ based implementation of the builtin 'time' module. -It supersedes the less complete :config:`objspace.usemodules.time`, -at least for C-like targets (the C and LLVM backends). - -.. _`rffi`: ../rffi.html diff --git a/pypy/doc/config/objspace.usemodules._sha.rst b/pypy/doc/config/objspace.usemodules._sha.txt copy from pypy/doc/config/objspace.usemodules._sha.rst copy to pypy/doc/config/objspace.usemodules._sha.txt diff --git a/pypy/doc/config/objspace.usemodules.time.rst b/pypy/doc/config/objspace.usemodules.time.txt copy from pypy/doc/config/objspace.usemodules.time.rst copy to pypy/doc/config/objspace.usemodules.time.txt diff --git a/pypy/doc/config/objspace.translationmodules.rst b/pypy/doc/config/objspace.translationmodules.txt copy from pypy/doc/config/objspace.translationmodules.rst copy to pypy/doc/config/objspace.translationmodules.txt diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.rst b/pypy/doc/config/translation.backendopt.inline_threshold.txt copy from pypy/doc/config/translation.backendopt.inline_threshold.rst copy to pypy/doc/config/translation.backendopt.inline_threshold.txt diff --git 
a/pypy/doc/config/translation.backendopt.inline_heuristic.rst b/pypy/doc/config/translation.backendopt.inline_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for basic inlining (:config:`translation.backendopt.inline`). - -.. internal diff --git a/pypy/doc/config/translation.verbose.rst b/pypy/doc/config/translation.verbose.txt copy from pypy/doc/config/translation.verbose.rst copy to pypy/doc/config/translation.verbose.txt diff --git a/pypy/doc/config/translation.secondaryentrypoints.rst b/pypy/doc/config/translation.secondaryentrypoints.txt copy from pypy/doc/config/translation.secondaryentrypoints.rst copy to pypy/doc/config/translation.secondaryentrypoints.txt diff --git a/pypy/doc/config/objspace.lonepycfiles.rst b/pypy/doc/config/objspace.lonepycfiles.txt copy from pypy/doc/config/objspace.lonepycfiles.rst copy to pypy/doc/config/objspace.lonepycfiles.txt diff --git a/pypy/doc/config/objspace.usemodules.oracle.rst b/pypy/doc/config/objspace.usemodules.oracle.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. 
diff --git a/pypy/doc/config/translation.cli.trace_calls.rst b/pypy/doc/config/translation.cli.trace_calls.txt copy from pypy/doc/config/translation.cli.trace_calls.rst copy to pypy/doc/config/translation.cli.trace_calls.txt diff --git a/pypy/doc/config/objspace.usemodules.struct.rst b/pypy/doc/config/objspace.usemodules.struct.txt copy from pypy/doc/config/objspace.usemodules.struct.rst copy to pypy/doc/config/objspace.usemodules.struct.txt diff --git a/pypy/tool/rest/rest.py b/pypy/tool/rest/rest.py --- a/pypy/tool/rest/rest.py +++ b/pypy/tool/rest/rest.py @@ -10,14 +10,12 @@ pass def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): - from pypy.tool.rest import directive """ return html latin1-encoded document for the given input. source a ReST-string sourcepath where to look for includes (basically) stylesheet path (to be used if any) """ from docutils.core import publish_string - directive.set_backend_and_register_directives("html") kwargs = { 'stylesheet' : stylesheet, 'stylesheet_path': None, diff --git a/pypy/doc/config/objspace.usemodules._collections.rst b/pypy/doc/config/objspace.usemodules._collections.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._collections.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_collections' module. -Used by the 'collections' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._testing.rst b/pypy/doc/config/objspace.usemodules._testing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._testing.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_testing' module. This module exists only for PyPy own testing purposes. - -This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -1,15 +1,48 @@ import autopath import py -from pypy.config import pypyoption, translationoption, config +from pypy.config import pypyoption, translationoption, config, makerestdoc from pypy.doc.config.confrest import all_optiondescrs +all_optiondescrs = [pypyoption.pypy_optiondescription, + translationoption.translation_optiondescription, + ] +start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) + +def make_rst(basename): + txtpath = thisdir.join(basename) + txtpath.ensure() + rstpath = txtpath.new(ext=".rst") + + fullpath = txtpath.purebasename + start = fullpath.split(".")[0] + path = fullpath.rsplit(".", 1)[0] + basedescr = start_to_descr.get(start) + if basedescr is None: + return + if fullpath.count(".") == 0: + descr = basedescr + path = "" + else: + conf = config.Config(basedescr) + subconf, step = conf._cfgimpl_get_home_by_path( + fullpath.split(".", 1)[1]) + descr = getattr(subconf._cfgimpl_descr, step) + text = unicode(descr.make_rest_doc(path).text()) + if txtpath.check(file=True): + content = txtpath.read() + if content: + text += "\n\n" + text = u"%s\n\n%s" % (text, unicode(txtpath.read(), "utf-8")) + rstpath.write(text.encode("utf-8")) + + thisdir = py.path.local(__file__).dirpath() for descr in all_optiondescrs: prefix = descr._name c = config.Config(descr) - thisdir.join(prefix + ".rst").ensure() + thisdir.join(prefix + ".txt").ensure() + make_rst(prefix + ".txt") for p in c.getpaths(include_groups=True): - basename = prefix + "." + p + ".rst" - f = thisdir.join(basename) - f.ensure() + basename = prefix + "." 
+ p + ".txt" + make_rst(basename) diff --git a/pypy/doc/config/objspace.usemodules._weakref.rst b/pypy/doc/config/objspace.usemodules._weakref.txt copy from pypy/doc/config/objspace.usemodules._weakref.rst copy to pypy/doc/config/objspace.usemodules._weakref.txt diff --git a/pypy/doc/config/objspace.usemodules.struct.rst b/pypy/doc/config/objspace.usemodules.struct.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.struct.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in 'struct' module. -This module is expected to be working and is included by default. -There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. diff --git a/pypy/doc/config/translation.cli.trace_calls.rst b/pypy/doc/config/translation.cli.trace_calls.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/config/objspace.std.withstrslice.rst b/pypy/doc/config/objspace.std.withstrslice.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrslice.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable "string slice" objects. - -See the page about `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#string-slice-objects - - diff --git a/pypy/doc/config/translation.dump_static_data_info.rst b/pypy/doc/config/translation.dump_static_data_info.rst deleted file mode 100644 --- a/pypy/doc/config/translation.dump_static_data_info.rst +++ /dev/null @@ -1,3 +0,0 @@ -Dump information about static prebuilt constants, to the file -TARGETNAME.staticdata.info in the /tmp/usession-... directory. This file can -be later inspected using the script ``bin/reportstaticdata.py``. 
diff --git a/pypy/doc/config/objspace.allworkingmodules.rst b/pypy/doc/config/objspace.allworkingmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.allworkingmodules.rst +++ /dev/null @@ -1,6 +0,0 @@ -This option enables the usage of all modules that are known to be working well -and that translate without problems. - -Note that this option defaults to True (except when running -``py.py`` because it takes a long time to start). To force it -to False, use ``--no-allworkingmodules``. diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst b/pypy/doc/config/objspace.opcodes.CALL_METHOD.txt copy from pypy/doc/config/objspace.opcodes.CALL_METHOD.rst copy to pypy/doc/config/objspace.opcodes.CALL_METHOD.txt diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt copy from pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst copy to pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt diff --git a/pypy/doc/config/translation.sandbox.rst b/pypy/doc/config/translation.sandbox.rst deleted file mode 100644 --- a/pypy/doc/config/translation.sandbox.rst +++ /dev/null @@ -1,15 +0,0 @@ -Generate a special fully-sandboxed executable. - -The fully-sandboxed executable cannot be run directly, but -only as a subprocess of an outer "controlling" process. The -sandboxed process is "safe" in the sense that it doesn't do -any library or system call - instead, whenever it would like -to perform such an operation, it marshals the operation name -and the arguments to its stdout and it waits for the -marshalled result on its stdin. This controller process must -handle these operation requests, in any way it likes, allowing -full virtualization. - -For examples of controller processes, see -``pypy/translator/sandbox/interact.py`` and -``pypy/translator/sandbox/pypy_interact.py``. 
diff --git a/pypy/doc/config/objspace.usemodules.bz2.rst b/pypy/doc/config/objspace.usemodules.bz2.txt copy from pypy/doc/config/objspace.usemodules.bz2.rst copy to pypy/doc/config/objspace.usemodules.bz2.txt diff --git a/pypy/doc/config/translation.rweakref.rst b/pypy/doc/config/translation.rweakref.txt copy from pypy/doc/config/translation.rweakref.rst copy to pypy/doc/config/translation.rweakref.txt diff --git a/pypy/doc/config/objspace.std.withstrslice.rst b/pypy/doc/config/objspace.std.withstrslice.txt copy from pypy/doc/config/objspace.std.withstrslice.rst copy to pypy/doc/config/objspace.std.withstrslice.txt diff --git a/pypy/doc/config/objspace.std.withprebuiltint.rst b/pypy/doc/config/objspace.std.withprebuiltint.txt copy from pypy/doc/config/objspace.std.withprebuiltint.rst copy to pypy/doc/config/objspace.std.withprebuiltint.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for profile-based inlining (:config:`translation.backendopt.profile_based_inline`). diff --git a/pypy/doc/config/translation.withsmallfuncsets.rst b/pypy/doc/config/translation.withsmallfuncsets.txt copy from pypy/doc/config/translation.withsmallfuncsets.rst copy to pypy/doc/config/translation.withsmallfuncsets.txt diff --git a/pypy/doc/config/translation.gc.rst b/pypy/doc/config/translation.gc.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gc.rst +++ /dev/null @@ -1,13 +0,0 @@ -Choose the Garbage Collector used by the translated program: - - - "ref": reference counting. Takes very long to translate and the result is - slow. - - - "marksweep": naive mark & sweep. - - - "semispace": a copying semi-space GC. 
- - - "generation": a generational GC using the semi-space GC for the - older generation. - - - "boehm": use the Boehm conservative GC. diff --git a/pypy/doc/config/translation.gc.rst b/pypy/doc/config/translation.gc.txt copy from pypy/doc/config/translation.gc.rst copy to pypy/doc/config/translation.gc.txt diff --git a/pypy/doc/config/objspace.usemodules.imp.rst b/pypy/doc/config/objspace.usemodules.imp.txt copy from pypy/doc/config/objspace.usemodules.imp.rst copy to pypy/doc/config/objspace.usemodules.imp.txt diff --git a/pypy/doc/config/objspace.usemodules.bz2.rst b/pypy/doc/config/objspace.usemodules.bz2.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.bz2.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'bz2' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/translation.shared.rst b/pypy/doc/config/translation.shared.txt copy from pypy/doc/config/translation.shared.rst copy to pypy/doc/config/translation.shared.txt diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.rst b/pypy/doc/config/objspace.usemodules.pypyjit.txt copy from pypy/doc/config/objspace.usemodules.pypyjit.rst copy to pypy/doc/config/objspace.usemodules.pypyjit.txt diff --git a/pypy/doc/config/objspace.usemodules._file.rst b/pypy/doc/config/objspace.usemodules._file.txt copy from pypy/doc/config/objspace.usemodules._file.rst copy to pypy/doc/config/objspace.usemodules._file.txt diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.rst b/pypy/doc/config/objspace.usemodules.unicodedata.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.unicodedata.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'unicodedata' module. -This module is expected to be fully working. 
diff --git a/pypy/doc/config/translation.type_system.rst b/pypy/doc/config/translation.type_system.rst deleted file mode 100644 --- a/pypy/doc/config/translation.type_system.rst +++ /dev/null @@ -1,4 +0,0 @@ -Which type system to use when rtyping_. This option should not be set -explicitly. - -.. _rtyping: ../rtyper.html diff --git a/pypy/doc/config/objspace.usemodules._ffi.rst b/pypy/doc/config/objspace.usemodules._ffi.txt copy from pypy/doc/config/objspace.usemodules._ffi.rst copy to pypy/doc/config/objspace.usemodules._ffi.txt diff --git a/pypy/doc/config/translation.jit.rst b/pypy/doc/config/translation.jit.txt copy from pypy/doc/config/translation.jit.rst copy to pypy/doc/config/translation.jit.txt diff --git a/pypy/doc/config/objspace.logbytecodes.rst b/pypy/doc/config/objspace.logbytecodes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.logbytecodes.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules.__builtin__.rst b/pypy/doc/config/objspace.usemodules.__builtin__.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.__builtin__.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '__builtin__' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/config/objspace.usemodules._bisect.rst b/pypy/doc/config/objspace.usemodules._bisect.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._bisect.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the '_bisect' module. -Used, optionally, by the 'bisect' standard lib module. This module is expected to be working and is included by default. - - diff --git a/pypy/doc/config/translation.insist.rst b/pypy/doc/config/translation.insist.rst deleted file mode 100644 --- a/pypy/doc/config/translation.insist.rst +++ /dev/null @@ -1,4 +0,0 @@ -Don't stop on the first `rtyping`_ error. Instead, try to rtype as much as -possible and show the collected error messages in the end. - -.. 
_`rtyping`: ../rtyper.html diff --git a/pypy/doc/config/objspace.usemodules.exceptions.rst b/pypy/doc/config/objspace.usemodules.exceptions.txt copy from pypy/doc/config/objspace.usemodules.exceptions.rst copy to pypy/doc/config/objspace.usemodules.exceptions.txt diff --git a/pypy/doc/config/objspace.usepycfiles.rst b/pypy/doc/config/objspace.usepycfiles.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usepycfiles.rst +++ /dev/null @@ -1,4 +0,0 @@ -If this option is used, then PyPy imports and generates "pyc" files in the -same way as CPython. This is true by default and there is not much reason -to turn it off nowadays. If off, PyPy never produces "pyc" files and -ignores any "pyc" file that might already be present. diff --git a/pypy/doc/config/objspace.usemodules.cpyext.rst b/pypy/doc/config/objspace.usemodules.cpyext.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cpyext.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use (experimental) cpyext module, that tries to load and run CPython extension modules diff --git a/pypy/doc/config/translation.profopt.rst b/pypy/doc/config/translation.profopt.txt copy from pypy/doc/config/translation.profopt.rst copy to pypy/doc/config/translation.profopt.txt diff --git a/pypy/doc/config/objspace.usemodules._ffi.rst b/pypy/doc/config/objspace.usemodules._ffi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ffi.rst +++ /dev/null @@ -1,1 +0,0 @@ -Applevel interface to libffi. It is more high level than _rawffi, and most importantly it is JIT friendly diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.rst b/pypy/doc/config/translation.backendopt.inline_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for basic inlining (:config:`translation.backendopt.inline`). 
diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.rst b/pypy/doc/config/objspace.std.withdictmeasurement.txt copy from pypy/doc/config/objspace.std.withdictmeasurement.rst copy to pypy/doc/config/objspace.std.withdictmeasurement.txt diff --git a/pypy/doc/config/objspace.usemodules._stackless.rst b/pypy/doc/config/objspace.usemodules._stackless.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._stackless.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_stackless' module. - -Exposes the `stackless` primitives, and also implies a stackless build. -See also :config:`translation.stackless`. - -.. _`stackless`: ../stackless.html diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.rst b/pypy/doc/config/objspace.std.mutable_builtintypes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.mutable_builtintypes.rst +++ /dev/null @@ -1,1 +0,0 @@ -Allow modification of builtin types. Disabled by default. diff --git a/pypy/doc/config/objspace.logbytecodes.rst b/pypy/doc/config/objspace.logbytecodes.txt copy from pypy/doc/config/objspace.logbytecodes.rst copy to pypy/doc/config/objspace.logbytecodes.txt diff --git a/pypy/doc/config/translation.backendopt.storesink.rst b/pypy/doc/config/translation.backendopt.storesink.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.storesink.rst +++ /dev/null @@ -1,1 +0,0 @@ -Store sinking optimization. On by default. diff --git a/pypy/doc/config/translation.thread.rst b/pypy/doc/config/translation.thread.rst deleted file mode 100644 --- a/pypy/doc/config/translation.thread.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable threading. The only target where this has visible effect is PyPy (this -also enables the ``thread`` module then). 
diff --git a/pypy/doc/config/translation.no__thread.rst b/pypy/doc/config/translation.no__thread.rst deleted file mode 100644 --- a/pypy/doc/config/translation.no__thread.rst +++ /dev/null @@ -1,4 +0,0 @@ -Don't use gcc __thread attribute for fast thread local storage -implementation . Increases the chance that moving the resulting -executable to another same processor Linux machine will work. (see -:config:`translation.vanilla`). diff --git a/pypy/doc/config/objspace.usemodules.cmath.rst b/pypy/doc/config/objspace.usemodules.cmath.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cmath.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'cmath' module. -This module is expected to be working and is included by default. From commits-noreply at bitbucket.org Wed Apr 27 13:01:41 2011 From: commits-noreply at bitbucket.org (lac) Date: Wed, 27 Apr 2011 13:01:41 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) change crufty, needswork and throwaway to be .txt files not Message-ID: <20110427110141.E67D536C20E@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43665:fd89ce7e8ce9 Date: 2011-04-27 12:48 +0200 http://bitbucket.org/pypy/pypy/changeset/fd89ce7e8ce9/ Log: (lac, cfbolz) change crufty, needswork and throwaway to be .txt files not .rst since they are just included by other files. diff --git a/pypy/doc/needswork.rst b/pypy/doc/needswork.rst deleted file mode 100644 --- a/pypy/doc/needswork.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. warning:: - - This documentation needs work (as discussed during the Gothenburg sprint in 2011) diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,4 +1,4 @@ -.. include:: needswork.rst +.. include:: needswork.txt .. 
_glossary: diff --git a/pypy/doc/crufty.rst b/pypy/doc/crufty.txt copy from pypy/doc/crufty.rst copy to pypy/doc/crufty.txt diff --git a/pypy/doc/needswork.rst b/pypy/doc/needswork.txt copy from pypy/doc/needswork.rst copy to pypy/doc/needswork.txt diff --git a/pypy/doc/crufty.rst b/pypy/doc/crufty.rst deleted file mode 100644 --- a/pypy/doc/crufty.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. warning:: - - This documentation may be out-of-date or obsolete (identified on 2011-03-14 at the PyCon US sprint) diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,4 +1,4 @@ -.. include:: needswork.rst +.. include:: needswork.txt .. needs work, it talks about svn. also, it is not really user documentation diff --git a/pypy/doc/throwaway.rst b/pypy/doc/throwaway.txt copy from pypy/doc/throwaway.rst copy to pypy/doc/throwaway.txt diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,4 +1,4 @@ -.. include:: needswork.rst +.. include:: needswork.txt ============================= lib_pypy/distributed features diff --git a/pypy/doc/throwaway.rst b/pypy/doc/throwaway.rst deleted file mode 100644 --- a/pypy/doc/throwaway.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. 
warning:: - - This documentation should be removed (as discussed during the Gothenburg sprint in 2011) From commits-noreply at bitbucket.org Wed Apr 27 13:01:44 2011 From: commits-noreply at bitbucket.org (lac) Date: Wed, 27 Apr 2011 13:01:44 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz) remove dead link Message-ID: <20110427110144.1E97F282C18@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43666:ca91d4773324 Date: 2011-04-27 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ca91d4773324/ Log: (lac, cfbolz) remove dead link diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -78,6 +78,4 @@ .. _`CERN (July 2010)`: http://morepypy.blogspot.com/2010/07/cern-sprint-report-wrapping-c-libraries.html .. _`Düsseldorf (October 2010)`: http://morepypy.blogspot.com/2010/10/dusseldorf-sprint-report-2010.html -Further event notes: -* :ref:`eventhistory.rst` From commits-noreply at bitbucket.org Wed Apr 27 13:01:47 2011 From: commits-noreply at bitbucket.org (lac) Date: Wed, 27 Apr 2011 13:01:47 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110427110147.58EAC282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43667:527c07310b48 Date: 2011-04-27 13:01 +0200 http://bitbucket.org/pypy/pypy/changeset/527c07310b48/ Log: merge heads diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst --- a/pypy/doc/dot-net.rst +++ b/pypy/doc/dot-net.rst @@ -9,4 +9,3 @@ cli-backend.rst clr-module.rst - carbonpython.rst diff --git a/pypy/translator/cli/carbonpython.py b/pypy/translator/cli/carbonpython.py deleted file mode 100644 --- a/pypy/translator/cli/carbonpython.py +++ /dev/null @@ -1,160 +0,0 @@ -#! /usr/bin/env python -""" -Usage: carbonpython.py [dll-name] - -Compiles an RPython module into a .NET dll. 
-""" - -import sys -import new -import types -import os.path -import inspect - -from pypy.translator.driver import TranslationDriver -from pypy.translator.cli.entrypoint import DllEntryPoint - -class DllDef: - def __init__(self, name, namespace, functions=[], dontmangle=True, isnetmodule=False): - self.name = name - self.namespace = namespace - self.functions = functions # [(function, annotation), ...] - self.isnetmodule = isnetmodule - self.driver = TranslationDriver() - if dontmangle: - self.driver.config.translation.ootype.mangle = False - self.driver.setup_library(self) - - def add_function(self, func, inputtypes): - self.functions.append((func, inputtypes)) - - def get_entrypoint(self, bk): - graphs = [bk.getdesc(f).cachedgraph(None) for f, _ in self.functions] - return DllEntryPoint(self.name, graphs, self.isnetmodule) - - def compile(self): - # add all functions to the appropriate namespace - if self.namespace: - for func, _ in self.functions: - if not hasattr(func, '_namespace_'): - func._namespace_ = self.namespace - self.driver.proceed(['compile_cli']) - -class export(object): - def __new__(self, *args, **kwds): - if len(args) == 1 and isinstance(args[0], types.FunctionType): - func = args[0] - func._inputtypes_ = () - return func - return object.__new__(self, *args, **kwds) - - def __init__(self, *args, **kwds): - self.inputtypes = args - self.namespace = kwds.pop('namespace', None) - if len(kwds) > 0: - raise TypeError, "unexpected keyword argument: '%s'" % kwds.keys()[0] - - def __call__(self, func): - func._inputtypes_ = self.inputtypes - if self.namespace is not None: - func._namespace_ = self.namespace - return func - -def is_exported(obj): - return isinstance(obj, (types.FunctionType, types.UnboundMethodType)) \ - and hasattr(obj, '_inputtypes_') - -def collect_entrypoints(dic): - entrypoints = [] - for item in dic.itervalues(): - if is_exported(item): - entrypoints.append((item, item._inputtypes_)) - elif isinstance(item, types.ClassType) or 
isinstance(item, type): - entrypoints += collect_class_entrypoints(item) - return entrypoints - -def collect_class_entrypoints(cls): - try: - __init__ = cls.__init__ - if not is_exported(__init__): - return [] - except AttributeError: - return [] - - entrypoints = [(wrap_init(cls, __init__), __init__._inputtypes_)] - for item in cls.__dict__.itervalues(): - if item is not __init__.im_func and is_exported(item): - inputtypes = (cls,) + item._inputtypes_ - entrypoints.append((wrap_method(item), inputtypes)) - return entrypoints - -def getarglist(meth): - arglist, starargs, kwargs, defaults = inspect.getargspec(meth) - assert starargs is None, '*args not supported yet' - assert kwargs is None, '**kwds not supported yet' - assert defaults is None, 'default values not supported yet' - return arglist - -def wrap_init(cls, meth): - arglist = getarglist(meth)[1:] # discard self - args = ', '.join(arglist) - source = 'def __internal__ctor(%s): return %s(%s)' % ( - args, cls.__name__, args) - mydict = {cls.__name__: cls} - print source - exec source in mydict - return mydict['__internal__ctor'] - -def wrap_method(meth, is_init=False): - arglist = getarglist(meth) - name = '__internal__%s' % meth.func_name - selfvar = arglist[0] - args = ', '.join(arglist) - params = ', '.join(arglist[1:]) - source = 'def %s(%s): return %s.%s(%s)' % ( - name, args, selfvar, meth.func_name, params) - mydict = {} - print source - exec source in mydict - return mydict[name] - - -def compile_dll(filename, dllname=None, copy_dll=True): - dirname, name = os.path.split(filename) - if dllname is None: - dllname, _ = os.path.splitext(name) - elif dllname.endswith('.dll'): - dllname, _ = os.path.splitext(dllname) - module = new.module(dllname) - namespace = module.__dict__.get('_namespace_', dllname) - sys.path.insert(0, dirname) - execfile(filename, module.__dict__) - sys.path.pop(0) - - dll = DllDef(dllname, namespace) - dll.functions = collect_entrypoints(module.__dict__) - dll.compile() - if 
copy_dll: - dll.driver.copy_cli_dll() - -def main(argv): - if len(argv) == 2: - filename = argv[1] - dllname = None - elif len(argv) == 3: - filename = argv[1] - dllname = argv[2] - else: - print >> sys.stderr, __doc__ - sys.exit(2) - - if not filename.endswith('.py'): - filename += '.py' - if not os.path.exists(filename): - print >> sys.stderr, "Cannot find file %s" % filename - sys.exit(1) - compile_dll(filename, dllname) - -if __name__ == '__main__': - main(sys.argv) - diff --git a/pypy/translator/cli/test/test_carbonpython.py b/pypy/translator/cli/test/test_carbonpython.py deleted file mode 100644 --- a/pypy/translator/cli/test/test_carbonpython.py +++ /dev/null @@ -1,175 +0,0 @@ -import py -py.test.skip("it passes usually, but fails on buildbot, no clue why") - -import os -import os.path -from pypy.tool import udir -from pypy.translator.cli.rte import Target -from pypy.translator.cli.carbonpython import DllDef, export, collect_entrypoints,\ - collect_class_entrypoints, compile_dll -from pypy.translator.cli.test.runtest import CliFunctionWrapper, CliTest - -TEMPLATE = """ -using System; -using System.Collections; -class CarbonPytonTest { - public static void Main() { - %s - } -} -""" - -class TestCarbonPython(CliTest): - - def _csharp(self, source, references=[], netmodules=[]): - tmpfile = udir.udir.join('tmp.cs') - tmpfile.write(TEMPLATE % source) - flags = ['/r:%s' % ref for ref in references] - flags += ['/addmodule:%s' % mod for mod in netmodules] - - class MyTarget(Target): - SOURCES = [str(tmpfile)] - FLAGS = flags - OUTPUT = 'tmp.exe' - SRC_DIR = str(udir.udir) - - func = CliFunctionWrapper(MyTarget.get()) - return func() - - def test_compilation(self): - res = self._csharp('Console.WriteLine(42);') - assert res == 42 - - def test_func_namespace(self): - def foo(x): - return x+1 - def bar(x): - return foo(x) - foo._namespace_ = 'MyNamespace.MyClass' - bar._namespace_ = 'MyClass' - res = self.interpret(bar, [41], backendopt=False) - assert res == 42 - - 
def test_simple_functions(self): - def foo(x): - return x+1 - def bar(x): - return x*2 - dll = DllDef('test', 'Test', [(foo, [int]), - (bar, [int])]) - dll.compile() - res = self._csharp('Console.WriteLine("{0}, {1}", Test.foo(42), Test.bar(42));', ['test']) - assert res == (43, 84) - - def test_export(self): - @export(int, float) - def foo(x, y): - pass - @export(int, float, namespace='test') - def bar(x, y): - pass - @export - def baz(): - pass - - assert foo._inputtypes_ == (int, float) - assert not hasattr(foo, '_namespace_') - assert bar._inputtypes_ == (int, float) - assert bar._namespace_ == 'test' - assert baz._inputtypes_ == () - - def test_collect_entrypoints(self): - @export(int, float) - def foo(x, y): - pass - def bar(x, y): - pass - mydict = dict(foo=foo, bar=bar, x=42) - entrypoints = collect_entrypoints(mydict) - assert entrypoints == [(foo, (int, float))] - - def test_collect_class_entrypoints(self): - class NotExported: - def __init__(self): - pass - - class MyClass: - @export - def __init__(self): - pass - @export(int) - def foo(self, x): - return x - - assert collect_class_entrypoints(NotExported) == [] - entrypoints = collect_class_entrypoints(MyClass) - assert len(entrypoints) == 2 - assert entrypoints[0][1] == () # __init__ inputtypes - assert entrypoints[1][1] == (MyClass, int) # foo inputtypes - - def test_compile_class(self): - py.test.skip('This test fails every other day. 
No clue why :-(') - class MyClass: - @export(int) - def __init__(self, x): - self.x = x - @export(int, int) - def add(self, y, z): - return self.x + y + z - MyClass.__module__ = 'Test' # put the class in the Test namespace - - entrypoints = collect_entrypoints({'MyClass': MyClass}) - dll = DllDef('test', 'Test', entrypoints) - dll.compile() - res = self._csharp(""" - Test.MyClass obj = new Test.MyClass(); - obj.__init__(39); - Console.WriteLine(obj.add(1, 2)); - """, ['test']) - assert res == 42 - - def test_export_cliclass(self): - py.test.skip('it fails every other day on builbot, no clue why') - from pypy.translator.cli.dotnet import CLR - - @export(CLR.System.Collections.ArrayList, int) - def getitem(obj, i): - return obj.get_Item(i) - - entrypoints = collect_entrypoints({'getitem': getitem}) - dll = DllDef('test', 'Test', entrypoints) - dll.compile() - res = self._csharp(""" - ArrayList obj = new ArrayList(); - obj.Add(42); - Console.WriteLine(Test.getitem(obj, 0)); - """, ['test']) - assert res == 42 - - def test_compile_dll(self): - py.test.skip('This test fails every other day. 
No clue why :-(') - cwd, _ = os.path.split(__file__) - mylib_py = os.path.join(cwd, 'mylib.py') - compile_dll(mylib_py, copy_dll=False) - res = self._csharp(""" - Console.WriteLine(mylib.sum(20, 22)); - """, ['mylib']) - assert res == 42 - - def test_compile_dll_alternative_name(self): - cwd, _ = os.path.split(__file__) - mylib_py = os.path.join(cwd, 'mylib.py') - compile_dll(mylib_py, 'mylibxxx.dll', copy_dll=False) - res = self._csharp(""" - Console.WriteLine(mylibxxx.sum(20, 22)); - """, ['mylibxxx']) - assert res == 42 - - def test_compile_netmodule(self): - def foo(x): - return x+1 - dll = DllDef('mymodule', 'Test', [(foo, [int])], isnetmodule=True) - dll.compile() - res = self._csharp('Console.WriteLine("{0}", Test.foo(41));', - netmodules = ['mymodule']) - diff --git a/pypy/bin/carbonpython.py b/pypy/bin/carbonpython.py deleted file mode 100755 --- a/pypy/bin/carbonpython.py +++ /dev/null @@ -1,5 +0,0 @@ -#! /usr/bin/env python -import autopath, sys -from pypy.translator.cli.carbonpython import main - -main(sys.argv) diff --git a/pypy/translator/cli/test/mylib.py b/pypy/translator/cli/test/mylib.py deleted file mode 100644 --- a/pypy/translator/cli/test/mylib.py +++ /dev/null @@ -1,5 +0,0 @@ -from pypy.translator.cli.carbonpython import export - - at export(int, int) -def sum(a, b): - return a+b diff --git a/pypy/doc/carbonpython.rst b/pypy/doc/carbonpython.rst deleted file mode 100644 --- a/pypy/doc/carbonpython.rst +++ /dev/null @@ -1,230 +0,0 @@ -================================================== -CarbonPython, aka C# considered harmful -================================================== - -CarbonPython overview -===================== - -CarbonPython is an experimental RPython to .NET compiler. Its main -focus is to produce DLLs to be used by other .NET programs, not -standalone executables; if you want to compile an RPython standalone -program, have a look to `translate.py`_. 
- -Compiled RPython programs are much faster (up to 250x) than -interpreted IronPython programs, hence it might be a convenient -replacement for C# when more speed is needed. RPython programs can be -as fast as C# programs. - -RPython is a restrict subset of Python, static enough to be analyzed -and compiled efficiently to lower level languages. To read more about -the RPython limitations read the `RPython description`_. - -**Disclaimer**: RPython is a much less convenient language than Python -to program with. If you do not need speed, there is no reason to look -at RPython. - -**Big disclaimer**: CarbonPython is still in a pre-alpha stage: it's -not meant to be used for production code, and the API might change in -the future. Despite this, it might be useful in some situations and -you are encouraged to try it by yourself. Suggestions, bug-reports and -even better patches are welcome. - -.. _`RPython description`: coding-guide.html#restricted-python -.. _`translate.py`: faq.html#how-do-i-compile-my-own-interpreters - - -Quick start -=========== - -Suppose you want to write a little DLL in RPython and call its -function from C#. - -Here is the file mylibrary.py:: - - from pypy.translator.cli.carbonpython import export - - @export(int, int) - def add(x, y): - return x+y - - @export(int, int) - def sub(x, y): - return x-y - - -And here the C# program main.cs:: - - using System; - public class CarbonPythonTest - { - public static void Main() - { - Console.WriteLine(mylibrary.add(40, 2)); - Console.WriteLine(mylibrary.sub(44, 2)); - } - } - -Once the files have been created, you can compile ``mylibrary.py`` -with CarbonPython to get the corresponding DLL:: - - $ python carbonpython.py mylibrary.py - ... 
lot of stuff - -Then, we compile main.cs into an executable, being sure to add a -reference to the newly created ``mylibrary.dll``:: - - # with mono on linux - $ gmcs /r:mylibrary.dll main.cs - - # with Microsoft CLR on windows - c:\> csc /r:mylibrary main.cs - -Now we can run the executable to see whether the answers are right:: - - $ mono main.exe - 42 - 42 - - -Multiple entry-points -===================== - -In RPython, the type of each variable is inferred by the `Annotator`_: -the annotator analyzed the whole program top-down starting from an -entry-point, i.e. a function whose we specified the types of the -parameters. - -This approach works for a standalone executables, but not for a -library that by definition is composed by more than one -entry-point. Thus, you need to explicitly specify which functions you -want to include in your DLL, together with the expected input types. - -To mark a function as an entry-point, you use the ``@export`` -decorator, which is defined in ``pypy.translator.cli.carbonpython``, -as shown by the previous example. Note that you do not need to -specify the return type, because it is automatically inferenced by the -annotator. - -.. _`Annotator`: translation.html#annotator - - -Namespaces -========== - -Since `CLS`_ (Common Language Specification) does not support module -level static methods, RPython functions marked as entry-points are -compiled to static methods of a class, in order to be accessible by -every CLS-compliant language such as C# or VB.NET. - -The class which each function is placed in depends on its -**namespace**; for example, if the namespace of a function ``foo`` is -``A.B.C``, the function will be rendered as a static method of the -``C`` class inside the ``A.B`` namespace. This allows C# and -IronPython code to call the function using the intuitive ``A.B.C.foo`` -syntax. - -By default, the default namespace for exported function is the same as -the name of the module. 
Thus in the previous example the default -namespace is ``mylibrary`` and the functions are placed inside the -corresponding class in the global namespace. - -You can change the default namespace by setting the ``_namespace_`` -variable in the module you are compiling:: - - _namespace_ = 'Foo.Bar' - - @export(int, int) - def f(x, y): - pass - -Finally, you can also set a specific namespace on a per-function -basis, using the appropriate keyword argument of the ``@export`` -decorator:: - - @export(int, int, namespace='Foo.Bar') - def f(x, y): - pass - - -.. _`CLS`: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-335.pdf - - -Exporting classes -================= - -RPython libraries can also export classes: to export a class, add the -``@export`` decorator to its ``__init__`` method; similarly, you can -also export any methods of the class:: - - class MyClass: - - @export(int) - def __init__(self, x): - self.x = x - - @export - def getx(self): - return self.x - - -Note that the type of ``self`` must not be specified: it will -automatically assumed to be ``MyClass``. - -The ``__init__`` method is not automatically mapped to the .NET -constructor; to properly initialize an RPython object from C# or -IronPython code you need to explicitly call ``__init__``; for example, -in C#:: - - MyClass obj = new MyClass(); - obj.__init__(x); - -Note that this is needed only when calling RPython code from -outside; the RPython compiler automatically calls ``__init__`` -whenever an RPython class is instantiated. - -In the future this discrepancy will be fixed and the ``__init__`` -method will be automatically mapped to the constructor. - - -Accessing .NET libraries -======================== - -**Warning**: the API for accessing .NET classes from RPython is highly -experimental and will probably change in the future. 
- -In RPython you can access native .NET classes through the ``CLR`` -object defined in ``translator.cli.dotnet``: from there, you can -navigate through namespaces using the usual dot notation; for example, -``CLR.System.Collections.ArrayList`` refers to the ``ArrayList`` class -in the ``System.Collections`` namespace. - -To instantiate a .NET class, simply call it:: - - ArrayList = CLR.System.Collections.ArrayList - def foo(): - obj = ArrayList() - obj.Add(42) - return obj - -At the moment there is no special syntax support for indexers and -properties: for example, you can't access ArrayList's elements using -the square bracket notation, but you have to call the call the -``get_Item`` and ``set_Item`` methods; similarly, to access a property -``XXX`` you need to call ``get_XXX`` and ``set_XXX``:: - - def foo(): - obj = ArrayList() - obj.Add(42) - print obj.get_Item(0) - print obj.get_Count() - -Static methods and are also supported, as well as overloadings:: - - Math = CLR.System.Math - def foo(): - print Math.Abs(-42) - print Math.Abs(-42.0) - - -At the moment, it is not possible to reference assemblies other than -mscorlib. This will be fixed soon. From commits-noreply at bitbucket.org Wed Apr 27 14:26:55 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Wed, 27 Apr 2011 14:26:55 +0200 (CEST) Subject: [pypy-svn] pypy default: Add a workaround for the Microsoft compiler, Message-ID: <20110427122655.75DE1282BEC@codespeak.net> Author: Amaury Forgeot d'Arc Branch: Changeset: r43668:7dcd98388a50 Date: 2011-04-27 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/7dcd98388a50/ Log: Add a workaround for the Microsoft compiler, which forbids function pointers for some math functions when the "Generate intrinsic function" optimization is enabled. 
diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -8,10 +8,20 @@ from pypy.tool.autopath import pypydir from pypy.rlib import jit, rposix from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.translator.platform import platform from pypy.rlib.rfloat import isinf, isnan, INFINITY, NAN if sys.platform == "win32": - eci = ExternalCompilationInfo() + if platform.name == "msvc": + # When compiled with /O2 or /Oi (enable intrinsic functions) + # It's no more possible to take the address of some math functions. + # Ensure that the compiler chooses real functions instead. + eci = ExternalCompilationInfo( + includes = ['math.h'], + post_include_bits = ['#pragma function(floor)'], + ) + else: + eci = ExternalCompilationInfo() # Some math functions are C99 and not defined by the Microsoft compiler cdir = py.path.local(pypydir).join('translator', 'c') math_eci = ExternalCompilationInfo( From commits-noreply at bitbucket.org Wed Apr 27 14:29:20 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 14:29:20 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: rename _ref.rst to _ref.txt Message-ID: <20110427122920.847B7282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43669:541308f58d6a Date: 2011-04-27 14:27 +0200 http://bitbucket.org/pypy/pypy/changeset/541308f58d6a/ Log: rename _ref.rst to _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -232,4 +232,4 @@ .. _`RPython Typer`: rtyper.html .. _`subsystem implementing the Python language`: architecture.html#standard-interpreter -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst --- a/pypy/doc/docindex.rst +++ b/pypy/doc/docindex.rst @@ -306,5 +306,4 @@ .. 
_`graph viewer`: getting-started-dev.html#try-out-the-translator .. _`compatibility matrix`: image/compat-matrix.png -.. include:: _ref.rst - +.. include:: _ref.txt diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.txt copy from pypy/doc/_ref.rst copy to pypy/doc/_ref.txt diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -126,4 +126,4 @@ More details are available as comments at the start of the source in `pypy/rpython/memory/gc/markcompact.py`_. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -308,4 +308,4 @@ .. _clr: clr-module.html .. _`CPythons core language regression tests`: http://codespeak.net:8099/summary?category=applevel&branch=%3Ctrunk%3E -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/configuration.rst b/pypy/doc/configuration.rst --- a/pypy/doc/configuration.rst +++ b/pypy/doc/configuration.rst @@ -190,4 +190,4 @@ are attached to the object space, also under the name ``config`` and are described in `pypy/config/pypyoption.py`_. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -791,4 +791,5 @@ assert res == ~3 .. _annotator: translation.html#the-annotation-pass -.. include:: _ref.rst + +.. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -637,4 +637,4 @@ .. _`What PyPy can do for your objects`: objspace-proxies.html -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -404,5 +404,4 @@ .. 
_`directory reference`: docindex.html#directory-reference -.. include:: _ref.rst - +.. include:: _ref.txt diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -222,4 +222,4 @@ *more* case on PyPy than on CPython 2.6/2.7.) -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -408,4 +408,4 @@ as a reference for the exact attributes of interpreter classes visible at application level. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -744,4 +744,4 @@ collection of functions (which may refer to each other in a mutually recursive fashion) and annotate and rtype them all at once. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/jit/_ref.rst b/pypy/doc/jit/_ref.txt copy from pypy/doc/jit/_ref.rst copy to pypy/doc/jit/_ref.txt diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.rst deleted file mode 100644 --- a/pypy/doc/_ref.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. _`./LICENSE`: https://bitbucket.org/pypy/pypy/src/default/./LICENSE -.. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ -.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py -.. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ -.. _`lib-python/2.5.2/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.5.2/dis.py -.. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ -.. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py -.. 
_`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py -.. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ -.. _`pypy/annotation/annrpython.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/annrpython.py -.. _`pypy/annotation/binaryop.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/binaryop.py -.. _`pypy/annotation/builtin.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/builtin.py -.. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ -.. _`pypy/bin/translatorshell.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/translatorshell.py -.. _`pypy/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/ -.. _`pypy/config/pypyoption.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/pypyoption.py -.. _`pypy/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/translationoption.py -.. _`pypy/doc/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/ -.. _`pypy/doc/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/config/ -.. _`pypy/doc/discussion/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/discussion/ -.. _`pypy/interpreter/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/ -.. _`pypy/interpreter/argument.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/argument.py -.. _`pypy/interpreter/astcompiler/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ -.. _`pypy/interpreter/astcompiler/assemble.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/assemble.py -.. _`pypy/interpreter/astcompiler/ast.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ast.py -.. _`pypy/interpreter/astcompiler/astbuilder.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/astbuilder.py -.. 
_`pypy/interpreter/astcompiler/asthelpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/asthelpers.py -.. _`pypy/interpreter/astcompiler/codegen.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/codegen.py -.. _`pypy/interpreter/astcompiler/optimize.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/optimize.py -.. _`pypy/interpreter/astcompiler/symtable.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/symtable.py -.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/tools/Python.asdl -.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/tools/asdl_py.py -.. _`pypy/interpreter/baseobjspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/baseobjspace.py -.. _`pypy/interpreter/eval.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/eval.py -.. _`pypy/interpreter/executioncontext.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/executioncontext.py -.. _`pypy/interpreter/function.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/function.py -.. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py -.. _`pypy/interpreter/generator.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/generator.py -.. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py -.. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py -.. _`pypy/interpreter/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/objspace.py -.. 
_`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py -.. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py -.. _`pypy/interpreter/pyparser/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/ -.. _`pypy/interpreter/pyparser/future.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/future.py -.. _`pypy/interpreter/pyparser/metaparser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/metaparser.py -.. _`pypy/interpreter/pyparser/parser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/parser.py -.. _`pypy/interpreter/pyparser/pyparse.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pyparse.py -.. _`pypy/interpreter/pyparser/pytokenizer.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pytokenizer.py -.. _`pypy/interpreter/typedef.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/typedef.py -.. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ -.. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py -.. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ -.. _`pypy/objspace/flow/model/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/model/ -.. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. 
_`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py -.. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py -.. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py -.. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py -.. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py -.. _`pypy/objspace/taint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/taint.py -.. _`pypy/objspace/thunk.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/thunk.py -.. _`pypy/objspace/trace`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace -.. _`pypy/objspace/trace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace.py -.. _`pypy/rlib/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/ -.. _`pypy/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/listsort.py -.. _`pypy/rlib/nonconst.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/nonconst.py -.. _`pypy/rlib/objectmodel.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/objectmodel.py -.. _`pypy/rlib/parsing/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/parsing/ -.. _`pypy/rlib/parsing/tree.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/parsing/tree.py -.. _`pypy/rlib/rarithmetic.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rarithmetic.py -.. 
_`pypy/rlib/rbigint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rbigint.py -.. _`pypy/rlib/rrandom.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rrandom.py -.. _`pypy/rlib/rsocket.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rsocket.py -.. _`pypy/rlib/rstack.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rstack.py -.. _`pypy/rlib/streamio.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/streamio.py -.. _`pypy/rlib/test/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test/ -.. _`pypy/rlib/unroll.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/unroll.py -.. _`pypy/rpython/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ -.. _`pypy/rpython/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/ -.. _`pypy/rpython/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/lltype.py -.. _`pypy/rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/ -.. _`pypy/rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/generation.py -.. _`pypy/rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/hybrid.py -.. _`pypy/rpython/memory/gc/markcompact.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/markcompact.py -.. _`pypy/rpython/memory/gc/marksweep.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/marksweep.py -.. _`pypy/rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/semispace.py -.. _`pypy/rpython/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ -.. _`pypy/rpython/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ootype.py -.. _`pypy/rpython/rint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rint.py -.. 
_`pypy/rpython/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rlist.py -.. _`pypy/rpython/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rmodel.py -.. _`pypy/rpython/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rtyper.py -.. _`pypy/rpython/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/test/test_llinterp.py -.. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ -.. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ -.. _`pypy/tool/traceconfig.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/traceconfig.py -.. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ -.. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ -.. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ -.. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ -.. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ -.. _`pypy/translator/goal/targetnopstandalone.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/targetnopstandalone.py -.. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ -.. _`pypy/translator/stackless/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/stackless/ -.. _`pypy/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/tool/ diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -121,4 +121,4 @@ .. _bug reports: https://codespeak.net/issue/pypy-dev/ -.. include:: _ref.rst +.. 
include:: _ref.txt diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -11,7 +11,7 @@ def makeref(docdir): - reffile = docdir.join('_ref.rst') + reffile = docdir.join('_ref.txt') linkrex = py.std.re.compile('`(\S+)`_') @@ -39,7 +39,7 @@ found = True addlink(linkname, issue_url+linkname) if found: - assert ".. include:: _ref.rst" in content, "you need to include _ref.rst in %s" % (textfile, ) + assert ".. include:: _ref.txt" in content, "you need to include _ref.txt in %s" % (textfile, ) items = name2target.items() items.sort() diff --git a/pypy/doc/jit/_ref.rst b/pypy/doc/jit/_ref.rst deleted file mode 100644 diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -611,4 +611,4 @@ .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -712,4 +712,4 @@ .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`json format`: http://www.json.org -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/parser.rst b/pypy/doc/parser.rst --- a/pypy/doc/parser.rst +++ b/pypy/doc/parser.rst @@ -100,4 +100,4 @@ information like the line number table and stack depth are computed. Finally, everything is passed to a brand new ``PyCode`` object. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -1061,4 +1061,4 @@ which will check that remote URLs are reachable. -.. include:: _ref.rst +.. 
include:: _ref.txt diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -600,4 +600,4 @@ .. _`documentation of the greenlets`: http://packages.python.org/greenlet/ .. _`Stackless Transform`: translation.html#the-stackless-transform -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -321,5 +321,5 @@ Because it's fun. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -260,5 +260,5 @@ .. _`generate Just-In-Time Compilers`: jit/index.html .. _`JIT Generation in PyPy`: jit/index.html -.. include:: _ref.rst +.. include:: _ref.txt From commits-noreply at bitbucket.org Wed Apr 27 14:29:22 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 14:29:22 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix link generation Message-ID: <20110427122922.2BEA0282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43670:b07c1d6ed3b3 Date: 2011-04-27 14:28 +0200 http://bitbucket.org/pypy/pypy/changeset/b07c1d6ed3b3/ Log: fix link generation diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,12 +1,11 @@ -.. _`./LICENSE`: https://bitbucket.org/pypy/pypy/src/default/./LICENSE .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ .. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ -.. _`lib-python/2.5.2/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.5.2/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. 
_`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py +.. _`pypy/annotation`: .. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ .. _`pypy/annotation/annrpython.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/annrpython.py .. _`pypy/annotation/binaryop.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/binaryop.py @@ -19,8 +18,10 @@ .. _`pypy/doc/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/ .. _`pypy/doc/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/config/ .. _`pypy/doc/discussion/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/discussion/ +.. _`pypy/interpreter`: .. _`pypy/interpreter/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/ .. _`pypy/interpreter/argument.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/argument.py +.. _`pypy/interpreter/astcompiler`: .. _`pypy/interpreter/astcompiler/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ .. _`pypy/interpreter/astcompiler/assemble.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/assemble.py .. _`pypy/interpreter/astcompiler/ast.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ast.py @@ -40,9 +41,9 @@ .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py .. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py -.. _`pypy/interpreter/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/objspace.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. 
_`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py +.. _`pypy/interpreter/pyparser`: .. _`pypy/interpreter/pyparser/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/ .. _`pypy/interpreter/pyparser/future.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/future.py .. _`pypy/interpreter/pyparser/metaparser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/metaparser.py @@ -50,16 +51,21 @@ .. _`pypy/interpreter/pyparser/pyparse.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pyparse.py .. _`pypy/interpreter/pyparser/pytokenizer.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pytokenizer.py .. _`pypy/interpreter/typedef.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/typedef.py +.. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py .. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py +.. _`pypy/objspace`: .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ .. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py +.. _`pypy/objspace/flow`: .. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ -.. _`pypy/objspace/flow/model/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/model/ +.. _`pypy/objspace/flow/model.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/model.py +.. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ .. 
_`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py +.. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py .. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py @@ -68,8 +74,8 @@ .. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/objspace/taint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/taint.py .. _`pypy/objspace/thunk.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/thunk.py -.. _`pypy/objspace/trace`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace .. _`pypy/objspace/trace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace.py +.. _`pypy/rlib`: .. _`pypy/rlib/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/ .. _`pypy/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/listsort.py .. _`pypy/rlib/nonconst.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/nonconst.py @@ -82,8 +88,9 @@ .. _`pypy/rlib/rsocket.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rsocket.py .. _`pypy/rlib/rstack.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rstack.py .. _`pypy/rlib/streamio.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/streamio.py -.. _`pypy/rlib/test/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test/ +.. _`pypy/rlib/test`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test/ .. 
_`pypy/rlib/unroll.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/unroll.py +.. _`pypy/rpython`: .. _`pypy/rpython/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ .. _`pypy/rpython/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/ .. _`pypy/rpython/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/lltype.py @@ -104,12 +111,12 @@ .. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`pypy/tool/traceconfig.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/traceconfig.py +.. _`pypy/translator`: .. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ .. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ .. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ .. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ .. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ -.. _`pypy/translator/goal/targetnopstandalone.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/targetnopstandalone.py .. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ .. _`pypy/translator/stackless/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/stackless/ .. 
_`pypy/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/tool/ diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -23,7 +23,7 @@ return name2target.setdefault(linktarget, []).append(linkname) - for textfile in docdir.listdir(): # for subdirs, see below + for textfile in sorted(docdir.listdir()): # for subdirs, see below if textfile.ext != '.rst': continue content = textfile.read() @@ -32,9 +32,10 @@ if '/' in linkname: found = True assert distdir.join(linkname).check(), "link %s in %s is dead" % (linkname, textfile) + url = bitbucket_url + linkname if not linkname.endswith("/") and distdir.join(linkname).check(dir=1): - linkname += "/" - addlink(linkname, bitbucket_url + linkname) + url += "/" + addlink(linkname, url) elif linkname.startswith('issue'): found = True addlink(linkname, issue_url+linkname) From commits-noreply at bitbucket.org Wed Apr 27 14:31:25 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 14:31:25 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix link Message-ID: <20110427123125.D4B50282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43671:cbf61f98fa64 Date: 2011-04-27 14:31 +0200 http://bitbucket.org/pypy/pypy/changeset/cbf61f98fa64/ Log: fix link diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -37,8 +37,7 @@ calling its ``frame.eval()`` method. This main entry point initialize appropriate namespaces and then interprets each bytecode instruction. 
Python's standard library contains -the `lib-python/2.5.2/dis.py`_ module which allows to view +the `lib-python/2.7.0/dis.py`_ module which allows to view the Virtual's machine bytecode instructions:: >>> import dis From commits-noreply at bitbucket.org Wed Apr 27 14:35:30 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 27 Apr 2011 14:35:30 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, arigo) Message-ID: <20110427123530.12D5436C210@codespeak.net> Author: Armin Rigo Branch: Changeset: r43672:89abd949c0b0 Date: 2011-04-27 14:34 +0200 http://bitbucket.org/pypy/pypy/changeset/89abd949c0b0/ Log: (iko, rguillebert, arigo) Don't use Mac OS/X may-be-broken implementation of poll(). diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -15,6 +15,7 @@ _MSVC = target_platform.name == "msvc" _MINGW = target_platform.name == "mingw32" _SOLARIS = sys.platform == "sunos5" +_MACOSX = sys.platform == "darwin" if _POSIX: includes = ('sys/types.h', @@ -590,7 +591,11 @@ pollfdarray = rffi.CArray(pollfd) poll = external('poll', [lltype.Ptr(pollfdarray), nfds_t, rffi.INT], rffi.INT) - + # workaround for Mac OS/X on which poll() seems to behave a bit strangely + # (see test_recv_send_timeout in pypy.module._socket.test.test_sock_app) + # https://issues.apache.org/bugzilla/show_bug.cgi?id=34332 + poll_may_be_broken = _MACOSX + elif WIN32: from pypy.rlib import rwin32 # diff --git a/pypy/rlib/rsocket.py b/pypy/rlib/rsocket.py --- a/pypy/rlib/rsocket.py +++ b/pypy/rlib/rsocket.py @@ -629,7 +629,7 @@ _c.ioctlsocket(self.fd, _c.FIONBIO, flag) lltype.free(flag, flavor='raw') - if hasattr(_c, 'poll'): + if hasattr(_c, 'poll') and not _c.poll_may_be_broken: def _select(self, for_writing): """Returns 0 when reading/writing is possible, 1 when timing out and -1 on error.""" From commits-noreply at 
bitbucket.org (arigo) Date: Wed, 27 Apr 2011 14:37:13 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix for Macs, where platform.cc is e.g. "gcc-4.0". Message-ID: <20110427123713.4832C36C210@codespeak.net> Author: Armin Rigo Branch: Changeset: r43673:fc9dc975bb55 Date: 2011-04-27 11:06 +0200 http://bitbucket.org/pypy/pypy/changeset/fc9dc975bb55/ Log: Fix for Macs, where platform.cc is e.g. "gcc-4.0". diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,7 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc == 'gcc': +elif platform.cc.startswith('gcc'): out = platform.execute(platform.cc, '--version').out match = re.search(' (\d+\.\d+(\.\d+)*)', out) if match: From commits-noreply at bitbucket.org Wed Apr 27 14:37:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 27 Apr 2011 14:37:15 +0200 (CEST) Subject: [pypy-svn] pypy default: (rguillebert, iko, arigo) Message-ID: <20110427123715.C22A0282C18@codespeak.net> Author: Armin Rigo Branch: Changeset: r43674:56b8077746b7 Date: 2011-04-27 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/56b8077746b7/ Log: (rguillebert, iko, arigo) Fix mmap().resize() to raise the correct exception on BSDs. 
diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -5,7 +5,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.rlib import rmmap -from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError +from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError, RSystemError import sys import os import platform @@ -123,6 +123,9 @@ self.mmap.resize(newsize) except OSError, e: raise mmap_error(self.space, e) + except RSystemError, e: + raise OperationError(self.space.w_SystemError, + self.space.wrap(e.message)) def __len__(self): return self.space.wrap(self.mmap.size) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -363,6 +363,28 @@ m.close() f.close() + def test_resize_bsd(self): + import sys + if ("darwin" not in sys.platform) and ("freebsd" not in sys.platform): + skip("resize works under not OSX or FreeBSD") + + import mmap + import os + + f = open(self.tmpname + "p", "w+") + f.write("foobar") + f.flush() + m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_READ) + raises(TypeError, m.resize, 1) + m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_COPY) + raises(TypeError, m.resize, 1) + m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_WRITE) + f_size = os.fstat(f.fileno()).st_size + assert m.size() == f_size == 6 + raises(SystemError, m.resize, 10) + f_size = os.fstat(f.fileno()).st_size + assert m.size() == f_size == 6 + def test_len(self): from mmap import mmap diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -27,6 +27,10 @@ def __init__(self, message): self.message = message +class RSystemError(Exception): + def __init__(self, message): + self.message = message + includes = ["sys/types.h"] if 
_POSIX: includes += ['unistd.h', 'sys/mman.h'] @@ -511,7 +515,7 @@ if _POSIX: if not has_mremap: - raise OSError(-11111, "No mremap available") + raise RSystemError("mmap: resizing not available--no mremap()") # resize the underlying file first os.ftruncate(self.fd, self.offset + newsize) From commits-noreply at bitbucket.org Wed Apr 27 14:37:18 2011 From: commits-noreply at bitbucket.org (arigo) Date: Wed, 27 Apr 2011 14:37:18 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110427123718.49E71282C18@codespeak.net> Author: Armin Rigo Branch: Changeset: r43675:f7965308716b Date: 2011-04-27 14:37 +0200 http://bitbucket.org/pypy/pypy/changeset/f7965308716b/ Log: merge heads diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,7 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc == 'gcc': +elif platform.cc.startswith('gcc'): out = platform.execute(platform.cc, '--version').out match = re.search(' (\d+\.\d+(\.\d+)*)', out) if match: diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -5,7 +5,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped from pypy.rlib import rmmap -from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError +from pypy.rlib.rmmap import RValueError, RTypeError, ROverflowError, RSystemError import sys import os import platform @@ -123,6 +123,9 @@ self.mmap.resize(newsize) except OSError, e: raise mmap_error(self.space, e) + except RSystemError, e: + raise OperationError(self.space.w_SystemError, + self.space.wrap(e.message)) def __len__(self): return self.space.wrap(self.mmap.size) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- 
a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -363,6 +363,28 @@ m.close() f.close() + def test_resize_bsd(self): + import sys + if ("darwin" not in sys.platform) and ("freebsd" not in sys.platform): + skip("resize works under not OSX or FreeBSD") + + import mmap + import os + + f = open(self.tmpname + "p", "w+") + f.write("foobar") + f.flush() + m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_READ) + raises(TypeError, m.resize, 1) + m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_COPY) + raises(TypeError, m.resize, 1) + m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_WRITE) + f_size = os.fstat(f.fileno()).st_size + assert m.size() == f_size == 6 + raises(SystemError, m.resize, 10) + f_size = os.fstat(f.fileno()).st_size + assert m.size() == f_size == 6 + def test_len(self): from mmap import mmap diff --git a/pypy/rlib/rmmap.py b/pypy/rlib/rmmap.py --- a/pypy/rlib/rmmap.py +++ b/pypy/rlib/rmmap.py @@ -27,6 +27,10 @@ def __init__(self, message): self.message = message +class RSystemError(Exception): + def __init__(self, message): + self.message = message + includes = ["sys/types.h"] if _POSIX: includes += ['unistd.h', 'sys/mman.h'] @@ -511,7 +515,7 @@ if _POSIX: if not has_mremap: - raise OSError(-11111, "No mremap available") + raise RSystemError("mmap: resizing not available--no mremap()") # resize the underlying file first os.ftruncate(self.fd, self.offset + newsize) From commits-noreply at bitbucket.org Wed Apr 27 14:40:55 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 14:40:55 +0200 (CEST) Subject: [pypy-svn] pypy merge-stdlib-2.7.1: (antocuni, berdario) the output produced by pstats.py changed in 2.7.1, fix it. 
Also, change the name of the test to be more -k friendly Message-ID: <20110427124055.05A9F36C210@codespeak.net> Author: Antonio Cuni Branch: merge-stdlib-2.7.1 Changeset: r43676:a92b6ca951d7 Date: 2011-04-27 14:21 +0200 http://bitbucket.org/pypy/pypy/changeset/a92b6ca951d7/ Log: (antocuni, berdario) the output produced by pstats.py changed in 2.7.1, fix it. Also, change the name of the test to be more -k friendly diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -117,7 +117,7 @@ assert 0.9 < subentry.totaltime < 2.9 #assert 0.9 < subentry.inlinetime < 2.9 - def test_cprofile(self): + def test_use_cprofile(self): import sys, os # XXX this is evil trickery to walk around the fact that we don't # have __file__ at app-level here @@ -187,7 +187,7 @@ expected_output = {} expected_output['print_stats'] = """\ - 126 function calls (106 primitive calls) in 1.000 CPU seconds + 126 function calls (106 primitive calls) in 1.000 seconds Ordered by: standard name From commits-noreply at bitbucket.org Wed Apr 27 15:17:28 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 15:17:28 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: kill link Message-ID: <20110427131728.366F1282BEC@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43678:970b1de06b6b Date: 2011-04-27 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/970b1de06b6b/ Log: kill link diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -8,7 +8,6 @@ .. 
toctree:: - discussion/cli-optimizations.rst discussion/distribution-implementation.rst discussion/distribution-newattempt.rst discussion/distribution-roadmap.rst From commits-noreply at bitbucket.org Wed Apr 27 15:17:31 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 15:17:31 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: also generate the cmdline overview Message-ID: <20110427131731.E2687282C19@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43679:4a884b3994cc Date: 2011-04-27 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/4a884b3994cc/ Log: also generate the cmdline overview diff --git a/pypy/doc/config/commandline.rst b/pypy/doc/config/commandline.rst deleted file mode 100644 --- a/pypy/doc/config/commandline.rst +++ /dev/null @@ -1,33 +0,0 @@ - -.. contents:: - - -.. _objspace: -.. _`overview-of-command-line-options-for-objspace`: - -------------------------------- -PyPy Python interpreter options -------------------------------- - -The following options can be used after ``translate.py -targetpypystandalone`` or as options to ``py.py``. - -.. GENERATE: objspace - - -.. _translation: -.. _`overview-of-command-line-options-for-translation`: - ---------------------------- -General translation options ---------------------------- - -The following are options of ``translate.py``. They must be -given before the ``targetxxx`` on the command line. - -* `--opt -O:`__ set the optimization level `[0, 1, size, mem, 2, 3]` - -.. __: opt.html - -.. 
GENERATE: translation - diff --git a/pypy/doc/config/commandline.rst b/pypy/doc/config/commandline.txt copy from pypy/doc/config/commandline.rst copy to pypy/doc/config/commandline.txt diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -8,6 +8,18 @@ ] start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) +def make_cmdline_overview(): + result = [] + txtpath = thisdir.join("commandline.txt") + for line in txtpath.read().splitlines(): + if line.startswith('.. GENERATE:'): + start = line[len('.. GENERATE:'):].strip() + descr = start_to_descr[start] + line = makerestdoc.make_cmdline_overview(descr, title=False).text() + result.append(line) + rstpath = txtpath.new(ext=".rst") + rstpath.write("\n".join(result)) + def make_rst(basename): txtpath = thisdir.join(basename) txtpath.ensure() @@ -46,3 +58,5 @@ for p in c.getpaths(include_groups=True): basename = prefix + "." + p + ".txt" make_rst(basename) + +make_cmdline_overview() diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,6 +1,7 @@ .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ .. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ +.. _`lib-python/2.7.0/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7.0/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. 
_`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py From commits-noreply at bitbucket.org Wed Apr 27 15:17:37 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Wed, 27 Apr 2011 15:17:37 +0200 (CEST) Subject: [pypy-svn] pypy merge-stdlib-2.7.1: close about-to-be-merged branch Message-ID: <20110427131737.DC7F4282C18@codespeak.net> Author: Antonio Cuni Branch: merge-stdlib-2.7.1 Changeset: r43680:a83dc6a2d56f Date: 2011-04-27 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/a83dc6a2d56f/ Log: close about-to-be-merged branch From commits-noreply at bitbucket.org Wed Apr 27 21:42:32 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 21:42:32 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: make compat-matrix less ridiculously outdated Message-ID: <20110427194232.6D355282BF2@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43682:0ad2d1d26fea Date: 2011-04-27 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/0ad2d1d26fea/ Log: make compat-matrix less ridiculously outdated diff --git a/pypy/doc/image/compat-matrix.sxc b/pypy/doc/image/compat-matrix.sxc index 8086ba6179a7bcd49f43067ae42a80f2a5d3cca3..e202accc5fdeffd04172fca0c5e0447486bd8c55 GIT binary patch [cut] diff --git a/pypy/doc/image/compat-matrix.png b/pypy/doc/image/compat-matrix.png index 162c06062b42cdb56745203c7b181707a1b14a69..012097e51f06306d6b9f5bee46f0c17e10b5b1e2 GIT binary patch [cut] From commits-noreply at bitbucket.org Wed Apr 27 21:42:34 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 21:42:34 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: add missing things to toctree Message-ID: <20110427194234.B4F26282BF2@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43683:224accc32740 Date: 2011-04-27 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/224accc32740/ Log: add missing things to toctree diff 
--git a/pypy/doc/config/index.rst b/pypy/doc/config/index.rst --- a/pypy/doc/config/index.rst +++ b/pypy/doc/config/index.rst @@ -50,3 +50,12 @@ .. _`overview`: commandline.html .. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html .. _`What PyPy can do for your objects`: ../objspace-proxies.html + + +.. toctree:: + :maxdepth: 2 + + commandline + translation + objspace + opt diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -147,11 +147,9 @@ def _get_section_header(cmdline, fullpath, subdescr): # XXX: pypy specific hack txtfile = configdocdir.join(fullpath + ".txt") - print txtfile, if not txtfile.check(): - print "not found" + print txtfile, "not found" return "" - print "found" content = txtfile.read() if ".. internal" in content: return "Internal Options" diff --git a/pypy/doc/config/commandline.txt b/pypy/doc/config/commandline.txt --- a/pypy/doc/config/commandline.txt +++ b/pypy/doc/config/commandline.txt @@ -1,6 +1,6 @@ .. contents:: - + .. _objspace: .. 
_`overview-of-command-line-options-for-objspace`: diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -94,6 +94,7 @@ objspace.rst __pypy__-module.rst objspace-proxies.rst + config/index.rst dev_method.rst extending.rst From commits-noreply at bitbucket.org Wed Apr 27 21:42:40 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 21:42:40 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix non-symmetry Message-ID: <20110427194240.AC20E282C1E@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43684:6f8910062344 Date: 2011-04-27 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/6f8910062344/ Log: fix non-symmetry diff --git a/pypy/doc/image/compat-matrix.sxc b/pypy/doc/image/compat-matrix.sxc index e202accc5fdeffd04172fca0c5e0447486bd8c55..ab8455241eda4ec63bf647905b63bf9849fb8675 GIT binary patch [cut] diff --git a/pypy/doc/image/compat-matrix.png b/pypy/doc/image/compat-matrix.png index 012097e51f06306d6b9f5bee46f0c17e10b5b1e2..060537165eca2f94eee1fabb9a0c235fe39e51ee GIT binary patch [cut] From commits-noreply at bitbucket.org Wed Apr 27 21:42:43 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Wed, 27 Apr 2011 21:42:43 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac): put the stuff from docindex to the main index page, which makes much more sense for readthedocs Message-ID: <20110427194243.0DB85282C18@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43685:4c65565e1bb3 Date: 2011-04-27 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/4c65565e1bb3/ Log: (cfbolz, lac): put the stuff from docindex to the main index page, which makes much more sense for readthedocs diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst deleted file mode 100644 --- a/pypy/doc/docindex.rst +++ /dev/null @@ -1,309 +0,0 @@ -================================================= -PyPy - a 
Python_ implementation written in Python -================================================= - -.. _Python: http://docs.python.org/release/2.5.2/ - - -.. contents:: :depth: 1 - - -PyPy User Documentation -=============================================== - -`getting started`_ provides hands-on instructions -including a two-liner to run the PyPy Python interpreter -on your system, examples on advanced features and -entry points for using PyPy's translation tool chain. - -`FAQ`_ contains some frequently asked questions. - -New features of PyPy's Python Interpreter and -Translation Framework: - - * `Differences between PyPy and CPython`_ - * `What PyPy can do for your objects`_ - * `Stackless and coroutines`_ - * `JIT Generation in PyPy`_ - * `Sandboxing Python code`_ - -Status_ of the project. - - -Project Documentation -===================================== - -PyPy was funded by the EU for several years. See the `web site of the EU -project`_ for more details. - -.. _`web site of the EU project`: http://pypy.org - -architecture_ gives a complete view of PyPy's basic design. - -`coding guide`_ helps you to write code for PyPy (especially also describes -coding in RPython a bit). - -`sprint reports`_ lists reports written at most of our sprints, from -2003 to the present. - -`papers, talks and related projects`_ lists presentations -and related projects as well as our published papers. - -`PyPy video documentation`_ is a page linking to the videos (e.g. of talks and -introductions) that are available. - -`Technical reports`_ is a page that contains links to the -reports that we submitted to the European Union. - -`development methodology`_ describes our sprint-driven approach. - -`LICENSE`_ contains licensing details (basically a straight MIT-license). - -`Glossary`_ of PyPy words to help you align your inner self with -the PyPy universe. 
- - -Status -=================================== - -PyPy can be used to run Python programs on Linux, OS/X, -Windows, on top of .NET, and on top of Java. -To dig into PyPy it is recommended to try out the current -Subversion HEAD, which is always working or mostly working, -instead of the latest release, which is `1.2.0`__. - -.. __: release-1.2.0.html - -PyPy is mainly developed on Linux and Mac OS X. Windows is supported, -but platform-specific bugs tend to take longer before we notice and fix -them. Linux 64-bit machines are supported (though it may also take some -time before we notice and fix bugs). - -PyPy's own tests `summary`_, daily updated, run through BuildBot infrastructure. -You can also find CPython's compliance tests run with compiled ``pypy-c`` -executables there. - -information dating from early 2007: - -`PyPy LOC statistics`_ shows LOC statistics about PyPy. - -`PyPy statistics`_ is a page with various statistics about the PyPy project. - -`compatibility matrix`_ is a diagram that shows which of the various features -of the PyPy interpreter work together with which other features. - - -Source Code Documentation -=============================================== - -`object spaces`_ discusses the object space interface -and several implementations. - -`bytecode interpreter`_ explains the basic mechanisms -of the bytecode interpreter and virtual machine. - -`interpreter optimizations`_ describes our various strategies for -improving the performance of our interpreter, including alternative -object implementations (for strings, dictionaries and lists) in the -standard object space. - -`translation`_ is a detailed overview of our translation process. The -rtyper_ is the largest component of our translation process. - -`dynamic-language translation`_ is a paper that describes -the translation process, especially the flow object space -and the annotator in detail. (This document is one -of the `EU reports`_.) 
- -`low-level encapsulation`_ describes how our approach hides -away a lot of low level details. This document is also part -of the `EU reports`_. - -`translation aspects`_ describes how we weave different -properties into our interpreter during the translation -process. This document is also part of the `EU reports`_. - -`garbage collector`_ strategies that can be used by the virtual -machines produced by the translation process. - -`parser`_ contains (outdated, unfinished) documentation about -the parser. - -`rlib`_ describes some modules that can be used when implementing programs in -RPython. - -`configuration documentation`_ describes the various configuration options that -allow you to customize PyPy. - -`CLI backend`_ describes the details of the .NET backend. - -`JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler -from our Python interpreter. - - - -.. _`FAQ`: faq.html -.. _Glossary: glossary.html -.. _`PyPy video documentation`: video-index.html -.. _parser: parser.html -.. _`development methodology`: dev_method.html -.. _`sprint reports`: sprint-reports.html -.. _`papers, talks and related projects`: extradoc.html -.. _`PyPy LOC statistics`: http://codespeak.net/~hpk/pypy-stat/ -.. _`PyPy statistics`: http://codespeak.net/pypy/trunk/pypy/doc/statistic -.. _`object spaces`: objspace.html -.. _`interpreter optimizations`: interpreter-optimizations.html -.. _`translation`: translation.html -.. _`dynamic-language translation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf -.. _`low-level encapsulation`: low-level-encapsulation.html -.. _`translation aspects`: translation-aspects.html -.. _`configuration documentation`: config/ -.. _`coding guide`: coding-guide.html -.. _`architecture`: architecture.html -.. _`getting started`: getting-started.html -.. _`bytecode interpreter`: interpreter.html -.. _`EU reports`: index-report.html -.. 
_`Technical reports`: index-report.html -.. _`summary`: http://codespeak.net:8099/summary -.. _`ideas for PyPy related projects`: project-ideas.html -.. _`Nightly builds and benchmarks`: http://tuatara.cs.uni-duesseldorf.de/benchmark.html -.. _`directory reference`: -.. _`rlib`: rlib.html -.. _`Sandboxing Python code`: sandbox.html -.. _`LICENSE`: https://bitbucket.org/pypy/pypy/src/default/LICENSE - -PyPy directory cross-reference ------------------------------- - -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: - -================================ =========================================== -Directory explanation/links -================================ =========================================== -`pypy/annotation/`_ `type inferencing code`_ for `RPython`_ programs - -`pypy/bin/`_ command-line scripts, mainly `py.py`_ and `translatorshell.py`_ - -`pypy/config/`_ handles the numerous options for building and running PyPy - -`pypy/doc/`_ text versions of PyPy developer documentation - -`pypy/doc/config/`_ documentation for the numerous translation options - -`pypy/doc/discussion/`_ drafts of ideas and documentation - -``doc/*/`` other specific documentation topics or tools - -`pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) - -`pypy/interpreter/pyparser/`_ interpreter-level Python source parser - -`pypy/interpreter/astcompiler/`_ interpreter-level bytecode compiler, via an AST - representation - -`pypy/module/`_ contains `mixed modules`_ implementing core modules with - both application and interpreter level code. - Not all are finished and working. Use the ``--withmod-xxx`` - or ``--allworkingmodules`` translation options. 
- -`pypy/objspace/`_ `object space`_ implementations - -`pypy/objspace/trace.py`_ the `trace object space`_ monitoring bytecode and space operations - -`pypy/objspace/dump.py`_ the dump object space saves a large, searchable log file - with all operations - -`pypy/objspace/taint.py`_ the `taint object space`_, providing object tainting - -`pypy/objspace/thunk.py`_ the `thunk object space`_, providing unique object features - -`pypy/objspace/flow/`_ the FlowObjSpace_ implementing `abstract interpretation`_ - -`pypy/objspace/std/`_ the StdObjSpace_ implementing CPython's objects and types - -`pypy/rlib/`_ a `"standard library"`_ for RPython_ programs - -`pypy/rpython/`_ the `RPython Typer`_ - -`pypy/rpython/lltypesystem/`_ the `low-level type system`_ for C-like backends - -`pypy/rpython/ootypesystem/`_ the `object-oriented type system`_ for OO backends - -`pypy/rpython/memory/`_ the `garbage collector`_ construction framework - -`pypy/tool/`_ various utilities and hacks used from various places - -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic - tools - -`pypy/tool/pytest/`_ support code for our `testing methods`_ - -`pypy/translator/`_ translation_ backends and support code - -`pypy/translator/backendopt/`_ general optimizations that run before a backend generates code - -`pypy/translator/c/`_ the `GenC backend`_, producing C code from an - RPython program (generally via the rtyper_) - -`pypy/translator/cli/`_ the `CLI backend`_ for `.NET`_ (Microsoft CLR or Mono_) - -`pypy/translator/goal/`_ our `main PyPy-translation scripts`_ live here - -`pypy/translator/jvm/`_ the Java backend - -`pypy/translator/stackless/`_ the `Stackless Transform`_ - -`pypy/translator/tool/`_ helper tools for translation, including the Pygame - `graph viewer`_ - -``*/test/`` many directories have a test subdirectory containing test - modules (see `Testing in PyPy`_) - -``_cache/`` holds cache files from internally `translating application - level to interpreterlevel`_ code. 
-================================ =========================================== - -.. _`bytecode interpreter`: interpreter.html -.. _`translating application level to interpreterlevel`: geninterp.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules -.. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf -.. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space -.. _`trace object space`: objspace.html#the-trace-object-space -.. _`taint object space`: objspace-proxies.html#taint -.. _`thunk object space`: objspace-proxies.html#thunk -.. _`transparent proxies`: objspace-proxies.html#tproxy -.. _`Differences between PyPy and CPython`: cpython_differences.html -.. _`What PyPy can do for your objects`: objspace-proxies.html -.. _`Stackless and coroutines`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space -.. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer -.. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html -.. _`py.py`: getting-started-python.html#the-py.py-interpreter -.. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator -.. _JIT: jit/index.html -.. _`JIT Generation in PyPy`: jit/index.html -.. _`just-in-time compiler generator`: jit/index.html -.. _rtyper: rtyper.html -.. _`low-level type system`: rtyper.html#low-level-type -.. _`object-oriented type system`: rtyper.html#oo-type -.. _`garbage collector`: garbage_collection.html -.. _`Stackless Transform`: translation.html#the-stackless-transform -.. 
_`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter -.. _`.NET`: http://www.microsoft.com/net/ -.. _Mono: http://www.mono-project.com/ -.. _`"standard library"`: rlib.html -.. _`graph viewer`: getting-started-dev.html#try-out-the-translator -.. _`compatibility matrix`: image/compat-matrix.png - -.. include:: _ref.txt diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -1,4 +1,4 @@ - +=================================== Writing extension modules for pypy =================================== diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -15,16 +15,33 @@ * `PyPy Blog`_: news and status info about PyPy -* `Documentation`_: extensive documentation about PyPy. - -* `Getting Started`_: Getting started and playing with PyPy. - * `Papers`_: Academic papers, talks, and related projects * `Videos`_: Videos of PyPy talks and presentations * `speed.pypy.org`_: Daily benchmarks of how fast PyPy is +Documentation for the PyPy Python Interpreter +=============================================== + +`getting started`_ provides hands-on instructions +including a two-liner to run the PyPy Python interpreter +on your system, examples on advanced features and +entry points for using PyPy's translation tool chain. + +`FAQ`_ contains some frequently asked questions. + +New features of PyPy's Python Interpreter and +Translation Framework: + + * `Differences between PyPy and CPython`_ + * `What PyPy can do for your objects`_ + * `Stackless and coroutines`_ + * `JIT Generation in PyPy`_ + * `Sandboxing Python code`_ + +Status_ of the project. + Mailing lists, bug tracker, IRC channel ============================================= @@ -70,8 +87,284 @@ .. _`Release 1.4`: http://pypy.org/download.html .. 
_`speed.pypy.org`: http://speed.pypy.org -Detailed Documentation -====================== + + +Project Documentation +===================================== + +PyPy was funded by the EU for several years. See the `web site of the EU +project`_ for more details. + +.. _`web site of the EU project`: http://pypy.org + +architecture_ gives a complete view of PyPy's basic design. + +`coding guide`_ helps you to write code for PyPy (especially also describes +coding in RPython a bit). + +`sprint reports`_ lists reports written at most of our sprints, from +2003 to the present. + +`papers, talks and related projects`_ lists presentations +and related projects as well as our published papers. + +`PyPy video documentation`_ is a page linking to the videos (e.g. of talks and +introductions) that are available. + +`Technical reports`_ is a page that contains links to the +reports that we submitted to the European Union. + +`development methodology`_ describes our sprint-driven approach. + +`LICENSE`_ contains licensing details (basically a straight MIT-license). + +`Glossary`_ of PyPy words to help you align your inner self with +the PyPy universe. + + +Status +=================================== + +PyPy can be used to run Python programs on Linux, OS/X, +Windows, on top of .NET, and on top of Java. +To dig into PyPy it is recommended to try out the current +Subversion HEAD, which is always working or mostly working, +instead of the latest release, which is `1.2.0`__. + +.. __: release-1.2.0.html + +PyPy is mainly developed on Linux and Mac OS X. Windows is supported, +but platform-specific bugs tend to take longer before we notice and fix +them. Linux 64-bit machines are supported (though it may also take some +time before we notice and fix bugs). + +PyPy's own tests `summary`_, daily updated, run through BuildBot infrastructure. +You can also find CPython's compliance tests run with compiled ``pypy-c`` +executables there. 
+ +information dating from early 2007: + +`PyPy LOC statistics`_ shows LOC statistics about PyPy. + +`PyPy statistics`_ is a page with various statistics about the PyPy project. + +`compatibility matrix`_ is a diagram that shows which of the various features +of the PyPy interpreter work together with which other features. + + +Source Code Documentation +=============================================== + +`object spaces`_ discusses the object space interface +and several implementations. + +`bytecode interpreter`_ explains the basic mechanisms +of the bytecode interpreter and virtual machine. + +`interpreter optimizations`_ describes our various strategies for +improving the performance of our interpreter, including alternative +object implementations (for strings, dictionaries and lists) in the +standard object space. + +`translation`_ is a detailed overview of our translation process. The +rtyper_ is the largest component of our translation process. + +`dynamic-language translation`_ is a paper that describes +the translation process, especially the flow object space +and the annotator in detail. (This document is one +of the `EU reports`_.) + +`low-level encapsulation`_ describes how our approach hides +away a lot of low level details. This document is also part +of the `EU reports`_. + +`translation aspects`_ describes how we weave different +properties into our interpreter during the translation +process. This document is also part of the `EU reports`_. + +`garbage collector`_ strategies that can be used by the virtual +machines produced by the translation process. + +`parser`_ contains (outdated, unfinished) documentation about +the parser. + +`rlib`_ describes some modules that can be used when implementing programs in +RPython. + +`configuration documentation`_ describes the various configuration options that +allow you to customize PyPy. + +`CLI backend`_ describes the details of the .NET backend. 
+ +`JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler +from our Python interpreter. + + + +.. _`FAQ`: faq.html +.. _Glossary: glossary.html +.. _`PyPy video documentation`: video-index.html +.. _parser: parser.html +.. _`development methodology`: dev_method.html +.. _`sprint reports`: sprint-reports.html +.. _`papers, talks and related projects`: extradoc.html +.. _`PyPy LOC statistics`: http://codespeak.net/~hpk/pypy-stat/ +.. _`PyPy statistics`: http://codespeak.net/pypy/trunk/pypy/doc/statistic +.. _`object spaces`: objspace.html +.. _`interpreter optimizations`: interpreter-optimizations.html +.. _`translation`: translation.html +.. _`dynamic-language translation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`low-level encapsulation`: low-level-encapsulation.html +.. _`translation aspects`: translation-aspects.html +.. _`configuration documentation`: config/ +.. _`coding guide`: coding-guide.html +.. _`architecture`: architecture.html +.. _`getting started`: getting-started.html +.. _`bytecode interpreter`: interpreter.html +.. _`EU reports`: index-report.html +.. _`Technical reports`: index-report.html +.. _`summary`: http://codespeak.net:8099/summary +.. _`ideas for PyPy related projects`: project-ideas.html +.. _`Nightly builds and benchmarks`: http://tuatara.cs.uni-duesseldorf.de/benchmark.html +.. _`directory reference`: +.. _`rlib`: rlib.html +.. _`Sandboxing Python code`: sandbox.html +.. 
_`LICENSE`: https://bitbucket.org/pypy/pypy/src/default/LICENSE + +PyPy directory cross-reference +------------------------------ + +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: + +================================ =========================================== +Directory explanation/links +================================ =========================================== +`pypy/annotation/`_ `type inferencing code`_ for `RPython`_ programs + +`pypy/bin/`_ command-line scripts, mainly `py.py`_ and `translatorshell.py`_ + +`pypy/config/`_ handles the numerous options for building and running PyPy + +`pypy/doc/`_ text versions of PyPy developer documentation + +`pypy/doc/config/`_ documentation for the numerous translation options + +`pypy/doc/discussion/`_ drafts of ideas and documentation + +``doc/*/`` other specific documentation topics or tools + +`pypy/interpreter/`_ `bytecode interpreter`_ and related objects + (frames, functions, modules,...) + +`pypy/interpreter/pyparser/`_ interpreter-level Python source parser + +`pypy/interpreter/astcompiler/`_ interpreter-level bytecode compiler, via an AST + representation + +`pypy/module/`_ contains `mixed modules`_ implementing core modules with + both application and interpreter level code. + Not all are finished and working. Use the ``--withmod-xxx`` + or ``--allworkingmodules`` translation options. 
+ +`pypy/objspace/`_ `object space`_ implementations + +`pypy/objspace/trace.py`_ the `trace object space`_ monitoring bytecode and space operations + +`pypy/objspace/dump.py`_ the dump object space saves a large, searchable log file + with all operations + +`pypy/objspace/taint.py`_ the `taint object space`_, providing object tainting + +`pypy/objspace/thunk.py`_ the `thunk object space`_, providing unique object features + +`pypy/objspace/flow/`_ the FlowObjSpace_ implementing `abstract interpretation`_ + +`pypy/objspace/std/`_ the StdObjSpace_ implementing CPython's objects and types + +`pypy/rlib/`_ a `"standard library"`_ for RPython_ programs + +`pypy/rpython/`_ the `RPython Typer`_ + +`pypy/rpython/lltypesystem/`_ the `low-level type system`_ for C-like backends + +`pypy/rpython/ootypesystem/`_ the `object-oriented type system`_ for OO backends + +`pypy/rpython/memory/`_ the `garbage collector`_ construction framework + +`pypy/tool/`_ various utilities and hacks used from various places + +`pypy/tool/algo/`_ general-purpose algorithmic and mathematic + tools + +`pypy/tool/pytest/`_ support code for our `testing methods`_ + +`pypy/translator/`_ translation_ backends and support code + +`pypy/translator/backendopt/`_ general optimizations that run before a backend generates code + +`pypy/translator/c/`_ the `GenC backend`_, producing C code from an + RPython program (generally via the rtyper_) + +`pypy/translator/cli/`_ the `CLI backend`_ for `.NET`_ (Microsoft CLR or Mono_) + +`pypy/translator/goal/`_ our `main PyPy-translation scripts`_ live here + +`pypy/translator/jvm/`_ the Java backend + +`pypy/translator/stackless/`_ the `Stackless Transform`_ + +`pypy/translator/tool/`_ helper tools for translation, including the Pygame + `graph viewer`_ + +``*/test/`` many directories have a test subdirectory containing test + modules (see `Testing in PyPy`_) + +``_cache/`` holds cache files from internally `translating application + level to interpreterlevel`_ code. 
+================================ =========================================== + +.. _`bytecode interpreter`: interpreter.html +.. _`translating application level to interpreterlevel`: geninterp.html +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules +.. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf +.. _`object space`: objspace.html +.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _`trace object space`: objspace.html#the-trace-object-space +.. _`taint object space`: objspace-proxies.html#taint +.. _`thunk object space`: objspace-proxies.html#thunk +.. _`transparent proxies`: objspace-proxies.html#tproxy +.. _`Differences between PyPy and CPython`: cpython_differences.html +.. _`What PyPy can do for your objects`: objspace-proxies.html +.. _`Stackless and coroutines`: stackless.html +.. _StdObjSpace: objspace.html#the-standard-object-space +.. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer +.. _`testing methods`: coding-guide.html#testing-in-pypy +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc +.. _`CLI backend`: cli-backend.html +.. _`py.py`: getting-started-python.html#the-py.py-interpreter +.. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator +.. _JIT: jit/index.html +.. _`JIT Generation in PyPy`: jit/index.html +.. _`just-in-time compiler generator`: jit/index.html +.. _rtyper: rtyper.html +.. _`low-level type system`: rtyper.html#low-level-type +.. _`object-oriented type system`: rtyper.html#oo-type +.. _`garbage collector`: garbage_collection.html +.. _`Stackless Transform`: translation.html#the-stackless-transform +.. 
_`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter +.. _`.NET`: http://www.microsoft.com/net/ +.. _Mono: http://www.mono-project.com/ +.. _`"standard library"`: rlib.html +.. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. _`compatibility matrix`: image/compat-matrix.png + .. The following documentation is important and reasonably up-to-date: @@ -80,6 +373,7 @@ .. toctree:: :maxdepth: 1 + :hidden: getting-started.rst getting-started-python.rst @@ -145,3 +439,5 @@ * :ref:`search` * :ref:`glossary` + +.. include:: _ref.txt From commits-noreply at bitbucket.org Thu Apr 28 10:16:33 2011 From: commits-noreply at bitbucket.org (fijal) Date: Thu, 28 Apr 2011 10:16:33 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Kill the distribution thoughts (they are a bit random admitedly) Message-ID: <20110428081633.2825D282B9E@codespeak.net> Author: Maciej Fijalkowski Branch: documentation-cleanup Changeset: r43698:2059ff1eebb8 Date: 2011-04-28 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/2059ff1eebb8/ Log: Kill the distribution thoughts (they are a bit random admitedly) diff --git a/pypy/doc/discussion/distribution-roadmap.rst b/pypy/doc/discussion/distribution-roadmap.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-roadmap.rst +++ /dev/null @@ -1,72 +0,0 @@ -Distribution: -============= - -Some random thoughts about automatic (or not) distribution layer. - -What I want to achieve is to make clean approach to perform -distribution mechanism with virtually any distribution heuristic. - -First step - RPython level: ---------------------------- - -First (simplest) step is to allow user to write RPython programs with -some kind of remote control over program execution. For start I would -suggest using RMI (Remote Method Invocation) and remote object access -(in case of low level it would be struct access). 
For the simplicity -it will make some sense to target high-level platform at the beginning -(CLI platform seems like obvious choice), which provides more primitives -for performing such operations. To make attempt easier, I'll provide -some subset of type system to be serializable which can go as parameters -to such a call. - -I take advantage of several assumptions: - -* globals are constants - this allows us to just run multiple instances - of the same program on multiple machines and perform RMI. - -* I/O is explicit - this makes GIL problem not that important. XXX: I've got - to read more about GIL to notice if this is true. - -Second step - doing it a little bit more automatically: -------------------------------------------------------- - -The second step is to allow some heuristic to live and change -calls to RMI calls. This should follow some assumptions (which may vary, -regarding implementation): - -* Not to move I/O to different machine (we can track I/O and side-effects - in RPython code). - -* Make sure all C calls are safe to transfer if we want to do that (this - depends on probably static API declaration from programmer "I'm sure this - C call has no side-effects", we don't want to check it in C) or not transfer - them at all. - -* Perform it all statically, at the time of program compilation. - -* We have to generate serialization methods for some classes, which - we want to transfer (Same engine might be used to allow JSON calls in JS - backend to transfer arbitrary python object). - -Third step - Just-in-time distribution: ---------------------------------------- - -The biggest step here is to provide JIT integration into distribution -system. This should allow to make it really useful (probably compile-time -distribution will not work for example for whole Python interpreter, because -of too huge granularity). This is quite unclear for me how to do that -(JIT is not complete and I don't know too much about it). 
Probably we -take JIT information about graphs and try to feed it to heuristic in some way -to change the calls into RMI. - -Problems to fight with: ------------------------ - -Most problems are to make mechanism working efficiently, so: - -* Avoid too much granularity (copying a lot of objects in both directions - all the time) - -* Make heuristic not eat too much CPU time/memory and all of that. - -* ... diff --git a/pypy/doc/discussion/distribution-implementation.rst b/pypy/doc/discussion/distribution-implementation.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-implementation.rst +++ /dev/null @@ -1,91 +0,0 @@ -===================================================== -Random implementation details of distribution attempt -===================================================== - -.. contents:: -.. sectnum:: - -This document attempts to broaden this `dist thoughts`_. - -.. _`dist thoughts`: distribution-newattempt.html - -Basic implementation: ---------------------- - -First we do split objects into value-only primitives (like int) and other. -Basically immutable builtin types which cannot contain user-level objects -(int, float, long, str, None, etc.) will be always transferred as value-only -objects (having no states etc.). The every other object (user created classes, -instances, modules, lists, tuples, etc. etc.) are always executed by reference. -(Of course if somebody wants to ie. copy the instance, he can marshal/pickle -this to string and send, but it's outside the scope of this attempt). Special -case might be immutable data structure (tuple, frozenset) containing simple -types (this becomes simple type). - -XXX: What to do with code types? Marshalling them and sending seems to have no -sense. Remote execution? Local execution with remote f_locals and f_globals? - -Every remote object has got special class W_RemoteXXX where XXX is interp-level -class implementing this object. 
W_RemoteXXX implements all the operations -by using special app-level code that sends method name and arguments over the wire -(arguments might be either simple objects which are simply send over the app-level -code or references to local objects). - -So the basic scheme would look like:: - - remote_ref = remote("Object reference") - remote_ref.any_method() - -``remote_ref`` in above example looks like normal python object to user, -but is implemented differently (W_RemoteXXX), and uses app-level proxy -to forward each interp-level method call. - -Abstraction layers: -------------------- - -In this section we define remote side as a side on which calls are -executed and local side is the one on which calls are run. - -* Looking from the local side, first thing that we see is object - which looks like normal object (has got the same interp-level typedef) - but has got different implementation. Basically this is the shallow copy - of remote object (however you define shallow, it's up to the code which - makes the copy. Basically the copy which can be marshalled or send over - the wire or saved for future purpose). This is W_RemoteXXX where XXX is - real object name. Some operations on that object requires accessing remote - side of the object, some might not need such (for example remote int - is totally the same int as local one, it could not even be implemented - differently). - -* For every interp-level operation, which accesses internals that are not - accessible at the local side, (basically all attribute accesses which - are accessing things that are subclasses of W_Object) we provide special - W_Remote version, which downloads necessary object when needed - (if accessed). This is the same as normal W_RemoteXXX (we know the type!) - but not needed yet. - -* From the remote point of view, every exported object which needs such - has got a local appropriate storage W_LocalXXX where XXX is a type - by which it could be accessed from a wire. 
- -The real pain: --------------- - -For every attribute access when we get W_RemoteXXX, we need to check -the download flag - which sucks a bit. (And we have to support it somehow -in annotator, which sucks a lot). The (some) idea is to wrap all the methods -with additional checks, but that's both unclear and probably not necessary. - -XXX If we can easily change underlying implementation of an object, than -this might become way easier. Right now I'll try to have it working and -thing about RPython later. - -App-level remote tool: ----------------------- - -For purpose of app-level tool which can transfer the data (well, socket might -be enough, but suppose I want to be more flexible), I would use `py.execnet`_, -probably using some of the Armin's hacks to rewrite it using greenlets instead -of threads. - -.. _`py.execnet`: http://codespeak.net/execnet/ diff --git a/pypy/doc/discussion/distribution.rst b/pypy/doc/discussion/distribution.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. XXX fijal, can this be killed? - -=================================================== -(Semi)-transparent distribution of RPython programs -=================================================== - -Some (rough) ideas how I see distribution ------------------------------------------ - -The main point about it, is to behave very much like JIT - not -to perform distribution on Python source code level, but instead -perform distribution of RPython source, and eventually perform -distribution of interpreter at the end. - -This attempt gives same advantages as off-line JIT (any RPython based -interpreter, etc.) and gives nice field to play with different -distribution heuristics. This also makes eventually nice possibility -of integrating JIT with distribution, thus allowing distribution -heuristics to have more information that they might have otherwise and -as well with specializing different nodes in performing different tasks. 
- -Flow graph level ----------------- - -Probably the best place to perform distribution attempt is to insert -special graph distributing operations into low-level graphs (either lltype -or ootype based), which will allow distribution heuristic to decide -on entrypoint to block/graph/some other structure??? what variables/functions -are accessed inside some part and if it's worth transferring it over wire. - -Backend level -------------- - -Backends will need explicit support for distribution of any kind. Basically -it should be possible for backend to remotely call block/graph/structure -in any manner (it should strongly depend on backend possibilities). diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -8,10 +8,6 @@ .. toctree:: - discussion/distribution-implementation.rst - discussion/distribution-newattempt.rst - discussion/distribution-roadmap.rst - discussion/distribution.rst discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/discussion/distribution-newattempt.rst b/pypy/doc/discussion/distribution-newattempt.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-newattempt.rst +++ /dev/null @@ -1,65 +0,0 @@ -Distribution: -============= - -This is outcome of Armin's and Samuele's ideas and our discussion, -kept together by fijal. - -The communication layer: -======================== - -Communication layer is the layer which takes care of explicit -communication. Suppose we do have two (or more) running interpreters -on different machines or in different processes. Let's call it *local side* -(the one on which we're operating) and *remote side*. - -What we want to achieve is to have a transparent enough layer on local -side, which does not allow user to tell the objects local and remote apart -(despite __pypy__.internal_repr, which I would consider cheating). 
- -Because in pypy we have possibility to have different implementations -for types (even builtin ones), we can use that mechanism to implement -our simple RMI. - -The idea is to provide thin layer for accessing remote object, lays as -different implementation for any possible object. So if you perform any -operation on an object locally, which is really a remote object, you -perform all method lookup and do a call on it. Than proxy object -redirects the call to app-level code (socket, execnet, whatever) which -calls remote interpreter with given parameters. It's important that we -can always perform such a call, even if types are not marshallable, because -we can provide remote proxies of local objects to remote side in that case. - -XXX: Need to explain in a bit more informative way. - -Example: --------- - -Suppose we do have ``class A`` and instance ``a = A()`` on remote side -and we want to access this from a local side. We make an object of type -``object`` and we do copy -``__dict__`` keys with values, which correspond to objects on the remote -side (have the same type to user) but they've got different implementation. -(Ie. method calling will look like quite different). - -Even cooler example: --------------------- - -Reminding hpk's example of 5-liner remote file server. With this we make:: - - f = remote_side.import(open) - f("file_name").read() - -Implementation plans: ---------------------- - -We need: - -* app-level primitives for having 'remote proxy' accessible - -* some "serialiser" which is not truly serialising stuff, but making - sure communication will go. - -* interp-level proxy object which emulates every possible object which - delegates operations to app-level primitive proxy. - -* to make it work.... 
From commits-noreply at bitbucket.org Thu Apr 28 10:48:52 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Thu, 28 Apr 2011 10:48:52 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (all): planning for today Message-ID: <20110428084852.A2A27282B9E@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3541:ae63eb8df689 Date: 2011-04-28 10:48 +0200 http://bitbucket.org/pypy/extradoc/changeset/ae63eb8df689/ Log: (all): planning for today diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -2,41 +2,47 @@ - Armin - Carl Friedrich - Laura - - Håkan - Lukas - Anders - Romain - Dario + - Kristján + - Anto tasks: - release 1.5 - - fix the import problem TESTING - - merge in 2.7.1 stuff (Anto, Dario) - - documentation (Anto, Carl Friedrich, a bit of Laura) MORE PROGRESS + - merge in 2.7.1 stuff DONE + - documentation (Anto, Laura) MORE PROGRESS + - document minimark (Armin) + - document __builtins__ behaviour in cpython-differences + - mercurial in the coding-guide + - release announcement (Anto, Laura) - look at the tracker - - investigate breakage DONE - - investigate Mac problems (Armin, Dario, Romain, Iko) + - investigate Mac problems IN PROGRESS (Iko, Dario, Romain) - find out whether cProfile is giving interesting information (Anto, Lukas) + - add a warning about Windows 64 (Kristján, Armin) + +- discuss stackless+jit integration (Armin, Kristján) +- discuss embedding issues (Armin, Kristján) - branches to be integrated/finished afterwards - 32-on-64 - lukas' branches: list-strategies/dict-strategies - new-dict-proxy READY - - merge new-dict-proxy into post-release (Lukas) + - merge new-dict-proxy into post-release (Lukas) DONE - out-of-line guards - refactor-not-in-translator - håkan's branches - jitypes2 - other tasks - - continue tracing after invalid loops TESTING - look into cython (Armin, Romain, 
Dario) FORK + BASIC ARCH HAPPENED - investigate Open End software on top of PyPy EASIER THAN FEARED - (feedback to wesley chun's paragraphs: Armin, Laura) - presentations/discussions - Lukas' presentation on memory improvements DONE - - what are håkan's branches doing? (today after lunch) + - what are håkan's branches doing? DONE - codespeak migration - EuroPython keynote/training (Anto, Armin) From commits-noreply at bitbucket.org Thu Apr 28 11:11:21 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 11:11:21 +0200 (CEST) Subject: [pypy-svn] pypy default: add these two papers Message-ID: <20110428091121.26CF6282B9E@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43699:6230d4c7263f Date: 2011-04-27 17:41 +0200 http://bitbucket.org/pypy/pypy/changeset/6230d4c7263f/ Log: add these two papers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -7,6 +7,13 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) + +* `Allocation Removal by Partial Evaluation in a Tracing JIT`_, + C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo + +* `Towards a Jitting VM for Prolog Execution`_, + C.F. Bolz, M. Leuschel, D, Schneider + * `High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`_, A. Cuni, Ph.D. thesis @@ -58,6 +65,8 @@ .. _bibtex: http://codespeak.net/svn/pypy/extradoc/talk/bibtex.bib +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf .. 
_`How to *not* write Virtual Machines for Dynamic Languages`: http://codespeak.net/svn/pypy/extradoc/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009/bolz-tracing-jit.pdf From commits-noreply at bitbucket.org Thu Apr 28 11:11:23 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 11:11:23 +0200 (CEST) Subject: [pypy-svn] pypy default: fix test_package after the renaming of lib-python/2.7.1 into 2.7 Message-ID: <20110428091123.3809C282C18@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43700:41d5ddbc3af5 Date: 2011-04-28 11:04 +0200 http://bitbucket.org/pypy/pypy/changeset/41d5ddbc3af5/ Log: fix test_package after the renaming of lib-python/2.7.1 into 2.7 diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -23,7 +23,7 @@ builddir = package.package(py.path.local(pypydir).dirpath(), test, rename_pypy_c) prefix = builddir.join(test) - cpyver = '%d.%d.%d' % CPYTHON_VERSION[:3] + cpyver = '%d.%d' % CPYTHON_VERSION[:2] assert prefix.join('lib-python', cpyver, 'test').check() if sys.platform == 'win32': assert prefix.join('pypy-c.exe').check() From commits-noreply at bitbucket.org Thu Apr 28 11:11:24 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 11:11:24 +0200 (CEST) Subject: [pypy-svn] pypy default: fix test_app_main after the renaming of lib-python/2.7.1 into 2.7 Message-ID: <20110428091124.E3312282C18@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43701:1c66c0745948 Date: 2011-04-28 11:06 +0200 http://bitbucket.org/pypy/pypy/changeset/1c66c0745948/ Log: fix test_app_main after the renaming of lib-python/2.7.1 into 2.7 diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ 
b/pypy/translator/goal/test2/test_app_main.py @@ -781,7 +781,7 @@ # setup code for test_get_library_path # ------------------------------------ from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION - cpy_ver = '%d.%d.%d' % CPYTHON_VERSION[:3] + cpy_ver = '%d.%d' % CPYTHON_VERSION[:2] goal_dir = os.path.dirname(app_main) # build a directory hierarchy like which contains both bin/pypy-c and From commits-noreply at bitbucket.org Thu Apr 28 11:11:26 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 11:11:26 +0200 (CEST) Subject: [pypy-svn] pypy default: fix test_pypy_interact after the renaming of lib-python/2.7.1 into 2.7 Message-ID: <20110428091126.C4789282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43702:6931ab029af1 Date: 2011-04-28 11:09 +0200 http://bitbucket.org/pypy/pypy/changeset/6931ab029af1/ Log: fix test_pypy_interact after the renaming of lib-python/2.7.1 into 2.7 diff --git a/pypy/translator/sandbox/test/test_pypy_interact.py b/pypy/translator/sandbox/test/test_pypy_interact.py --- a/pypy/translator/sandbox/test/test_pypy_interact.py +++ b/pypy/translator/sandbox/test/test_pypy_interact.py @@ -4,7 +4,7 @@ from pypy.translator.interactive import Translation from pypy.module.sys.version import CPYTHON_VERSION -VERSION = '%d.%d.%d' % CPYTHON_VERSION[:3] +VERSION = '%d.%d' % CPYTHON_VERSION[:2] SITE_PY_CONTENT = open(os.path.join(autopath.pypydir, '..', 'lib-python', From commits-noreply at bitbucket.org Thu Apr 28 11:11:28 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 11:11:28 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110428091128.4F81F282C1A@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43703:caeb84608a73 Date: 2011-04-28 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/caeb84608a73/ Log: merge heads diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -7,6 
+7,13 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) + +* `Allocation Removal by Partial Evaluation in a Tracing JIT`_, + C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo + +* `Towards a Jitting VM for Prolog Execution`_, + C.F. Bolz, M. Leuschel, D, Schneider + * `High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`_, A. Cuni, Ph.D. thesis @@ -58,6 +65,8 @@ .. _bibtex: http://codespeak.net/svn/pypy/extradoc/talk/bibtex.bib +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: http://codespeak.net/svn/pypy/extradoc/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009/bolz-tracing-jit.pdf From commits-noreply at bitbucket.org Thu Apr 28 11:46:25 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 11:46:25 +0200 (CEST) Subject: [pypy-svn] pypy default: Document the minimark gc. Message-ID: <20110428094625.485DB36C20A@codespeak.net> Author: Armin Rigo Branch: Changeset: r43704:eaba7adf5188 Date: 2011-04-28 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/eaba7adf5188/ Log: Document the minimark gc. 
diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -8,10 +8,11 @@ Introduction ============ -**Warning**: The overview and description of our garbage collection -strategy and framework is not here but in the `EU-report on this -topic`_. The present document describes the specific garbage collectors -that we wrote in our framework. +The overview and description of our garbage collection strategy and +framework can be found in the `EU-report on this topic`_. Please refer +to that file for an old, but still more or less accurate, description. +The present document describes the specific garbage collectors that we +wrote in our framework. .. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf @@ -26,6 +27,9 @@ For more details, see the `overview of command line options for translation`_. +The following overview is written in chronological order, so the "best" +GC (which is the default when translating) is the last one below. + .. _`overview of command line options for translation`: config/commandline.html#translation Mark and Sweep @@ -124,4 +128,90 @@ More details are available as comments at the start of the source in `rpython/memory/gc/markcompact.py`_. +Minimark GC +----------- + +This is a simplification and rewrite of the ideas from the Hybrid GC. +It uses a nursery for the young objects, and mark-and-sweep for the old +objects. This is a moving GC, but objects may only move once (from +the nursery to the old stage). + +The main difference with the Hybrid GC is that the mark-and-sweep +objects (the "old stage") are directly handled by the GC's custom +allocator, instead of being handled by malloc() calls. The gain is that +it is then possible, during a major collection, to walk through all old +generation objects without needing to store a list of pointers to them. 
+So as a first approximation, when compared to the Hybrid GC, the +Minimark GC saves one word of memory per old object. + +There are a number of environment variables that can be tweaked to +influence the GC. (Their default value should be ok for most usages.) +You can read more about them at the start of +`rpython/memory/gc/minimark.py`_. + +In more details: + +- The small newly malloced objects are allocated in the nursery (case 1). + All objects living in the nursery are "young". + +- The big objects are always handled directly by the system malloc(). + But the big newly malloced objects are still "young" when they are + allocated (case 2), even though they don't live in the nursery. + +- When the nursery is full, we do a minor collection, i.e. we find + which "young" objects are still alive (from cases 1 and 2). The + "young" flag is then removed. The surviving case 1 objects are moved + to the old stage. The dying case 2 objects are immediately freed. + +- The old stage is an area of memory containing old (small) objects. It + is handled by `rpython/memory/gc/minimarkpage.py`_. It is organized + as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. + Each page can either be free, or contain small objects of all the same + size. Furthermore at any point in time each object location can be + either allocated or freed. The basic design comes from ``obmalloc.c`` + from CPython (which itself comes from the same source as the Linux + system malloc()). + +- New objects are added to the old stage at every minor collection. + Immediately after a minor collection, when we reach some threshold, we + trigger a major collection. This is the mark-and-sweep step. It walks + over *all* objects (mark), and then frees some fraction of them (sweep). + This means that the only time when we want to free objects is while + walking over all of them; we never ask to free an object given just its + address. 
This allows some simplifications and memory savings when + compared to ``obmalloc.c``. + +- As with all generational collectors, this GC needs a write barrier to + record which old objects have a reference to young objects. + +- Additionally, we found out that it is useful to handle the case of + big arrays specially: when we allocate a big array (with the system + malloc()), we reserve a small number of bytes before. When the array + grows old, we use the extra bytes as a set of bits. Each bit + represents 128 entries in the array. Whenever the write barrier is + called to record a reference from the Nth entry of the array to some + young object, we set the bit number ``(N/128)`` to 1. This can + considerably speed up minor collections, because we then only have to + scan 128 entries of the array instead of all of them. + +- As usual, we need special care about weak references, and objects with + finalizers. Weak references are allocated in the nursery, and if they + survive they move to the old stage, as usual for all objects; the + difference is that the reference they contain must either follow the + object, or be set to NULL if the object dies. And the objects with + finalizers, considered rare enough, are immediately allocated old to + simplify the design. In particular their ``__del__`` method can only + be called just after a major collection. + +- The objects move once only, so we can use a trick to implement id() + and hash(). If the object is not in the nursery, it won't move any + more, so its id() and hash() are the object's address, cast to an + integer. If the object is in the nursery, and we ask for its id() + or its hash(), then we pre-reserve a location in the old stage, and + return the address of that location. If the object survives the + next minor collection, we move it there, and so its id() and hash() + are preserved. If the object dies then the pre-reserved location + becomes free garbage, to be collected at the next major collection. 
+ + .. include:: _ref.rst From commits-noreply at bitbucket.org Thu Apr 28 11:48:18 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 11:48:18 +0200 (CEST) Subject: [pypy-svn] pypy default: Add minimark here too. Message-ID: <20110428094818.31F1936C20A@codespeak.net> Author: Armin Rigo Branch: Changeset: r43705:b6442348c27d Date: 2011-04-28 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/b6442348c27d/ Log: Add minimark here too. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -116,11 +116,12 @@ * ``--stackless``: this produces a pypy-c that includes features inspired by `Stackless Python `__. - * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid``: + * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``: choose between using the `Boehm-Demers-Weiser garbage collector`_, our reference - counting implementation or four of own collector implementations - (the default depends on the optimization level). + counting implementation or one of own collector implementations + (the default depends on the optimization level but is usually + ``minimark``). Find a more detailed description of the various options in our `configuration sections`_. 
From commits-noreply at bitbucket.org Thu Apr 28 12:58:54 2011 From: commits-noreply at bitbucket.org (iko) Date: Thu, 28 Apr 2011 12:58:54 +0200 (CEST) Subject: [pypy-svn] pypy default: struct passwd is different on MacOSX Message-ID: <20110428105854.D16C2282B9E@codespeak.net> Author: Anders Hammarquist Branch: Changeset: r43706:77d5cc106afd Date: 2011-04-28 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/77d5cc106afd/ Log: struct passwd is different on MacOSX diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -15,7 +15,7 @@ raise ImportError("No pwd module on Windows") from ctypes_support import standard_c_lib as libc -from ctypes import Structure, POINTER, c_int, c_char_p +from ctypes import Structure, POINTER, c_int, c_char_p, c_long try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -23,25 +23,50 @@ uid_t = c_int gid_t = c_int +time_t = c_long -class passwd(Structure): - _fields_ = ( - ("pw_name", c_char_p), - ("pw_passwd", c_char_p), - ("pw_uid", uid_t), - ("pw_gid", gid_t), - ("pw_gecos", c_char_p), - ("pw_dir", c_char_p), - ("pw_shell", c_char_p), - ) - def __iter__(self): - yield self.pw_name - yield self.pw_passwd - yield self.pw_uid - yield self.pw_gid - yield self.pw_gecos - yield self.pw_dir - yield self.pw_shell +if sys.platform == 'darwin': + class passwd(Structure): + _fields_ = ( + ("pw_name", c_char_p), + ("pw_passwd", c_char_p), + ("pw_uid", uid_t), + ("pw_gid", gid_t), + ("pw_change", time_t), + ("pw_class", c_char_p), + ("pw_gecos", c_char_p), + ("pw_dir", c_char_p), + ("pw_shell", c_char_p), + ("pw_expire", time_t), + ("pw_fields", c_int), + ) + def __iter__(self): + yield self.pw_name + yield self.pw_passwd + yield self.pw_uid + yield self.pw_gid + yield self.pw_gecos + yield self.pw_dir + yield self.pw_shell +else: + class passwd(Structure): + _fields_ = ( + ("pw_name", c_char_p), + ("pw_passwd", c_char_p), + ("pw_uid", uid_t), + ("pw_gid", gid_t), + 
("pw_gecos", c_char_p), + ("pw_dir", c_char_p), + ("pw_shell", c_char_p), + ) + def __iter__(self): + yield self.pw_name + yield self.pw_passwd + yield self.pw_uid + yield self.pw_gid + yield self.pw_gecos + yield self.pw_dir + yield self.pw_shell class struct_passwd(tuple): """ From commits-noreply at bitbucket.org Thu Apr 28 12:58:55 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 12:58:55 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert) Fixes dbm related segfaults on Mac OS Message-ID: <20110428105855.F30F3282B9E@codespeak.net> Author: Romain Guillebert Branch: Changeset: r43707:472c3b62d62d Date: 2011-04-28 12:54 +0200 http://bitbucket.org/pypy/pypy/changeset/472c3b62d62d/ Log: (iko, rguillebert) Fixes dbm related segfaults on Mac OS diff --git a/lib_pypy/dbm.py b/lib_pypy/dbm.py --- a/lib_pypy/dbm.py +++ b/lib_pypy/dbm.py @@ -138,14 +138,14 @@ library = "GNU gdbm" funcs = {} -_init_func('open', (c_char_p, c_int, c_int)) -_init_func('close', restype=c_void_p) -_init_func('firstkey', restype=datum) -_init_func('nextkey', restype=datum) -_init_func('fetch', restype=datum) -_init_func('store', restype=c_int) -_init_func('error') -_init_func('delete', restype=c_int) +_init_func('open', (c_char_p, c_int, c_int), restype=c_void_p) +_init_func('close', (c_void_p,), restype=c_void_p) +_init_func('firstkey', (c_void_p,), restype=datum) +_init_func('nextkey', (c_void_p,), restype=datum) +_init_func('fetch', (c_void_p, datum), restype=datum) +_init_func('store', (c_void_p, datum, datum, c_int), restype=c_int) +_init_func('error', (c_void_p,), restype=c_int) +_init_func('delete', (c_void_p, datum), restype=c_int) lib.DBM_INSERT = 0 lib.DBM_REPLACE = 1 From commits-noreply at bitbucket.org Thu Apr 28 12:58:57 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 12:58:57 +0200 (CEST) Subject: [pypy-svn] pypy default: (dario, iko, rguillebert) Fixes a segfault on sqlite Message-ID: 
<20110428105857.C9702282C18@codespeak.net> Author: Romain Guillebert Branch: Changeset: r43708:a8dd6ee71b33 Date: 2011-04-28 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/a8dd6ee71b33/ Log: (dario, iko, rguillebert) Fixes a segfault on sqlite diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -148,6 +148,9 @@ sqlite.sqlite3_value_text.argtypes = [c_void_p] sqlite.sqlite3_value_text.restype = c_char_p +sqlite.sqlite3_value_type.argtypes = [c_void_p] +sqlite.sqlite3_value_type.restype = c_int + sqlite.sqlite3_bind_int.argtypes = [c_void_p, c_int, c_int] sqlite.sqlite3_bind_parameter_count.argtypes = [c_void_p] sqlite.sqlite3_bind_parameter_count.restype = c_int From commits-noreply at bitbucket.org Thu Apr 28 12:59:03 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 12:59:03 +0200 (CEST) Subject: [pypy-svn] pypy default: Merge heads Message-ID: <20110428105903.2C029282C19@codespeak.net> Author: Romain Guillebert Branch: Changeset: r43709:d68c0f35cb8d Date: 2011-04-28 12:58 +0200 http://bitbucket.org/pypy/pypy/changeset/d68c0f35cb8d/ Log: Merge heads diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -8,10 +8,11 @@ Introduction ============ -**Warning**: The overview and description of our garbage collection -strategy and framework is not here but in the `EU-report on this -topic`_. The present document describes the specific garbage collectors -that we wrote in our framework. +The overview and description of our garbage collection strategy and +framework can be found in the `EU-report on this topic`_. Please refer +to that file for an old, but still more or less accurate, description. +The present document describes the specific garbage collectors that we +wrote in our framework. .. 
_`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf @@ -26,6 +27,9 @@ For more details, see the `overview of command line options for translation`_. +The following overview is written in chronological order, so the "best" +GC (which is the default when translating) is the last one below. + .. _`overview of command line options for translation`: config/commandline.html#translation Mark and Sweep @@ -124,4 +128,90 @@ More details are available as comments at the start of the source in `rpython/memory/gc/markcompact.py`_. +Minimark GC +----------- + +This is a simplification and rewrite of the ideas from the Hybrid GC. +It uses a nursery for the young objects, and mark-and-sweep for the old +objects. This is a moving GC, but objects may only move once (from +the nursery to the old stage). + +The main difference with the Hybrid GC is that the mark-and-sweep +objects (the "old stage") are directly handled by the GC's custom +allocator, instead of being handled by malloc() calls. The gain is that +it is then possible, during a major collection, to walk through all old +generation objects without needing to store a list of pointers to them. +So as a first approximation, when compared to the Hybrid GC, the +Minimark GC saves one word of memory per old object. + +There are a number of environment variables that can be tweaked to +influence the GC. (Their default value should be ok for most usages.) +You can read more about them at the start of +`rpython/memory/gc/minimark.py`_. + +In more details: + +- The small newly malloced objects are allocated in the nursery (case 1). + All objects living in the nursery are "young". + +- The big objects are always handled directly by the system malloc(). + But the big newly malloced objects are still "young" when they are + allocated (case 2), even though they don't live in the nursery. + +- When the nursery is full, we do a minor collection, i.e. 
we find + which "young" objects are still alive (from cases 1 and 2). The + "young" flag is then removed. The surviving case 1 objects are moved + to the old stage. The dying case 2 objects are immediately freed. + +- The old stage is an area of memory containing old (small) objects. It + is handled by `rpython/memory/gc/minimarkpage.py`_. It is organized + as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. + Each page can either be free, or contain small objects of all the same + size. Furthermore at any point in time each object location can be + either allocated or freed. The basic design comes from ``obmalloc.c`` + from CPython (which itself comes from the same source as the Linux + system malloc()). + +- New objects are added to the old stage at every minor collection. + Immediately after a minor collection, when we reach some threshold, we + trigger a major collection. This is the mark-and-sweep step. It walks + over *all* objects (mark), and then frees some fraction of them (sweep). + This means that the only time when we want to free objects is while + walking over all of them; we never ask to free an object given just its + address. This allows some simplifications and memory savings when + compared to ``obmalloc.c``. + +- As with all generational collectors, this GC needs a write barrier to + record which old objects have a reference to young objects. + +- Additionally, we found out that it is useful to handle the case of + big arrays specially: when we allocate a big array (with the system + malloc()), we reserve a small number of bytes before. When the array + grows old, we use the extra bytes as a set of bits. Each bit + represents 128 entries in the array. Whenever the write barrier is + called to record a reference from the Nth entry of the array to some + young object, we set the bit number ``(N/128)`` to 1. 
This can + considerably speed up minor collections, because we then only have to + scan 128 entries of the array instead of all of them. + +- As usual, we need special care about weak references, and objects with + finalizers. Weak references are allocated in the nursery, and if they + survive they move to the old stage, as usual for all objects; the + difference is that the reference they contain must either follow the + object, or be set to NULL if the object dies. And the objects with + finalizers, considered rare enough, are immediately allocated old to + simplify the design. In particular their ``__del__`` method can only + be called just after a major collection. + +- The objects move once only, so we can use a trick to implement id() + and hash(). If the object is not in the nursery, it won't move any + more, so its id() and hash() are the object's address, cast to an + integer. If the object is in the nursery, and we ask for its id() + or its hash(), then we pre-reserve a location in the old stage, and + return the address of that location. If the object survives the + next minor collection, we move it there, and so its id() and hash() + are preserved. If the object dies then the pre-reserved location + becomes free garbage, to be collected at the next major collection. + + .. include:: _ref.rst diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -116,11 +116,12 @@ * ``--stackless``: this produces a pypy-c that includes features inspired by `Stackless Python `__. - * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid``: + * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``: choose between using the `Boehm-Demers-Weiser garbage collector`_, our reference - counting implementation or four of own collector implementations - (the default depends on the optimization level). 
+ counting implementation or one of own collector implementations + (the default depends on the optimization level but is usually + ``minimark``). Find a more detailed description of the various options in our `configuration sections`_. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -7,6 +7,13 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) + +* `Allocation Removal by Partial Evaluation in a Tracing JIT`_, + C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo + +* `Towards a Jitting VM for Prolog Execution`_, + C.F. Bolz, M. Leuschel, D, Schneider + * `High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`_, A. Cuni, Ph.D. thesis @@ -58,6 +65,8 @@ .. _bibtex: http://codespeak.net/svn/pypy/extradoc/talk/bibtex.bib +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: http://codespeak.net/svn/pypy/extradoc/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009/bolz-tracing-jit.pdf diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -527,7 +527,8 @@ filename = filepart + suffix return FindInfo(modtype, filename, None, suffix, filemode) except StreamErrors: - pass + pass # XXX! must not eat all exceptions, e.g. + # Out of file descriptors. 
# not found return None @@ -946,7 +947,8 @@ except StreamErrors: if stream: stream.close() - return None + return None # XXX! must not eat all exceptions, e.g. + # Out of file descriptors. def read_compiled_module(space, cpathname, strbuf): """ Read a code object from a file and check it for validity """ diff --git a/lib-python/modified-2.7/test/test_file2k.py b/lib-python/modified-2.7/test/test_file2k.py --- a/lib-python/modified-2.7/test/test_file2k.py +++ b/lib-python/modified-2.7/test/test_file2k.py @@ -529,6 +529,9 @@ pass self._create_file() self._run_workers(worker, nb_workers) + # make sure that all files can be closed now + del self.all_files + gc_collect() if test_support.verbose: # Useful verbose statistics when tuning this test to take # less time to run but still ensuring that its still useful. diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py --- a/pypy/translator/goal/test2/test_app_main.py +++ b/pypy/translator/goal/test2/test_app_main.py @@ -781,7 +781,7 @@ # setup code for test_get_library_path # ------------------------------------ from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION - cpy_ver = '%d.%d.%d' % CPYTHON_VERSION[:3] + cpy_ver = '%d.%d' % CPYTHON_VERSION[:2] goal_dir = os.path.dirname(app_main) # build a directory hierarchy like which contains both bin/pypy-c and diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -23,7 +23,7 @@ builddir = package.package(py.path.local(pypydir).dirpath(), test, rename_pypy_c) prefix = builddir.join(test) - cpyver = '%d.%d.%d' % CPYTHON_VERSION[:3] + cpyver = '%d.%d' % CPYTHON_VERSION[:2] assert prefix.join('lib-python', cpyver, 'test').check() if sys.platform == 'win32': assert prefix.join('pypy-c.exe').check() diff --git a/pypy/translator/sandbox/test/test_pypy_interact.py 
b/pypy/translator/sandbox/test/test_pypy_interact.py --- a/pypy/translator/sandbox/test/test_pypy_interact.py +++ b/pypy/translator/sandbox/test/test_pypy_interact.py @@ -4,7 +4,7 @@ from pypy.translator.interactive import Translation from pypy.module.sys.version import CPYTHON_VERSION -VERSION = '%d.%d.%d' % CPYTHON_VERSION[:3] +VERSION = '%d.%d' % CPYTHON_VERSION[:2] SITE_PY_CONTENT = open(os.path.join(autopath.pypydir, '..', 'lib-python', From commits-noreply at bitbucket.org Thu Apr 28 14:54:56 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 14:54:56 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, antocuni) put the pypy dir in front of sys.path, to be sure that "import pypy" really finds the current package (and not, e.g., another pypy which is in PYTHONPATH) Message-ID: <20110428125456.9677E36C211@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43710:36167b240007 Date: 2011-04-28 14:54 +0200 http://bitbucket.org/pypy/pypy/changeset/36167b240007/ Log: (lac, antocuni) put the pypy dir in front of sys.path, to be sure that "import pypy" really finds the current package (and not, e.g., another pypy which is in PYTHONPATH) diff --git a/pypy/doc/pypyconfig.py b/pypy/doc/pypyconfig.py --- a/pypy/doc/pypyconfig.py +++ b/pypy/doc/pypyconfig.py @@ -2,7 +2,7 @@ def setup(app): import sys, os - sys.path.append(os.path.abspath("../../")) + sys.path.insert(0, os.path.abspath("../../")) from pypy.config import makerestdoc import py role = makerestdoc.register_config_role(py.path.local()) diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -35,20 +35,11 @@ .. _`downloading them from the download page`: http://pypy.org/download.html -If you choose to use mercurial, -first make sure you have ``subversion`` installed. 
You must issue the following command on your command line, DOS box, or terminal:: hg clone http://bitbucket.org/pypy/pypy pypy -If you get an error like this:: - - abort: repository [svn]http://codespeak.net/svn/pypy/build/testrunner not found! - -it probably means that your mercurial version is too old. You need at least -Mercurial 1.6 to clone the PyPy repository. - This will clone the repository and place it into a directory named ``pypy``, and will get you the PyPy source in ``pypy/pypy`` and documentation files in ``pypy/pypy/doc``. From commits-noreply at bitbucket.org Thu Apr 28 16:13:57 2011 From: commits-noreply at bitbucket.org (iko) Date: Thu, 28 Apr 2011 16:13:57 +0200 (CEST) Subject: [pypy-svn] pypy xapian: encode utf8 Message-ID: <20110428141357.E910836C20A@codespeak.net> Author: Anders Hammarquist Branch: xapian Changeset: r43711:ce4a4022a789 Date: 2011-04-26 20:16 +0200 http://bitbucket.org/pypy/pypy/changeset/ce4a4022a789/ Log: encode utf8 diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -188,6 +188,20 @@ assert space.unwrap(w_u) == 'sp' rffi.free_charp(u) + def test_encode_utf8(self, space, api): + uni = u'abcdefg' + data = rffi.unicode2wcharp(uni) + w_s = api.PyUnicode_EncodeUTF8(data, len(uni), lltype.nullptr(rffi.CCHARP.TO)) + assert space.eq_w(space.wrap("abcdefg"), w_s) + rffi.free_wcharp(data) + + uni = u'räksmörgås' + data = rffi.unicode2wcharp(uni) + w_s = api.PyUnicode_EncodeUTF8(data, len(uni), lltype.nullptr(rffi.CCHARP.TO)) + assert space.eq_w(space.wrap("r\xc3\xa4ksm\xc3\xb6rg\xc3\xa5s"), w_s) + rffi.free_wcharp(data) + + def test_IS(self, space, api): for char in [0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x85, 0xa0, 0x1680, 0x2000, 0x2001, 0x2002, diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py @@ -1,4 +1,4 @@ -from pypy.module.cpyext.api import ( +xfrom pypy.module.cpyext.api import ( cpython_api, PyObject, PyObjectP, CANNOT_FAIL ) from pypy.module.cpyext.complexobject import Py_complex_ptr as Py_complex @@ -2545,15 +2545,6 @@ changes in your code for properly supporting 64-bit systems.""" raise NotImplementedError - at cpython_api([rffi.CWCHARP, Py_ssize_t, rffi.CCHARP], PyObject) -def PyUnicode_EncodeUTF8(space, s, size, errors): - """Encode the Py_UNICODE buffer of the given size using UTF-8 and return a - Python string object. Return NULL if an exception was raised by the codec. - - This function used an int type for size. This might require - changes in your code for properly supporting 64-bit systems.""" - raise NotImplementedError - @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) def PyUnicode_DecodeUTF32(space, s, size, errors, byteorder): """Decode length bytes from a UTF-32 encoded buffer string and return the diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -406,6 +406,22 @@ w_errors = space.w_None return space.call_method(w_str, 'decode', space.wrap("utf-8"), w_errors) + at cpython_api([rffi.CWCHARP, Py_ssize_t, rffi.CCHARP], PyObject) +def PyUnicode_EncodeUTF8(space, s, size, errors): + """Encode the Py_UNICODE buffer of the given size using UTF-8 and return a + Python string object. Return NULL if an exception was raised by the codec. + + This function used an int type for size. 
This might require + changes in your code for properly supporting 64-bit systems.""" + + w_s = space.wrap(rffi.wcharpsize2unicode(s, size)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + else: + w_errors = space.w_None + return space.call_method(w_s, 'encode', space.wrap('utf-8'), w_errors) + + @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) def PyUnicode_DecodeUTF16(space, s, size, llerrors, pbyteorder): """Decode length bytes from a UTF-16 encoded buffer string and return the From commits-noreply at bitbucket.org Thu Apr 28 16:14:00 2011 From: commits-noreply at bitbucket.org (iko) Date: Thu, 28 Apr 2011 16:14:00 +0200 (CEST) Subject: [pypy-svn] pypy default: Make ctypes warn about calling functions before declaring argument Message-ID: <20110428141400.C5F3F282BF2@codespeak.net> Author: Anders Hammarquist Branch: Changeset: r43712:78112cfc1526 Date: 2011-04-28 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/78112cfc1526/ Log: Make ctypes warn about calling functions before declaring argument and return value types. 
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py @@ -427,3 +427,28 @@ a[1].x = 33 u = dll.ret_un_func(a[1]) assert u.y == 33*10000 + + def test_warnings(self): + import warnings + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer() + assert len(w) == 2 + assert issubclass(w[0].category, RuntimeWarning) + assert issubclass(w[1].category, RuntimeWarning) + assert "C function without declared arguments called" in str(w[0].message) + assert "C function without declared return type called" in str(w[1].message) + + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer.argtypes = [] + dll.get_an_integer() + assert len(w) == 1 + assert issubclass(w[0].category, RuntimeWarning) + assert "C function without declared return type called" in str(w[0].message) + + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer.restype = None + dll.get_an_integer() + assert len(w) == 0 + + warnings.resetwarnings() diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py @@ -217,6 +217,7 @@ CTP = CFUNCTYPE(None) cfunc = dll._testfunc_callback_void cfunc.argtypes = [CTP] + cfunc.restype = int cfunc(CTP(callback)) out, err = capsys.readouterr() assert (out, err) == ("", "") diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -1,6 +1,7 @@ import _rawffi import sys import traceback +import warnings from _ctypes.basics import ArgumentError, keepalive_key from _ctypes.basics import _CData, _CDataMeta, cdata_from_address @@ 
-69,6 +70,8 @@ _com_index = None _com_iid = None + __restype_set = False + def _getargtypes(self): return self._argtypes_ @@ -134,6 +137,7 @@ return self._restype_ def _setrestype(self, restype): + self.__restype_set = True self._ptr = None if restype is int: from ctypes import c_int @@ -288,7 +292,13 @@ return if argtypes is None: + warnings.warn('C function without declared arguments called', + RuntimeWarning, stacklevel=2) argtypes = [] + + if not self.__restype_set: + warnings.warn('C function without declared return type called', + RuntimeWarning, stacklevel=2) if self._com_index: from ctypes import cast, c_void_p, POINTER From commits-noreply at bitbucket.org Thu Apr 28 16:14:02 2011 From: commits-noreply at bitbucket.org (iko) Date: Thu, 28 Apr 2011 16:14:02 +0200 (CEST) Subject: [pypy-svn] pypy default: declare arugments of libversion Message-ID: <20110428141402.299FE282BF2@codespeak.net> Author: Anders Hammarquist Branch: Changeset: r43713:793a72d624ae Date: 2011-04-28 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/793a72d624ae/ Log: declare arugments of libversion diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -177,6 +177,7 @@ sqlite.sqlite3_errmsg.restype = c_char_p sqlite.sqlite3_get_autocommit.argtypes = [c_void_p] sqlite.sqlite3_get_autocommit.restype = c_int +sqlite.sqlite3_libversion.argtypes = [] sqlite.sqlite3_libversion.restype = c_char_p sqlite.sqlite3_open.argtypes = [c_char_p, c_void_p] sqlite.sqlite3_prepare_v2.argtypes = [c_void_p, c_char_p, c_int, c_void_p, POINTER(c_char_p)] From commits-noreply at bitbucket.org Thu Apr 28 16:19:56 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 16:19:56 +0200 (CEST) Subject: [pypy-svn] pypy default: The uid and gid are unsigned integers, not signed integers. 
Message-ID: <20110428141956.7D6C5282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43714:c608917430ad Date: 2011-04-28 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/c608917430ad/ Log: The uid and gid are unsigned integers, not signed integers. This is important on MacOSX, where e.g. "nobody" has a uid of 2**32-2. diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -15,14 +15,14 @@ raise ImportError("No pwd module on Windows") from ctypes_support import standard_c_lib as libc -from ctypes import Structure, POINTER, c_int, c_char_p, c_long +from ctypes import Structure, POINTER, c_int, c_char_p, c_long, c_uint try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f -uid_t = c_int -gid_t = c_int +uid_t = c_uint +gid_t = c_uint time_t = c_long if sys.platform == 'darwin': From commits-noreply at bitbucket.org Thu Apr 28 16:19:59 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 16:19:59 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110428141959.AB80C282C18@codespeak.net> Author: Armin Rigo Branch: Changeset: r43715:5c61d613db61 Date: 2011-04-28 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/5c61d613db61/ Log: merge heads diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py @@ -427,3 +427,28 @@ a[1].x = 33 u = dll.ret_un_func(a[1]) assert u.y == 33*10000 + + def test_warnings(self): + import warnings + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer() + assert len(w) == 2 + assert issubclass(w[0].category, RuntimeWarning) + assert issubclass(w[1].category, RuntimeWarning) + assert "C function without declared arguments called" in str(w[0].message) + assert "C function without 
declared return type called" in str(w[1].message) + + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer.argtypes = [] + dll.get_an_integer() + assert len(w) == 1 + assert issubclass(w[0].category, RuntimeWarning) + assert "C function without declared return type called" in str(w[0].message) + + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer.restype = None + dll.get_an_integer() + assert len(w) == 0 + + warnings.resetwarnings() diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -177,6 +177,7 @@ sqlite.sqlite3_errmsg.restype = c_char_p sqlite.sqlite3_get_autocommit.argtypes = [c_void_p] sqlite.sqlite3_get_autocommit.restype = c_int +sqlite.sqlite3_libversion.argtypes = [] sqlite.sqlite3_libversion.restype = c_char_p sqlite.sqlite3_open.argtypes = [c_char_p, c_void_p] sqlite.sqlite3_prepare_v2.argtypes = [c_void_p, c_char_p, c_int, c_void_p, POINTER(c_char_p)] diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py @@ -217,6 +217,7 @@ CTP = CFUNCTYPE(None) cfunc = dll._testfunc_callback_void cfunc.argtypes = [CTP] + cfunc.restype = int cfunc(CTP(callback)) out, err = capsys.readouterr() assert (out, err) == ("", "") diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -1,6 +1,7 @@ import _rawffi import sys import traceback +import warnings from _ctypes.basics import ArgumentError, keepalive_key from _ctypes.basics import _CData, _CDataMeta, cdata_from_address @@ -69,6 +70,8 @@ _com_index = None _com_iid = None + __restype_set = False + def _getargtypes(self): return self._argtypes_ @@ -134,6 +137,7 @@ return self._restype_ def _setrestype(self, restype): + self.__restype_set = 
True self._ptr = None if restype is int: from ctypes import c_int @@ -288,7 +292,13 @@ return if argtypes is None: + warnings.warn('C function without declared arguments called', + RuntimeWarning, stacklevel=2) argtypes = [] + + if not self.__restype_set: + warnings.warn('C function without declared return type called', + RuntimeWarning, stacklevel=2) if self._com_index: from ctypes import cast, c_void_p, POINTER From commits-noreply at bitbucket.org Thu Apr 28 16:38:56 2011 From: commits-noreply at bitbucket.org (lac) Date: Thu, 28 Apr 2011 16:38:56 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Begin the process of standardising on the name *The RPython toolchain*. Message-ID: <20110428143856.7A328282B9E@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43716:a7d36c603b2b Date: 2011-04-28 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/a7d36c603b2b/ Log: Begin the process of standardising on the name *The RPython toolchain*. While I was there I changed a paragraph that talked about our goal to eventually have a JIT. Various English fixes, and the odd sentence that had a double world 'we want to to do something' diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -1,6 +1,6 @@ -===================== - PyPy - Translation -===================== +============================= + PyPy - The RPython Toolchain +============================= .. contents:: diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -16,22 +16,20 @@ * a common translation and support framework for producing implementations of dynamic languages, emphasizing a clean separation between language specification and implementation - aspects. + aspects. We call this the `RPython toolchain`_. 
* a compliant, flexible and fast implementation of the Python_ Language - using the above framework to enable new advanced features without having - to encode low level details into it. + which uses the above toolchain to enable new advanced high-level features + without having to encode the low-level details. -By separating concerns in this way, we intend for our implementation -of Python - and other dynamic languages - to become robust against almost -all implementation decisions, including target platform, memory and -threading models, optimizations applied, up to to the point of being able to -automatically *generate* Just-in-Time compilers for dynamic languages. - -Conversely, our implementation techniques, including the JIT compiler -generator, should become robust against changes in the languages -implemented. - +By separating concerns in this way, our implementation +of Python - and other dynamic languages - is able to automatically +generate a Just-in-Time compiler for any dynamic language. It also +allows a mix-and-match approach to implementation decisions, including +many that have historically been outside of a user's control, such as +target platform, memory and +threading models, garbage collection strategies, and optimizations applied, +including whether or not to have a JIT in the first place. High Level Goals ============================= @@ -40,9 +38,10 @@ ----------------------------------------------- Traditionally, language interpreters are written in a target platform language -like C/Posix, Java or C#. Each such implementation fundamentally provides -a mapping from application source code to the target environment. One of -the goals of the "all-encompassing" environments, like the .NET framework +such as C/Posix, Java or C#. Each implementation provides +a fundamental mapping between application source code and the target +environment. 
One of +the goals of the "all-encompassing" environments, such as the .NET framework and to some extent the Java virtual machine, is to provide standardized and higher level functionalities in order to support language implementers for writing language implementations. @@ -50,7 +49,7 @@ PyPy is experimenting with a more ambitious approach. We are using a subset of the high-level language Python, called RPython_, in which we write languages as simple interpreters with few references to and -dependencies on lower level details. Our translation framework then +dependencies on lower level details. The `RPython toolchain`_ produces a concrete virtual machine for the platform of our choice by inserting appropriate lower level aspects. The result can be customized by selecting other feature and platform configurations. @@ -58,8 +57,8 @@ Our goal is to provide a possible solution to the problem of language implementers: having to write ``l * o * p`` interpreters for ``l`` dynamic languages and ``p`` platforms with ``o`` crucial design -decisions. PyPy aims at having any one of these parameters changeable -independently from each other: +decisions. PyPy aims at making it possible to change each of these +variables independently such that: * ``l``: the language that we analyze can be evolved or entirely replaced; @@ -121,8 +120,8 @@ The Translation Framework ------------------------- -The job of the translation tool chain is to translate RPython_ programs -into an efficient version of that program for one of various target +The job of the RPython toolchain is to translate RPython_ programs +into an efficient version of that program for one of the various target platforms, generally one that is considerably lower-level than Python. The approach we have taken is to reduce the level of abstraction of the @@ -133,7 +132,7 @@ assume an object-oriented model with classes, instances and methods (as, for example, the Java and .NET virtual machines do). 
-The translation tool chain never sees the RPython source code or syntax +The RPython toolchain never sees the RPython source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. It can be considered as "freezing" a pre-imported RPython program into an @@ -161,7 +160,7 @@ and compiled into an executable. This process is described in much more detail in the `document about -the translation process`_ and in the paper `Compiling dynamic language +the RPython toolchain`_ and in the paper `Compiling dynamic language implementations`_. .. _`control flow graph`: translation.html#the-flow-model @@ -169,10 +168,9 @@ .. _Annotator: translation.html#the-annotation-pass .. _RTyper: rtyper.html#overview .. _`various transformations`: translation.html#the-optional-transformations -.. _`document about the translation process`: translation.html +.. _`document about the RPython toolchain`: translation.html .. _`garbage collector`: garbage_collection.html - - +.. _`RPython toolchain`: translation.html .. _`standard interpreter`: .. 
_`python interpreter`: From commits-noreply at bitbucket.org Thu Apr 28 16:39:01 2011 From: commits-noreply at bitbucket.org (lac) Date: Thu, 28 Apr 2011 16:39:01 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110428143901.3125C282C18@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43717:51377fd863b6 Date: 2011-04-28 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/51377fd863b6/ Log: merge heads diff --git a/pypy/doc/pypyconfig.py b/pypy/doc/pypyconfig.py --- a/pypy/doc/pypyconfig.py +++ b/pypy/doc/pypyconfig.py @@ -2,7 +2,7 @@ def setup(app): import sys, os - sys.path.append(os.path.abspath("../../")) + sys.path.insert(0, os.path.abspath("../../")) from pypy.config import makerestdoc import py role = makerestdoc.register_config_role(py.path.local()) diff --git a/pypy/doc/discussion/distribution-roadmap.rst b/pypy/doc/discussion/distribution-roadmap.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-roadmap.rst +++ /dev/null @@ -1,72 +0,0 @@ -Distribution: -============= - -Some random thoughts about automatic (or not) distribution layer. - -What I want to achieve is to make clean approach to perform -distribution mechanism with virtually any distribution heuristic. - -First step - RPython level: ---------------------------- - -First (simplest) step is to allow user to write RPython programs with -some kind of remote control over program execution. For start I would -suggest using RMI (Remote Method Invocation) and remote object access -(in case of low level it would be struct access). For the simplicity -it will make some sense to target high-level platform at the beginning -(CLI platform seems like obvious choice), which provides more primitives -for performing such operations. To make attempt easier, I'll provide -some subset of type system to be serializable which can go as parameters -to such a call. 
- -I take advantage of several assumptions: - -* globals are constants - this allows us to just run multiple instances - of the same program on multiple machines and perform RMI. - -* I/O is explicit - this makes GIL problem not that important. XXX: I've got - to read more about GIL to notice if this is true. - -Second step - doing it a little bit more automatically: -------------------------------------------------------- - -The second step is to allow some heuristic to live and change -calls to RMI calls. This should follow some assumptions (which may vary, -regarding implementation): - -* Not to move I/O to different machine (we can track I/O and side-effects - in RPython code). - -* Make sure all C calls are safe to transfer if we want to do that (this - depends on probably static API declaration from programmer "I'm sure this - C call has no side-effects", we don't want to check it in C) or not transfer - them at all. - -* Perform it all statically, at the time of program compilation. - -* We have to generate serialization methods for some classes, which - we want to transfer (Same engine might be used to allow JSON calls in JS - backend to transfer arbitrary python object). - -Third step - Just-in-time distribution: ---------------------------------------- - -The biggest step here is to provide JIT integration into distribution -system. This should allow to make it really useful (probably compile-time -distribution will not work for example for whole Python interpreter, because -of too huge granularity). This is quite unclear for me how to do that -(JIT is not complete and I don't know too much about it). Probably we -take JIT information about graphs and try to feed it to heuristic in some way -to change the calls into RMI. 
- -Problems to fight with: ------------------------ - -Most problems are to make mechanism working efficiently, so: - -* Avoid too much granularity (copying a lot of objects in both directions - all the time) - -* Make heuristic not eat too much CPU time/memory and all of that. - -* ... diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -35,20 +35,11 @@ .. _`downloading them from the download page`: http://pypy.org/download.html -If you choose to use mercurial, -first make sure you have ``subversion`` installed. You must issue the following command on your command line, DOS box, or terminal:: hg clone http://bitbucket.org/pypy/pypy pypy -If you get an error like this:: - - abort: repository [svn]http://codespeak.net/svn/pypy/build/testrunner not found! - -it probably means that your mercurial version is too old. You need at least -Mercurial 1.6 to clone the PyPy repository. - This will clone the repository and place it into a directory named ``pypy``, and will get you the PyPy source in ``pypy/pypy`` and documentation files in ``pypy/pypy/doc``. diff --git a/pypy/doc/discussion/distribution.rst b/pypy/doc/discussion/distribution.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. XXX fijal, can this be killed? - -=================================================== -(Semi)-transparent distribution of RPython programs -=================================================== - -Some (rough) ideas how I see distribution ------------------------------------------ - -The main point about it, is to behave very much like JIT - not -to perform distribution on Python source code level, but instead -perform distribution of RPython source, and eventually perform -distribution of interpreter at the end. - -This attempt gives same advantages as off-line JIT (any RPython based -interpreter, etc.) 
and gives nice field to play with different -distribution heuristics. This also makes eventually nice possibility -of integrating JIT with distribution, thus allowing distribution -heuristics to have more information that they might have otherwise and -as well with specializing different nodes in performing different tasks. - -Flow graph level ----------------- - -Probably the best place to perform distribution attempt is to insert -special graph distributing operations into low-level graphs (either lltype -or ootype based), which will allow distribution heuristic to decide -on entrypoint to block/graph/some other structure??? what variables/functions -are accessed inside some part and if it's worth transferring it over wire. - -Backend level -------------- - -Backends will need explicit support for distribution of any kind. Basically -it should be possible for backend to remotely call block/graph/structure -in any manner (it should strongly depend on backend possibilities). diff --git a/pypy/doc/discussion/distribution-newattempt.rst b/pypy/doc/discussion/distribution-newattempt.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-newattempt.rst +++ /dev/null @@ -1,65 +0,0 @@ -Distribution: -============= - -This is outcome of Armin's and Samuele's ideas and our discussion, -kept together by fijal. - -The communication layer: -======================== - -Communication layer is the layer which takes care of explicit -communication. Suppose we do have two (or more) running interpreters -on different machines or in different processes. Let's call it *local side* -(the one on which we're operating) and *remote side*. - -What we want to achieve is to have a transparent enough layer on local -side, which does not allow user to tell the objects local and remote apart -(despite __pypy__.internal_repr, which I would consider cheating). 
- -Because in pypy we have possibility to have different implementations -for types (even builtin ones), we can use that mechanism to implement -our simple RMI. - -The idea is to provide thin layer for accessing remote object, lays as -different implementation for any possible object. So if you perform any -operation on an object locally, which is really a remote object, you -perform all method lookup and do a call on it. Than proxy object -redirects the call to app-level code (socket, execnet, whatever) which -calls remote interpreter with given parameters. It's important that we -can always perform such a call, even if types are not marshallable, because -we can provide remote proxies of local objects to remote side in that case. - -XXX: Need to explain in a bit more informative way. - -Example: --------- - -Suppose we do have ``class A`` and instance ``a = A()`` on remote side -and we want to access this from a local side. We make an object of type -``object`` and we do copy -``__dict__`` keys with values, which correspond to objects on the remote -side (have the same type to user) but they've got different implementation. -(Ie. method calling will look like quite different). - -Even cooler example: --------------------- - -Reminding hpk's example of 5-liner remote file server. With this we make:: - - f = remote_side.import(open) - f("file_name").read() - -Implementation plans: ---------------------- - -We need: - -* app-level primitives for having 'remote proxy' accessible - -* some "serialiser" which is not truly serialising stuff, but making - sure communication will go. - -* interp-level proxy object which emulates every possible object which - delegates operations to app-level primitive proxy. - -* to make it work.... 
diff --git a/pypy/doc/discussion/distribution-implementation.rst b/pypy/doc/discussion/distribution-implementation.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-implementation.rst +++ /dev/null @@ -1,91 +0,0 @@ -===================================================== -Random implementation details of distribution attempt -===================================================== - -.. contents:: -.. sectnum:: - -This document attempts to broaden this `dist thoughts`_. - -.. _`dist thoughts`: distribution-newattempt.html - -Basic implementation: ---------------------- - -First we do split objects into value-only primitives (like int) and other. -Basically immutable builtin types which cannot contain user-level objects -(int, float, long, str, None, etc.) will be always transferred as value-only -objects (having no states etc.). The every other object (user created classes, -instances, modules, lists, tuples, etc. etc.) are always executed by reference. -(Of course if somebody wants to ie. copy the instance, he can marshal/pickle -this to string and send, but it's outside the scope of this attempt). Special -case might be immutable data structure (tuple, frozenset) containing simple -types (this becomes simple type). - -XXX: What to do with code types? Marshalling them and sending seems to have no -sense. Remote execution? Local execution with remote f_locals and f_globals? - -Every remote object has got special class W_RemoteXXX where XXX is interp-level -class implementing this object. W_RemoteXXX implements all the operations -by using special app-level code that sends method name and arguments over the wire -(arguments might be either simple objects which are simply send over the app-level -code or references to local objects). 
- -So the basic scheme would look like:: - - remote_ref = remote("Object reference") - remote_ref.any_method() - -``remote_ref`` in above example looks like normal python object to user, -but is implemented differently (W_RemoteXXX), and uses app-level proxy -to forward each interp-level method call. - -Abstraction layers: -------------------- - -In this section we define remote side as a side on which calls are -executed and local side is the one on which calls are run. - -* Looking from the local side, first thing that we see is object - which looks like normal object (has got the same interp-level typedef) - but has got different implementation. Basically this is the shallow copy - of remote object (however you define shallow, it's up to the code which - makes the copy. Basically the copy which can be marshalled or send over - the wire or saved for future purpose). This is W_RemoteXXX where XXX is - real object name. Some operations on that object requires accessing remote - side of the object, some might not need such (for example remote int - is totally the same int as local one, it could not even be implemented - differently). - -* For every interp-level operation, which accesses internals that are not - accessible at the local side, (basically all attribute accesses which - are accessing things that are subclasses of W_Object) we provide special - W_Remote version, which downloads necessary object when needed - (if accessed). This is the same as normal W_RemoteXXX (we know the type!) - but not needed yet. - -* From the remote point of view, every exported object which needs such - has got a local appropriate storage W_LocalXXX where XXX is a type - by which it could be accessed from a wire. - -The real pain: --------------- - -For every attribute access when we get W_RemoteXXX, we need to check -the download flag - which sucks a bit. (And we have to support it somehow -in annotator, which sucks a lot). 
The (some) idea is to wrap all the methods -with additional checks, but that's both unclear and probably not necessary. - -XXX If we can easily change underlying implementation of an object, than -this might become way easier. Right now I'll try to have it working and -thing about RPython later. - -App-level remote tool: ----------------------- - -For purpose of app-level tool which can transfer the data (well, socket might -be enough, but suppose I want to be more flexible), I would use `py.execnet`_, -probably using some of the Armin's hacks to rewrite it using greenlets instead -of threads. - -.. _`py.execnet`: http://codespeak.net/execnet/ diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -8,10 +8,6 @@ .. toctree:: - discussion/distribution-implementation.rst - discussion/distribution-newattempt.rst - discussion/distribution-roadmap.rst - discussion/distribution.rst discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst From commits-noreply at bitbucket.org Thu Apr 28 17:14:55 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 17:14:55 +0200 (CEST) Subject: [pypy-svn] pypy default: Backout c608917430ad. It's a mess. See next checkin for explanations. Message-ID: <20110428151455.78E1A36C202@codespeak.net> Author: Armin Rigo Branch: Changeset: r43718:9af9046d106b Date: 2011-04-28 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/9af9046d106b/ Log: Backout c608917430ad. It's a mess. See next checkin for explanations. 
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -15,14 +15,14 @@ raise ImportError("No pwd module on Windows") from ctypes_support import standard_c_lib as libc -from ctypes import Structure, POINTER, c_int, c_char_p, c_long, c_uint +from ctypes import Structure, POINTER, c_int, c_char_p, c_long try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f -uid_t = c_uint -gid_t = c_uint +uid_t = c_int +gid_t = c_int time_t = c_long if sys.platform == 'darwin': From commits-noreply at bitbucket.org Thu Apr 28 17:14:57 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 17:14:57 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, arigo) Message-ID: <20110428151457.B2242282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43719:e6a6bec95962 Date: 2011-04-28 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e6a6bec95962/ Log: (iko, rguillebert, arigo) CPython 2.7 semantics are too messy to follow exactly, e.g. setuid(-2) works on 32-bit but not on 64-bit. The exact rules are rather complicated, and of course the ones we implemented are a bit different than CPython's. As a result, we decided to just accept any 'int', i.e. any C signed long. This is basically reverting back to the situation of CPython 2.5, I think. 
diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- +from __future__ import with_statement from pypy.objspace.std import StdObjSpace from pypy.tool.udir import udir from pypy.conftest import gettestobjspace @@ -506,6 +507,7 @@ if hasattr(os, 'setuid'): def test_os_setuid_error(self): + skip("overflow checking disabled for now") os = self.posix raises((OSError, ValueError, OverflowError), os.setuid, -100000) @@ -527,6 +529,7 @@ if hasattr(os, 'setgid'): def test_os_setgid_error(self): + skip("overflow checking disabled for now") os = self.posix raises((OSError, ValueError, OverflowError), os.setgid, -100000) @@ -534,7 +537,7 @@ def test_os_getsid(self): os = self.posix assert os.getsid(0) == self.getsid0 - raises((OSError, ValueError, OverflowError), os.getsid, -100000) + raises(OSError, os.getsid, -100000) if hasattr(os, 'sysconf'): def test_os_sysconf(self): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -15,7 +15,12 @@ _WIN = sys.platform == 'win32' c_int = "c_int" -c_nonnegint = "c_nonnegint" + +# CPython 2.7 semantics are too messy to follow exactly, +# e.g. setuid(-2) works on 32-bit but not on 64-bit. As a result, +# we decided to just accept any 'int', i.e. any C signed long. 
+c_uid_t = int +c_gid_t = int class FileEncoder(object): def __init__(self, space, w_obj): @@ -823,7 +828,7 @@ """ return space.wrap(os.getuid()) - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=int) def setuid(space, arg): """ setuid(uid) @@ -835,7 +840,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=c_uid_t) def seteuid(space, arg): """ seteuid(uid) @@ -847,7 +852,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=c_gid_t) def setgid(space, arg): """ setgid(gid) @@ -859,7 +864,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=c_gid_t) def setegid(space, arg): """ setegid(gid) @@ -960,7 +965,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(ruid=c_int, euid=c_int) + at unwrap_spec(ruid=c_uid_t, euid=c_uid_t) def setreuid(space, ruid, euid): """ setreuid(ruid, euid) @@ -972,7 +977,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(rgid=c_int, egid=c_int) + at unwrap_spec(rgid=c_gid_t, egid=c_gid_t) def setregid(space, rgid, egid): """ setregid(rgid, egid) @@ -1056,7 +1061,7 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(path=str, uid=c_int, gid=c_int) + at unwrap_spec(path=str, uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): try: os.chown(path, uid, gid) @@ -1064,7 +1069,7 @@ raise wrap_oserror(space, e, path) return space.w_None - at unwrap_spec(path=str, uid=c_int, gid=c_int) + at unwrap_spec(path=str, uid=c_uid_t, gid=c_gid_t) def lchown(space, path, uid, gid): try: os.lchown(path, uid, gid) From commits-noreply at bitbucket.org Thu Apr 28 17:21:50 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 17:21:50 +0200 (CEST) Subject: [pypy-svn] pypy default: (anto, dario, rguillebert) Expat is buggy on Mac OS X, skip the test Message-ID: <20110428152150.07F2D36C202@codespeak.net> 
Author: Romain Guillebert Branch: Changeset: r43720:9993380416c4 Date: 2011-04-28 16:59 +0200 http://bitbucket.org/pypy/pypy/changeset/9993380416c4/ Log: (anto, dario, rguillebert) Expat is buggy on Mac OS X, skip the test diff --git a/lib-python/2.7/test/test_pyexpat.py b/lib-python/modified-2.7/test/test_pyexpat.py copy from lib-python/2.7/test/test_pyexpat.py copy to lib-python/modified-2.7/test/test_pyexpat.py --- a/lib-python/2.7/test/test_pyexpat.py +++ b/lib-python/modified-2.7/test/test_pyexpat.py @@ -559,6 +559,9 @@ self.assertEqual(self.n, 4) class MalformedInputText(unittest.TestCase): + # CPython seems to ship its own version of expat, they fixed it on this commit : + # http://svn.python.org/view?revision=74429&view=revision + @unittest.skipIf(sys.platform == "darwin", "Expat is broken on Mac OS X 10.6.6") def test1(self): xml = "\0\r\n" parser = expat.ParserCreate() @@ -568,6 +571,7 @@ except expat.ExpatError as e: self.assertEqual(str(e), 'unclosed token: line 2, column 0') + @unittest.skipIf(sys.platform == "darwin", "Expat is broken on Mac OS X 10.6.6") def test2(self): xml = "\r\n" parser = expat.ParserCreate() diff --git a/lib-python/2.7/test/test_pyexpat.py b/lib-python/2.7/test/test_pyexpat.py deleted file mode 100644 --- a/lib-python/2.7/test/test_pyexpat.py +++ /dev/null @@ -1,593 +0,0 @@ -# XXX TypeErrors on calling handlers, or on bad return values from a -# handler, are obscure and unhelpful. 
- -import StringIO, sys -import unittest - -from xml.parsers import expat - -from test.test_support import sortdict, run_unittest - - -class SetAttributeTest(unittest.TestCase): - def setUp(self): - self.parser = expat.ParserCreate(namespace_separator='!') - self.set_get_pairs = [ - [0, 0], - [1, 1], - [2, 1], - [0, 0], - ] - - def test_returns_unicode(self): - for x, y in self.set_get_pairs: - self.parser.returns_unicode = x - self.assertEqual(self.parser.returns_unicode, y) - - def test_ordered_attributes(self): - for x, y in self.set_get_pairs: - self.parser.ordered_attributes = x - self.assertEqual(self.parser.ordered_attributes, y) - - def test_specified_attributes(self): - for x, y in self.set_get_pairs: - self.parser.specified_attributes = x - self.assertEqual(self.parser.specified_attributes, y) - - -data = '''\ - - - - - - - - -%unparsed_entity; -]> - - - - Contents of subelements - - -&external_entity; - -''' - - -# Produce UTF-8 output -class ParseTest(unittest.TestCase): - class Outputter: - def __init__(self): - self.out = [] - - def StartElementHandler(self, name, attrs): - self.out.append('Start element: ' + repr(name) + ' ' + - sortdict(attrs)) - - def EndElementHandler(self, name): - self.out.append('End element: ' + repr(name)) - - def CharacterDataHandler(self, data): - data = data.strip() - if data: - self.out.append('Character data: ' + repr(data)) - - def ProcessingInstructionHandler(self, target, data): - self.out.append('PI: ' + repr(target) + ' ' + repr(data)) - - def StartNamespaceDeclHandler(self, prefix, uri): - self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri)) - - def EndNamespaceDeclHandler(self, prefix): - self.out.append('End of NS decl: ' + repr(prefix)) - - def StartCdataSectionHandler(self): - self.out.append('Start of CDATA section') - - def EndCdataSectionHandler(self): - self.out.append('End of CDATA section') - - def CommentHandler(self, text): - self.out.append('Comment: ' + repr(text)) - - def 
NotationDeclHandler(self, *args): - name, base, sysid, pubid = args - self.out.append('Notation declared: %s' %(args,)) - - def UnparsedEntityDeclHandler(self, *args): - entityName, base, systemId, publicId, notationName = args - self.out.append('Unparsed entity decl: %s' %(args,)) - - def NotStandaloneHandler(self, userData): - self.out.append('Not standalone') - return 1 - - def ExternalEntityRefHandler(self, *args): - context, base, sysId, pubId = args - self.out.append('External entity ref: %s' %(args[1:],)) - return 1 - - def DefaultHandler(self, userData): - pass - - def DefaultHandlerExpand(self, userData): - pass - - handler_names = [ - 'StartElementHandler', 'EndElementHandler', - 'CharacterDataHandler', 'ProcessingInstructionHandler', - 'UnparsedEntityDeclHandler', 'NotationDeclHandler', - 'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler', - 'CommentHandler', 'StartCdataSectionHandler', - 'EndCdataSectionHandler', - 'DefaultHandler', 'DefaultHandlerExpand', - #'NotStandaloneHandler', - 'ExternalEntityRefHandler' - ] - - def test_utf8(self): - - out = self.Outputter() - parser = expat.ParserCreate(namespace_separator='!') - for name in self.handler_names: - setattr(parser, name, getattr(out, name)) - parser.returns_unicode = 0 - parser.Parse(data, 1) - - # Verify output - op = out.out - self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'') - self.assertEqual(op[1], "Comment: ' comment data '") - self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)") - self.assertEqual(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')") - self.assertEqual(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}") - self.assertEqual(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'") - self.assertEqual(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}") - self.assertEqual(op[7], "Character data: 'Contents of subelements'") 
- self.assertEqual(op[8], "End element: 'http://www.python.org/namespace!subelement'") - self.assertEqual(op[9], "End of NS decl: 'myns'") - self.assertEqual(op[10], "Start element: 'sub2' {}") - self.assertEqual(op[11], 'Start of CDATA section') - self.assertEqual(op[12], "Character data: 'contents of CDATA section'") - self.assertEqual(op[13], 'End of CDATA section') - self.assertEqual(op[14], "End element: 'sub2'") - self.assertEqual(op[15], "External entity ref: (None, 'entity.file', None)") - self.assertEqual(op[16], "End element: 'root'") - - def test_unicode(self): - # Try the parse again, this time producing Unicode output - out = self.Outputter() - parser = expat.ParserCreate(namespace_separator='!') - parser.returns_unicode = 1 - for name in self.handler_names: - setattr(parser, name, getattr(out, name)) - - parser.Parse(data, 1) - - op = out.out - self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') - self.assertEqual(op[1], "Comment: u' comment data '") - self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") - self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") - self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") - self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") - self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") - self.assertEqual(op[7], "Character data: u'Contents of subelements'") - self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'") - self.assertEqual(op[9], "End of NS decl: u'myns'") - self.assertEqual(op[10], "Start element: u'sub2' {}") - self.assertEqual(op[11], 'Start of CDATA section') - self.assertEqual(op[12], "Character data: u'contents of CDATA section'") - self.assertEqual(op[13], 'End of CDATA section') - self.assertEqual(op[14], "End element: u'sub2'") - 
self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)") - self.assertEqual(op[16], "End element: u'root'") - - def test_parse_file(self): - # Try parsing a file - out = self.Outputter() - parser = expat.ParserCreate(namespace_separator='!') - parser.returns_unicode = 1 - for name in self.handler_names: - setattr(parser, name, getattr(out, name)) - file = StringIO.StringIO(data) - - parser.ParseFile(file) - - op = out.out - self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') - self.assertEqual(op[1], "Comment: u' comment data '") - self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") - self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") - self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") - self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") - self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") - self.assertEqual(op[7], "Character data: u'Contents of subelements'") - self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'") - self.assertEqual(op[9], "End of NS decl: u'myns'") - self.assertEqual(op[10], "Start element: u'sub2' {}") - self.assertEqual(op[11], 'Start of CDATA section') - self.assertEqual(op[12], "Character data: u'contents of CDATA section'") - self.assertEqual(op[13], 'End of CDATA section') - self.assertEqual(op[14], "End element: u'sub2'") - self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)") - self.assertEqual(op[16], "End element: u'root'") - - -class NamespaceSeparatorTest(unittest.TestCase): - def test_legal(self): - # Tests that make sure we get errors when the namespace_separator value - # is illegal, and that we don't for good values: - expat.ParserCreate() - expat.ParserCreate(namespace_separator=None) - 
expat.ParserCreate(namespace_separator=' ') - - def test_illegal(self): - try: - expat.ParserCreate(namespace_separator=42) - self.fail() - except TypeError, e: - self.assertEqual(str(e), - 'ParserCreate() argument 2 must be string or None, not int') - - try: - expat.ParserCreate(namespace_separator='too long') - self.fail() - except ValueError, e: - self.assertEqual(str(e), - 'namespace_separator must be at most one character, omitted, or None') - - def test_zero_length(self): - # ParserCreate() needs to accept a namespace_separator of zero length - # to satisfy the requirements of RDF applications that are required - # to simply glue together the namespace URI and the localname. Though - # considered a wart of the RDF specifications, it needs to be supported. - # - # See XML-SIG mailing list thread starting with - # http://mail.python.org/pipermail/xml-sig/2001-April/005202.html - # - expat.ParserCreate(namespace_separator='') # too short - - -class InterningTest(unittest.TestCase): - def test(self): - # Test the interning machinery. - p = expat.ParserCreate() - L = [] - def collector(name, *args): - L.append(name) - p.StartElementHandler = collector - p.EndElementHandler = collector - p.Parse(" ", 1) - tag = L[0] - self.assertEqual(len(L), 6) - for entry in L: - # L should have the same string repeated over and over. 
- self.assertTrue(tag is entry) - - -class BufferTextTest(unittest.TestCase): - def setUp(self): - self.stuff = [] - self.parser = expat.ParserCreate() - self.parser.buffer_text = 1 - self.parser.CharacterDataHandler = self.CharacterDataHandler - - def check(self, expected, label): - self.assertEqual(self.stuff, expected, - "%s\nstuff = %r\nexpected = %r" - % (label, self.stuff, map(unicode, expected))) - - def CharacterDataHandler(self, text): - self.stuff.append(text) - - def StartElementHandler(self, name, attrs): - self.stuff.append("<%s>" % name) - bt = attrs.get("buffer-text") - if bt == "yes": - self.parser.buffer_text = 1 - elif bt == "no": - self.parser.buffer_text = 0 - - def EndElementHandler(self, name): - self.stuff.append("" % name) - - def CommentHandler(self, data): - self.stuff.append("" % data) - - def setHandlers(self, handlers=[]): - for name in handlers: - setattr(self.parser, name, getattr(self, name)) - - def test_default_to_disabled(self): - parser = expat.ParserCreate() - self.assertFalse(parser.buffer_text) - - def test_buffering_enabled(self): - # Make sure buffering is turned on - self.assertTrue(self.parser.buffer_text) - self.parser.Parse("123", 1) - self.assertEqual(self.stuff, ['123'], - "buffered text not properly collapsed") - - def test1(self): - # XXX This test exposes more detail of Expat's text chunking than we - # XXX like, but it tests what we need to concisely. 
- self.setHandlers(["StartElementHandler"]) - self.parser.Parse("12\n34\n5", 1) - self.assertEqual(self.stuff, - ["", "1", "", "2", "\n", "3", "", "4\n5"], - "buffering control not reacting as expected") - - def test2(self): - self.parser.Parse("1<2> \n 3", 1) - self.assertEqual(self.stuff, ["1<2> \n 3"], - "buffered text not properly collapsed") - - def test3(self): - self.setHandlers(["StartElementHandler"]) - self.parser.Parse("123", 1) - self.assertEqual(self.stuff, ["", "1", "", "2", "", "3"], - "buffered text not properly split") - - def test4(self): - self.setHandlers(["StartElementHandler", "EndElementHandler"]) - self.parser.CharacterDataHandler = None - self.parser.Parse("123", 1) - self.assertEqual(self.stuff, - ["", "", "", "", "", ""]) - - def test5(self): - self.setHandlers(["StartElementHandler", "EndElementHandler"]) - self.parser.Parse("123", 1) - self.assertEqual(self.stuff, - ["", "1", "", "", "2", "", "", "3", ""]) - - def test6(self): - self.setHandlers(["CommentHandler", "EndElementHandler", - "StartElementHandler"]) - self.parser.Parse("12345 ", 1) - self.assertEqual(self.stuff, - ["", "1", "", "", "2", "", "", "345", ""], - "buffered text not properly split") - - def test7(self): - self.setHandlers(["CommentHandler", "EndElementHandler", - "StartElementHandler"]) - self.parser.Parse("12345 ", 1) - self.assertEqual(self.stuff, - ["", "1", "", "", "2", "", "", "3", - "", "4", "", "5", ""], - "buffered text not properly split") - - -# Test handling of exception from callback: -class HandlerExceptionTest(unittest.TestCase): - def StartElementHandler(self, name, attrs): - raise RuntimeError(name) - - def test(self): - parser = expat.ParserCreate() - parser.StartElementHandler = self.StartElementHandler - try: - parser.Parse("", 1) - self.fail() - except RuntimeError, e: - self.assertEqual(e.args[0], 'a', - "Expected RuntimeError for element 'a', but" + \ - " found %r" % e.args[0]) - - -# Test Current* members: -class 
PositionTest(unittest.TestCase): - def StartElementHandler(self, name, attrs): - self.check_pos('s') - - def EndElementHandler(self, name): - self.check_pos('e') - - def check_pos(self, event): - pos = (event, - self.parser.CurrentByteIndex, - self.parser.CurrentLineNumber, - self.parser.CurrentColumnNumber) - self.assertTrue(self.upto < len(self.expected_list), - 'too many parser events') - expected = self.expected_list[self.upto] - self.assertEqual(pos, expected, - 'Expected position %s, got position %s' %(pos, expected)) - self.upto += 1 - - def test(self): - self.parser = expat.ParserCreate() - self.parser.StartElementHandler = self.StartElementHandler - self.parser.EndElementHandler = self.EndElementHandler - self.upto = 0 - self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2), - ('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)] - - xml = '\n \n \n \n' - self.parser.Parse(xml, 1) - - -class sf1296433Test(unittest.TestCase): - def test_parse_only_xml_data(self): - # http://python.org/sf/1296433 - # - xml = "%s" % ('a' * 1025) - # this one doesn't crash - #xml = "%s" % ('a' * 10000) - - class SpecificException(Exception): - pass - - def handler(text): - raise SpecificException - - parser = expat.ParserCreate() - parser.CharacterDataHandler = handler - - self.assertRaises(Exception, parser.Parse, xml) - -class ChardataBufferTest(unittest.TestCase): - """ - test setting of chardata buffer size - """ - - def test_1025_bytes(self): - self.assertEqual(self.small_buffer_test(1025), 2) - - def test_1000_bytes(self): - self.assertEqual(self.small_buffer_test(1000), 1) - - def test_wrong_size(self): - parser = expat.ParserCreate() - parser.buffer_text = 1 - def f(size): - parser.buffer_size = size - - self.assertRaises(TypeError, f, sys.maxint+1) - self.assertRaises(ValueError, f, -1) - self.assertRaises(ValueError, f, 0) - - def test_unchanged_size(self): - xml1 = ("%s" % ('a' * 512)) - xml2 = 'a'*512 + '' - parser = expat.ParserCreate() - 
parser.CharacterDataHandler = self.counting_handler - parser.buffer_size = 512 - parser.buffer_text = 1 - - # Feed 512 bytes of character data: the handler should be called - # once. - self.n = 0 - parser.Parse(xml1) - self.assertEqual(self.n, 1) - - # Reassign to buffer_size, but assign the same size. - parser.buffer_size = parser.buffer_size - self.assertEqual(self.n, 1) - - # Try parsing rest of the document - parser.Parse(xml2) - self.assertEqual(self.n, 2) - - - def test_disabling_buffer(self): - xml1 = "%s" % ('a' * 512) - xml2 = ('b' * 1024) - xml3 = "%s" % ('c' * 1024) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_text = 1 - parser.buffer_size = 1024 - self.assertEqual(parser.buffer_size, 1024) - - # Parse one chunk of XML - self.n = 0 - parser.Parse(xml1, 0) - self.assertEqual(parser.buffer_size, 1024) - self.assertEqual(self.n, 1) - - # Turn off buffering and parse the next chunk. - parser.buffer_text = 0 - self.assertFalse(parser.buffer_text) - self.assertEqual(parser.buffer_size, 1024) - for i in range(10): - parser.Parse(xml2, 0) - self.assertEqual(self.n, 11) - - parser.buffer_text = 1 - self.assertTrue(parser.buffer_text) - self.assertEqual(parser.buffer_size, 1024) - parser.Parse(xml3, 1) - self.assertEqual(self.n, 12) - - - - def make_document(self, bytes): - return ("" + bytes * 'a' + '') - - def counting_handler(self, text): - self.n += 1 - - def small_buffer_test(self, buffer_len): - xml = "%s" % ('a' * buffer_len) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_size = 1024 - parser.buffer_text = 1 - - self.n = 0 - parser.Parse(xml) - return self.n - - def test_change_size_1(self): - xml1 = "%s" % ('a' * 1024) - xml2 = "aaa%s" % ('a' * 1025) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_text = 1 - parser.buffer_size = 1024 - self.assertEqual(parser.buffer_size, 1024) - - self.n = 0 
- parser.Parse(xml1, 0) - parser.buffer_size *= 2 - self.assertEqual(parser.buffer_size, 2048) - parser.Parse(xml2, 1) - self.assertEqual(self.n, 2) - - def test_change_size_2(self): - xml1 = "a%s" % ('a' * 1023) - xml2 = "aaa%s" % ('a' * 1025) - parser = expat.ParserCreate() - parser.CharacterDataHandler = self.counting_handler - parser.buffer_text = 1 - parser.buffer_size = 2048 - self.assertEqual(parser.buffer_size, 2048) - - self.n=0 - parser.Parse(xml1, 0) - parser.buffer_size //= 2 - self.assertEqual(parser.buffer_size, 1024) - parser.Parse(xml2, 1) - self.assertEqual(self.n, 4) - -class MalformedInputText(unittest.TestCase): - def test1(self): - xml = "\0\r\n" - parser = expat.ParserCreate() - try: - parser.Parse(xml, True) - self.fail() - except expat.ExpatError as e: - self.assertEqual(str(e), 'unclosed token: line 2, column 0') - - def test2(self): - xml = "\r\n" - parser = expat.ParserCreate() - try: - parser.Parse(xml, True) - self.fail() - except expat.ExpatError as e: - self.assertEqual(str(e), 'XML declaration not well-formed: line 1, column 14') - -def test_main(): - run_unittest(SetAttributeTest, - ParseTest, - NamespaceSeparatorTest, - InterningTest, - BufferTextTest, - HandlerExceptionTest, - PositionTest, - sf1296433Test, - ChardataBufferTest, - MalformedInputText) - -if __name__ == "__main__": - test_main() From commits-noreply at bitbucket.org Thu Apr 28 17:21:51 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 17:21:51 +0200 (CEST) Subject: [pypy-svn] pypy default: (anto, dario, rguillebert) Adds an app level test for the pyexpat Message-ID: <20110428152151.7626636C202@codespeak.net> Author: Romain Guillebert Branch: Changeset: r43721:dec1401601ca Date: 2011-04-28 17:19 +0200 http://bitbucket.org/pypy/pypy/changeset/dec1401601ca/ Log: (anto, dario, rguillebert) Adds an app level test for the pyexpat RPython module diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- 
a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -1,5 +1,6 @@ from pypy.conftest import gettestobjspace from pypy.module.pyexpat.interp_pyexpat import global_storage +from pytest import skip class AppTestPyexpat: def setup_class(cls): @@ -28,6 +29,15 @@ assert isinstance(pyexpat.version_info, tuple) assert isinstance(pyexpat.version_info[0], int) + def test_malformed_xml(self): + import sys + if sys.platform == "darwin": + skip("Fails with the version of expat on Mac OS 10.6.6") + import pyexpat + xml = "\0\r\n" + parser = pyexpat.ParserCreate() + raises(pyexpat.ExpatError, "parser.Parse(xml, True)") + def test_encoding(self): import pyexpat for encoding_arg in (None, 'utf-8', 'iso-8859-1'): From commits-noreply at bitbucket.org Thu Apr 28 17:21:56 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 17:21:56 +0200 (CEST) Subject: [pypy-svn] pypy default: Merge heads Message-ID: <20110428152156.78FD736C204@codespeak.net> Author: Romain Guillebert Branch: Changeset: r43722:cc753d45bee3 Date: 2011-04-28 17:21 +0200 http://bitbucket.org/pypy/pypy/changeset/cc753d45bee3/ Log: Merge heads diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py @@ -427,3 +427,28 @@ a[1].x = 33 u = dll.ret_un_func(a[1]) assert u.y == 33*10000 + + def test_warnings(self): + import warnings + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer() + assert len(w) == 2 + assert issubclass(w[0].category, RuntimeWarning) + assert issubclass(w[1].category, RuntimeWarning) + assert "C function without declared arguments called" in str(w[0].message) + assert "C function without declared return type called" in str(w[1].message) + + with warnings.catch_warnings(record=True) as w: + 
dll.get_an_integer.argtypes = [] + dll.get_an_integer() + assert len(w) == 1 + assert issubclass(w[0].category, RuntimeWarning) + assert "C function without declared return type called" in str(w[0].message) + + with warnings.catch_warnings(record=True) as w: + dll.get_an_integer.restype = None + dll.get_an_integer() + assert len(w) == 0 + + warnings.resetwarnings() diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -177,6 +177,7 @@ sqlite.sqlite3_errmsg.restype = c_char_p sqlite.sqlite3_get_autocommit.argtypes = [c_void_p] sqlite.sqlite3_get_autocommit.restype = c_int +sqlite.sqlite3_libversion.argtypes = [] sqlite.sqlite3_libversion.restype = c_char_p sqlite.sqlite3_open.argtypes = [c_char_p, c_void_p] sqlite.sqlite3_prepare_v2.argtypes = [c_void_p, c_char_p, c_int, c_void_p, POINTER(c_char_p)] diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py @@ -217,6 +217,7 @@ CTP = CFUNCTYPE(None) cfunc = dll._testfunc_callback_void cfunc.argtypes = [CTP] + cfunc.restype = int cfunc(CTP(callback)) out, err = capsys.readouterr() assert (out, err) == ("", "") diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -1,6 +1,7 @@ import _rawffi import sys import traceback +import warnings from _ctypes.basics import ArgumentError, keepalive_key from _ctypes.basics import _CData, _CDataMeta, cdata_from_address @@ -69,6 +70,8 @@ _com_index = None _com_iid = None + __restype_set = False + def _getargtypes(self): return self._argtypes_ @@ -134,6 +137,7 @@ return self._restype_ def _setrestype(self, restype): + self.__restype_set = True self._ptr = None if restype is int: from ctypes import c_int @@ -288,7 +292,13 @@ return if argtypes 
is None: + warnings.warn('C function without declared arguments called', + RuntimeWarning, stacklevel=2) argtypes = [] + + if not self.__restype_set: + warnings.warn('C function without declared return type called', + RuntimeWarning, stacklevel=2) if self._com_index: from ctypes import cast, c_void_p, POINTER diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- +from __future__ import with_statement from pypy.objspace.std import StdObjSpace from pypy.tool.udir import udir from pypy.conftest import gettestobjspace @@ -506,6 +507,7 @@ if hasattr(os, 'setuid'): def test_os_setuid_error(self): + skip("overflow checking disabled for now") os = self.posix raises((OSError, ValueError, OverflowError), os.setuid, -100000) @@ -527,6 +529,7 @@ if hasattr(os, 'setgid'): def test_os_setgid_error(self): + skip("overflow checking disabled for now") os = self.posix raises((OSError, ValueError, OverflowError), os.setgid, -100000) @@ -534,7 +537,7 @@ def test_os_getsid(self): os = self.posix assert os.getsid(0) == self.getsid0 - raises((OSError, ValueError, OverflowError), os.getsid, -100000) + raises(OSError, os.getsid, -100000) if hasattr(os, 'sysconf'): def test_os_sysconf(self): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -15,7 +15,12 @@ _WIN = sys.platform == 'win32' c_int = "c_int" -c_nonnegint = "c_nonnegint" + +# CPython 2.7 semantics are too messy to follow exactly, +# e.g. setuid(-2) works on 32-bit but not on 64-bit. As a result, +# we decided to just accept any 'int', i.e. any C signed long. 
+c_uid_t = int +c_gid_t = int class FileEncoder(object): def __init__(self, space, w_obj): @@ -823,7 +828,7 @@ """ return space.wrap(os.getuid()) - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=int) def setuid(space, arg): """ setuid(uid) @@ -835,7 +840,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=c_uid_t) def seteuid(space, arg): """ seteuid(uid) @@ -847,7 +852,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=c_gid_t) def setgid(space, arg): """ setgid(gid) @@ -859,7 +864,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(arg=c_nonnegint) + at unwrap_spec(arg=c_gid_t) def setegid(space, arg): """ setegid(gid) @@ -960,7 +965,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(ruid=c_int, euid=c_int) + at unwrap_spec(ruid=c_uid_t, euid=c_uid_t) def setreuid(space, ruid, euid): """ setreuid(ruid, euid) @@ -972,7 +977,7 @@ raise wrap_oserror(space, e) return space.w_None - at unwrap_spec(rgid=c_int, egid=c_int) + at unwrap_spec(rgid=c_gid_t, egid=c_gid_t) def setregid(space, rgid, egid): """ setregid(rgid, egid) @@ -1056,7 +1061,7 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(path=str, uid=c_int, gid=c_int) + at unwrap_spec(path=str, uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): try: os.chown(path, uid, gid) @@ -1064,7 +1069,7 @@ raise wrap_oserror(space, e, path) return space.w_None - at unwrap_spec(path=str, uid=c_int, gid=c_int) + at unwrap_spec(path=str, uid=c_uid_t, gid=c_gid_t) def lchown(space, path, uid, gid): try: os.lchown(path, uid, gid) From commits-noreply at bitbucket.org Thu Apr 28 17:29:17 2011 From: commits-noreply at bitbucket.org (rguillebert) Date: Thu, 28 Apr 2011 17:29:17 +0200 (CEST) Subject: [pypy-svn] pypy default: Puts back the original test_pyexpat Message-ID: <20110428152917.2CC0E36C204@codespeak.net> Author: Guillebert Romain Branch: 
Changeset: r43723:02d0c3e9ab01 Date: 2011-04-28 17:28 +0200 http://bitbucket.org/pypy/pypy/changeset/02d0c3e9ab01/ Log: Puts back the original test_pyexpat diff --git a/lib-python/2.7/test/test_pyexpat.py b/lib-python/2.7/test/test_pyexpat.py new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/test_pyexpat.py @@ -0,0 +1,593 @@ +# XXX TypeErrors on calling handlers, or on bad return values from a +# handler, are obscure and unhelpful. + +import StringIO, sys +import unittest + +from xml.parsers import expat + +from test.test_support import sortdict, run_unittest + + +class SetAttributeTest(unittest.TestCase): + def setUp(self): + self.parser = expat.ParserCreate(namespace_separator='!') + self.set_get_pairs = [ + [0, 0], + [1, 1], + [2, 1], + [0, 0], + ] + + def test_returns_unicode(self): + for x, y in self.set_get_pairs: + self.parser.returns_unicode = x + self.assertEqual(self.parser.returns_unicode, y) + + def test_ordered_attributes(self): + for x, y in self.set_get_pairs: + self.parser.ordered_attributes = x + self.assertEqual(self.parser.ordered_attributes, y) + + def test_specified_attributes(self): + for x, y in self.set_get_pairs: + self.parser.specified_attributes = x + self.assertEqual(self.parser.specified_attributes, y) + + +data = '''\ + + + + + + + + +%unparsed_entity; +]> + + + + Contents of subelements + + +&external_entity; + +''' + + +# Produce UTF-8 output +class ParseTest(unittest.TestCase): + class Outputter: + def __init__(self): + self.out = [] + + def StartElementHandler(self, name, attrs): + self.out.append('Start element: ' + repr(name) + ' ' + + sortdict(attrs)) + + def EndElementHandler(self, name): + self.out.append('End element: ' + repr(name)) + + def CharacterDataHandler(self, data): + data = data.strip() + if data: + self.out.append('Character data: ' + repr(data)) + + def ProcessingInstructionHandler(self, target, data): + self.out.append('PI: ' + repr(target) + ' ' + repr(data)) + + def StartNamespaceDeclHandler(self, 
prefix, uri): + self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri)) + + def EndNamespaceDeclHandler(self, prefix): + self.out.append('End of NS decl: ' + repr(prefix)) + + def StartCdataSectionHandler(self): + self.out.append('Start of CDATA section') + + def EndCdataSectionHandler(self): + self.out.append('End of CDATA section') + + def CommentHandler(self, text): + self.out.append('Comment: ' + repr(text)) + + def NotationDeclHandler(self, *args): + name, base, sysid, pubid = args + self.out.append('Notation declared: %s' %(args,)) + + def UnparsedEntityDeclHandler(self, *args): + entityName, base, systemId, publicId, notationName = args + self.out.append('Unparsed entity decl: %s' %(args,)) + + def NotStandaloneHandler(self, userData): + self.out.append('Not standalone') + return 1 + + def ExternalEntityRefHandler(self, *args): + context, base, sysId, pubId = args + self.out.append('External entity ref: %s' %(args[1:],)) + return 1 + + def DefaultHandler(self, userData): + pass + + def DefaultHandlerExpand(self, userData): + pass + + handler_names = [ + 'StartElementHandler', 'EndElementHandler', + 'CharacterDataHandler', 'ProcessingInstructionHandler', + 'UnparsedEntityDeclHandler', 'NotationDeclHandler', + 'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler', + 'CommentHandler', 'StartCdataSectionHandler', + 'EndCdataSectionHandler', + 'DefaultHandler', 'DefaultHandlerExpand', + #'NotStandaloneHandler', + 'ExternalEntityRefHandler' + ] + + def test_utf8(self): + + out = self.Outputter() + parser = expat.ParserCreate(namespace_separator='!') + for name in self.handler_names: + setattr(parser, name, getattr(out, name)) + parser.returns_unicode = 0 + parser.Parse(data, 1) + + # Verify output + op = out.out + self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'') + self.assertEqual(op[1], "Comment: ' comment data '") + self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)") + self.assertEqual(op[3], 
"Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')") + self.assertEqual(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}") + self.assertEqual(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'") + self.assertEqual(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}") + self.assertEqual(op[7], "Character data: 'Contents of subelements'") + self.assertEqual(op[8], "End element: 'http://www.python.org/namespace!subelement'") + self.assertEqual(op[9], "End of NS decl: 'myns'") + self.assertEqual(op[10], "Start element: 'sub2' {}") + self.assertEqual(op[11], 'Start of CDATA section') + self.assertEqual(op[12], "Character data: 'contents of CDATA section'") + self.assertEqual(op[13], 'End of CDATA section') + self.assertEqual(op[14], "End element: 'sub2'") + self.assertEqual(op[15], "External entity ref: (None, 'entity.file', None)") + self.assertEqual(op[16], "End element: 'root'") + + def test_unicode(self): + # Try the parse again, this time producing Unicode output + out = self.Outputter() + parser = expat.ParserCreate(namespace_separator='!') + parser.returns_unicode = 1 + for name in self.handler_names: + setattr(parser, name, getattr(out, name)) + + parser.Parse(data, 1) + + op = out.out + self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') + self.assertEqual(op[1], "Comment: u' comment data '") + self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") + self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") + self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") + self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") + self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") + self.assertEqual(op[7], "Character data: u'Contents of subelements'") + 
self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'") + self.assertEqual(op[9], "End of NS decl: u'myns'") + self.assertEqual(op[10], "Start element: u'sub2' {}") + self.assertEqual(op[11], 'Start of CDATA section') + self.assertEqual(op[12], "Character data: u'contents of CDATA section'") + self.assertEqual(op[13], 'End of CDATA section') + self.assertEqual(op[14], "End element: u'sub2'") + self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)") + self.assertEqual(op[16], "End element: u'root'") + + def test_parse_file(self): + # Try parsing a file + out = self.Outputter() + parser = expat.ParserCreate(namespace_separator='!') + parser.returns_unicode = 1 + for name in self.handler_names: + setattr(parser, name, getattr(out, name)) + file = StringIO.StringIO(data) + + parser.ParseFile(file) + + op = out.out + self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') + self.assertEqual(op[1], "Comment: u' comment data '") + self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") + self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") + self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") + self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") + self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") + self.assertEqual(op[7], "Character data: u'Contents of subelements'") + self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'") + self.assertEqual(op[9], "End of NS decl: u'myns'") + self.assertEqual(op[10], "Start element: u'sub2' {}") + self.assertEqual(op[11], 'Start of CDATA section') + self.assertEqual(op[12], "Character data: u'contents of CDATA section'") + self.assertEqual(op[13], 'End of CDATA section') + self.assertEqual(op[14], "End element: u'sub2'") + 
self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)") + self.assertEqual(op[16], "End element: u'root'") + + +class NamespaceSeparatorTest(unittest.TestCase): + def test_legal(self): + # Tests that make sure we get errors when the namespace_separator value + # is illegal, and that we don't for good values: + expat.ParserCreate() + expat.ParserCreate(namespace_separator=None) + expat.ParserCreate(namespace_separator=' ') + + def test_illegal(self): + try: + expat.ParserCreate(namespace_separator=42) + self.fail() + except TypeError, e: + self.assertEqual(str(e), + 'ParserCreate() argument 2 must be string or None, not int') + + try: + expat.ParserCreate(namespace_separator='too long') + self.fail() + except ValueError, e: + self.assertEqual(str(e), + 'namespace_separator must be at most one character, omitted, or None') + + def test_zero_length(self): + # ParserCreate() needs to accept a namespace_separator of zero length + # to satisfy the requirements of RDF applications that are required + # to simply glue together the namespace URI and the localname. Though + # considered a wart of the RDF specifications, it needs to be supported. + # + # See XML-SIG mailing list thread starting with + # http://mail.python.org/pipermail/xml-sig/2001-April/005202.html + # + expat.ParserCreate(namespace_separator='') # too short + + +class InterningTest(unittest.TestCase): + def test(self): + # Test the interning machinery. + p = expat.ParserCreate() + L = [] + def collector(name, *args): + L.append(name) + p.StartElementHandler = collector + p.EndElementHandler = collector + p.Parse(" ", 1) + tag = L[0] + self.assertEqual(len(L), 6) + for entry in L: + # L should have the same string repeated over and over. 
+ self.assertTrue(tag is entry) + + +class BufferTextTest(unittest.TestCase): + def setUp(self): + self.stuff = [] + self.parser = expat.ParserCreate() + self.parser.buffer_text = 1 + self.parser.CharacterDataHandler = self.CharacterDataHandler + + def check(self, expected, label): + self.assertEqual(self.stuff, expected, + "%s\nstuff = %r\nexpected = %r" + % (label, self.stuff, map(unicode, expected))) + + def CharacterDataHandler(self, text): + self.stuff.append(text) + + def StartElementHandler(self, name, attrs): + self.stuff.append("<%s>" % name) + bt = attrs.get("buffer-text") + if bt == "yes": + self.parser.buffer_text = 1 + elif bt == "no": + self.parser.buffer_text = 0 + + def EndElementHandler(self, name): + self.stuff.append("" % name) + + def CommentHandler(self, data): + self.stuff.append("" % data) + + def setHandlers(self, handlers=[]): + for name in handlers: + setattr(self.parser, name, getattr(self, name)) + + def test_default_to_disabled(self): + parser = expat.ParserCreate() + self.assertFalse(parser.buffer_text) + + def test_buffering_enabled(self): + # Make sure buffering is turned on + self.assertTrue(self.parser.buffer_text) + self.parser.Parse("123", 1) + self.assertEqual(self.stuff, ['123'], + "buffered text not properly collapsed") + + def test1(self): + # XXX This test exposes more detail of Expat's text chunking than we + # XXX like, but it tests what we need to concisely. 
+ self.setHandlers(["StartElementHandler"]) + self.parser.Parse("12\n34\n5", 1) + self.assertEqual(self.stuff, + ["", "1", "", "2", "\n", "3", "", "4\n5"], + "buffering control not reacting as expected") + + def test2(self): + self.parser.Parse("1<2> \n 3", 1) + self.assertEqual(self.stuff, ["1<2> \n 3"], + "buffered text not properly collapsed") + + def test3(self): + self.setHandlers(["StartElementHandler"]) + self.parser.Parse("123", 1) + self.assertEqual(self.stuff, ["", "1", "", "2", "", "3"], + "buffered text not properly split") + + def test4(self): + self.setHandlers(["StartElementHandler", "EndElementHandler"]) + self.parser.CharacterDataHandler = None + self.parser.Parse("123", 1) + self.assertEqual(self.stuff, + ["", "", "", "", "", ""]) + + def test5(self): + self.setHandlers(["StartElementHandler", "EndElementHandler"]) + self.parser.Parse("123", 1) + self.assertEqual(self.stuff, + ["", "1", "", "", "2", "", "", "3", ""]) + + def test6(self): + self.setHandlers(["CommentHandler", "EndElementHandler", + "StartElementHandler"]) + self.parser.Parse("12345 ", 1) + self.assertEqual(self.stuff, + ["", "1", "", "", "2", "", "", "345", ""], + "buffered text not properly split") + + def test7(self): + self.setHandlers(["CommentHandler", "EndElementHandler", + "StartElementHandler"]) + self.parser.Parse("12345 ", 1) + self.assertEqual(self.stuff, + ["", "1", "", "", "2", "", "", "3", + "", "4", "", "5", ""], + "buffered text not properly split") + + +# Test handling of exception from callback: +class HandlerExceptionTest(unittest.TestCase): + def StartElementHandler(self, name, attrs): + raise RuntimeError(name) + + def test(self): + parser = expat.ParserCreate() + parser.StartElementHandler = self.StartElementHandler + try: + parser.Parse("", 1) + self.fail() + except RuntimeError, e: + self.assertEqual(e.args[0], 'a', + "Expected RuntimeError for element 'a', but" + \ + " found %r" % e.args[0]) + + +# Test Current* members: +class 
PositionTest(unittest.TestCase): + def StartElementHandler(self, name, attrs): + self.check_pos('s') + + def EndElementHandler(self, name): + self.check_pos('e') + + def check_pos(self, event): + pos = (event, + self.parser.CurrentByteIndex, + self.parser.CurrentLineNumber, + self.parser.CurrentColumnNumber) + self.assertTrue(self.upto < len(self.expected_list), + 'too many parser events') + expected = self.expected_list[self.upto] + self.assertEqual(pos, expected, + 'Expected position %s, got position %s' %(pos, expected)) + self.upto += 1 + + def test(self): + self.parser = expat.ParserCreate() + self.parser.StartElementHandler = self.StartElementHandler + self.parser.EndElementHandler = self.EndElementHandler + self.upto = 0 + self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2), + ('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)] + + xml = '\n \n \n \n' + self.parser.Parse(xml, 1) + + +class sf1296433Test(unittest.TestCase): + def test_parse_only_xml_data(self): + # http://python.org/sf/1296433 + # + xml = "%s" % ('a' * 1025) + # this one doesn't crash + #xml = "%s" % ('a' * 10000) + + class SpecificException(Exception): + pass + + def handler(text): + raise SpecificException + + parser = expat.ParserCreate() + parser.CharacterDataHandler = handler + + self.assertRaises(Exception, parser.Parse, xml) + +class ChardataBufferTest(unittest.TestCase): + """ + test setting of chardata buffer size + """ + + def test_1025_bytes(self): + self.assertEqual(self.small_buffer_test(1025), 2) + + def test_1000_bytes(self): + self.assertEqual(self.small_buffer_test(1000), 1) + + def test_wrong_size(self): + parser = expat.ParserCreate() + parser.buffer_text = 1 + def f(size): + parser.buffer_size = size + + self.assertRaises(TypeError, f, sys.maxint+1) + self.assertRaises(ValueError, f, -1) + self.assertRaises(ValueError, f, 0) + + def test_unchanged_size(self): + xml1 = ("%s" % ('a' * 512)) + xml2 = 'a'*512 + '' + parser = expat.ParserCreate() + 
parser.CharacterDataHandler = self.counting_handler + parser.buffer_size = 512 + parser.buffer_text = 1 + + # Feed 512 bytes of character data: the handler should be called + # once. + self.n = 0 + parser.Parse(xml1) + self.assertEqual(self.n, 1) + + # Reassign to buffer_size, but assign the same size. + parser.buffer_size = parser.buffer_size + self.assertEqual(self.n, 1) + + # Try parsing rest of the document + parser.Parse(xml2) + self.assertEqual(self.n, 2) + + + def test_disabling_buffer(self): + xml1 = "%s" % ('a' * 512) + xml2 = ('b' * 1024) + xml3 = "%s" % ('c' * 1024) + parser = expat.ParserCreate() + parser.CharacterDataHandler = self.counting_handler + parser.buffer_text = 1 + parser.buffer_size = 1024 + self.assertEqual(parser.buffer_size, 1024) + + # Parse one chunk of XML + self.n = 0 + parser.Parse(xml1, 0) + self.assertEqual(parser.buffer_size, 1024) + self.assertEqual(self.n, 1) + + # Turn off buffering and parse the next chunk. + parser.buffer_text = 0 + self.assertFalse(parser.buffer_text) + self.assertEqual(parser.buffer_size, 1024) + for i in range(10): + parser.Parse(xml2, 0) + self.assertEqual(self.n, 11) + + parser.buffer_text = 1 + self.assertTrue(parser.buffer_text) + self.assertEqual(parser.buffer_size, 1024) + parser.Parse(xml3, 1) + self.assertEqual(self.n, 12) + + + + def make_document(self, bytes): + return ("" + bytes * 'a' + '') + + def counting_handler(self, text): + self.n += 1 + + def small_buffer_test(self, buffer_len): + xml = "%s" % ('a' * buffer_len) + parser = expat.ParserCreate() + parser.CharacterDataHandler = self.counting_handler + parser.buffer_size = 1024 + parser.buffer_text = 1 + + self.n = 0 + parser.Parse(xml) + return self.n + + def test_change_size_1(self): + xml1 = "%s" % ('a' * 1024) + xml2 = "aaa%s" % ('a' * 1025) + parser = expat.ParserCreate() + parser.CharacterDataHandler = self.counting_handler + parser.buffer_text = 1 + parser.buffer_size = 1024 + self.assertEqual(parser.buffer_size, 1024) + + self.n = 0 
+ parser.Parse(xml1, 0) + parser.buffer_size *= 2 + self.assertEqual(parser.buffer_size, 2048) + parser.Parse(xml2, 1) + self.assertEqual(self.n, 2) + + def test_change_size_2(self): + xml1 = "a%s" % ('a' * 1023) + xml2 = "aaa%s" % ('a' * 1025) + parser = expat.ParserCreate() + parser.CharacterDataHandler = self.counting_handler + parser.buffer_text = 1 + parser.buffer_size = 2048 + self.assertEqual(parser.buffer_size, 2048) + + self.n=0 + parser.Parse(xml1, 0) + parser.buffer_size //= 2 + self.assertEqual(parser.buffer_size, 1024) + parser.Parse(xml2, 1) + self.assertEqual(self.n, 4) + +class MalformedInputText(unittest.TestCase): + def test1(self): + xml = "\0\r\n" + parser = expat.ParserCreate() + try: + parser.Parse(xml, True) + self.fail() + except expat.ExpatError as e: + self.assertEqual(str(e), 'unclosed token: line 2, column 0') + + def test2(self): + xml = "\r\n" + parser = expat.ParserCreate() + try: + parser.Parse(xml, True) + self.fail() + except expat.ExpatError as e: + self.assertEqual(str(e), 'XML declaration not well-formed: line 1, column 14') + +def test_main(): + run_unittest(SetAttributeTest, + ParseTest, + NamespaceSeparatorTest, + InterningTest, + BufferTextTest, + HandlerExceptionTest, + PositionTest, + sf1296433Test, + ChardataBufferTest, + MalformedInputText) + +if __name__ == "__main__": + test_main() From commits-noreply at bitbucket.org Thu Apr 28 17:39:48 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Thu, 28 Apr 2011 17:39:48 +0200 (CEST) Subject: [pypy-svn] pypy default: add the possibility to ignore the matching of arguments Message-ID: <20110428153948.6BA3B282B9E@codespeak.net> Author: Antonio Cuni Branch: Changeset: r43724:f4e1447c4e0c Date: 2011-04-28 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/f4e1447c4e0c/ Log: add the possibility to ignore the matching of arguments diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- 
a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -285,9 +285,10 @@ def match_op(self, op, (exp_opname, exp_res, exp_args, exp_descr)): self._assert(op.name == exp_opname, "operation mismatch") self.match_var(op.res, exp_res) - self._assert(len(op.args) == len(exp_args), "wrong number of arguments") - for arg, exp_arg in zip(op.args, exp_args): - self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) + if exp_args != ['...']: + self._assert(len(op.args) == len(exp_args), "wrong number of arguments") + for arg, exp_arg in zip(op.args, exp_args): + self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) self.match_descr(op.descr, exp_descr) diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -105,7 +105,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('call_rec', """ ... - p53 = call_assembler(p35, p7, ConstPtr(ptr21), ConstPtr(ptr49), 0, ConstPtr(ptr51), -1, ConstPtr(ptr52), ConstPtr(ptr52), ConstPtr(ptr52), ConstPtr(ptr52), ConstPtr(ptr48), descr=...) + p53 = call_assembler(..., descr=...) guard_not_forced(descr=...) guard_no_exception(descr=...) ... diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -251,6 +251,18 @@ """ assert self.match(loop, expected, ignore_ops=['force_token']) + def test_match_dots_in_arguments(self): + loop = """ + [i0] + i1 = int_add(0, 1) + jump(i4, descr=...) + """ + expected = """ + i1 = int_add(...) + jump(i4, descr=...) 
+ """ + assert self.match(loop, expected) + class TestRunPyPyC(BaseTestPyPyC): From commits-noreply at bitbucket.org Thu Apr 28 17:41:28 2011 From: commits-noreply at bitbucket.org (hpk42) Date: Thu, 28 Apr 2011 17:41:28 +0200 (CEST) Subject: [pypy-svn] pypy default: use actually released pytest-2.0.3 / py-1.4.3 versions Message-ID: <20110428154128.14E8B282B9E@codespeak.net> Author: holger krekel Branch: Changeset: r43725:e5594dda5978 Date: 2011-04-28 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/e5594dda5978/ Log: use actually released pytest-2.0.3 / py-1.4.3 versions (minor fixes compared to the versions pypy already used) diff --git a/_pytest/assertion.py b/_pytest/assertion.py --- a/_pytest/assertion.py +++ b/_pytest/assertion.py @@ -16,7 +16,8 @@ # py._code._assertionnew to detect this plugin was loaded and in # turn call the hooks defined here as part of the # DebugInterpreter. - config._monkeypatch = m = monkeypatch() + m = monkeypatch() + config._cleanup.append(m.undo) warn_about_missing_assertion() if not config.getvalue("noassert") and not config.getvalue("nomagic"): def callbinrepr(op, left, right): @@ -29,9 +30,6 @@ 'AssertionError', py.code._AssertionError) m.setattr(py.code, '_reprcompare', callbinrepr) -def pytest_unconfigure(config): - config._monkeypatch.undo() - def warn_about_missing_assertion(): try: assert False diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.0.3.dev3' +__version__ = '2.0.3' diff --git a/_pytest/main.py b/_pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -71,7 +71,7 @@ session.exitstatus = EXIT_INTERRUPTED except: excinfo = py.code.ExceptionInfo() - config.pluginmanager.notify_exception(excinfo) + config.pluginmanager.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") diff --git 
a/py/__init__.py b/py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -8,7 +8,7 @@ (c) Holger Krekel and others, 2004-2010 """ -__version__ = '1.4.3.dev0' +__version__ = '1.4.3' from py import _apipkg diff --git a/_pytest/core.py b/_pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -265,8 +265,15 @@ config.hook.pytest_unconfigure(config=config) config.pluginmanager.unregister(self) - def notify_exception(self, excinfo): - excrepr = excinfo.getrepr(funcargs=True, showlocals=True) + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr(funcargs=True, + showlocals=getattr(option, 'showlocals', False), + style=style, + ) res = self.hook.pytest_internalerror(excrepr=excrepr) if not py.builtin.any(res): for line in str(excrepr).split("\n"): diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -48,15 +48,12 @@ self.trace("finish") def pytest_configure(config): - config._mp = mp = monkeypatch() + mp = monkeypatch() t = TempdirHandler(config) + config._cleanup.extend([mp.undo, t.finish]) mp.setattr(config, '_tmpdirhandler', t, raising=False) mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) -def pytest_unconfigure(config): - config._tmpdirhandler.finish() - config._mp.undo() - def pytest_funcarg__tmpdir(request): """return a temporary directory path object which is unique to each test function invocation, diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -84,7 +84,8 @@ while len(text) > 32768: file.write(text[:32768]) text = text[32768:] - file.write(text) + if text: + file.write(text) SetConsoleTextAttribute(handle, oldcolors) else: file.write(text) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = 
str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr) + longrepr = str(report.longrepr[2]) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/_pytest/pytester.py b/_pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -236,13 +236,14 @@ def _makefile(self, ext, args, kwargs): items = list(kwargs.items()) if args: - source = "\n".join(map(str, args)) + "\n" + source = py.builtin._totext("\n").join( + map(py.builtin._totext, args)) + py.builtin._totext("\n") basename = self.request.function.__name__ items.insert(0, (basename, source)) ret = None for name, value in items: p = self.tmpdir.join(name).new(ext=ext) - source = str(py.code.Source(value)).lstrip() + source = py.builtin._totext(py.code.Source(value)).lstrip() p.write(source.encode("utf-8"), "wb") if ret is None: ret = p diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -5,8 +5,42 @@ import py import os +import re +import sys import time + +# Python 2.X and 3.X compatibility +try: + unichr(65) +except NameError: + unichr = chr +try: + unicode('A') +except NameError: + unicode = str +try: + long(1) +except NameError: + long = int + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. 
The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19), + (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)] +_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high)) + for (low, high) in _illegal_unichrs + if low < sys.maxunicode] +illegal_xml_re = re.compile(unicode('[%s]') % + unicode('').join(_illegal_ranges)) +del _illegal_unichrs +del _illegal_ranges + + def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group.addoption('--junitxml', action="store", dest="xmlpath", @@ -28,6 +62,7 @@ del config._xml config.pluginmanager.unregister(xml) + class LogXML(object): def __init__(self, logfile, prefix): self.logfile = logfile @@ -55,7 +90,14 @@ self.test_logs.append("") def appendlog(self, fmt, *args): - args = tuple([py.xml.escape(arg) for arg in args]) + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return unicode('#x%02X') % i + else: + return unicode('#x%04X') % i + args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg)) + for arg in args]) self.test_logs.append(fmt % args) def append_pass(self, report): diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -12,6 +12,10 @@ config.trace.root.setwriter(sys.stderr.write) return config +def pytest_unconfigure(config): + for func in config._cleanup: + func() + class Parser: """ Parser for command line arguments. """ @@ -251,7 +255,8 @@ self._conftest = Conftest(onimport=self._onimportconftest) self.hook = self.pluginmanager.hook self._inicache = {} - + self._cleanup = [] + @classmethod def fromdictargs(cls, option_dict, args): """ constructor useable for subprocesses. 
""" From commits-noreply at bitbucket.org Thu Apr 28 17:46:42 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 17:46:42 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, arigo) Message-ID: <20110428154642.8134E282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43726:79fa6adbce6c Date: 2011-04-28 17:45 +0200 http://bitbucket.org/pypy/pypy/changeset/79fa6adbce6c/ Log: (iko, rguillebert, arigo) Fix interp_locale to call charp2str() before freeing some apparently unrelated string, because the string may turn out to not be unrelated after all. diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py --- a/pypy/module/_locale/interp_locale.py +++ b/pypy/module/_locale/interp_locale.py @@ -221,6 +221,10 @@ msg_c = rffi.str2charp(msg) try: result = _dgettext(domain, msg_c) + # note that 'result' may be the same pointer as 'msg_c', + # so it must be converted to an RPython string *before* + # we free msg_c. + result = rffi.charp2str(result) finally: rffi.free_charp(msg_c) else: @@ -229,11 +233,15 @@ msg_c = rffi.str2charp(msg) try: result = _dgettext(domain_c, msg_c) + # note that 'result' may be the same pointer as 'msg_c', + # so it must be converted to an RPython string *before* + # we free msg_c. + result = rffi.charp2str(result) finally: rffi.free_charp(domain_c) rffi.free_charp(msg_c) - return space.wrap(rffi.charp2str(result)) + return space.wrap(result) _dcgettext = rlocale.external('dcgettext', [rffi.CCHARP, rffi.CCHARP, rffi.INT], rffi.CCHARP) @@ -248,6 +256,10 @@ msg_c = rffi.str2charp(msg) try: result = _dcgettext(domain, msg_c, rffi.cast(rffi.INT, category)) + # note that 'result' may be the same pointer as 'msg_c', + # so it must be converted to an RPython string *before* + # we free msg_c. 
+ result = rffi.charp2str(result) finally: rffi.free_charp(msg_c) else: @@ -257,11 +269,15 @@ try: result = _dcgettext(domain_c, msg_c, rffi.cast(rffi.INT, category)) + # note that 'result' may be the same pointer as 'msg_c', + # so it must be converted to an RPython string *before* + # we free msg_c. + result = rffi.charp2str(result) finally: rffi.free_charp(domain_c) rffi.free_charp(msg_c) - return space.wrap(rffi.charp2str(result)) + return space.wrap(result) _textdomain = rlocale.external('textdomain', [rffi.CCHARP], rffi.CCHARP) @@ -273,15 +289,20 @@ if space.is_w(w_domain, space.w_None): domain = None result = _textdomain(domain) + result = rffi.charp2str(result) else: domain = space.str_w(w_domain) domain_c = rffi.str2charp(domain) try: result = _textdomain(domain_c) + # note that 'result' may be the same pointer as 'domain_c' + # (maybe?) so it must be converted to an RPython string + # *before* we free domain_c. + result = rffi.charp2str(result) finally: rffi.free_charp(domain_c) - return space.wrap(rffi.charp2str(result)) + return space.wrap(result) _bindtextdomain = rlocale.external('bindtextdomain', [rffi.CCHARP, rffi.CCHARP], rffi.CCHARP) diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py --- a/lib_pypy/_locale.py +++ b/lib_pypy/_locale.py @@ -1,4 +1,9 @@ # ctypes implementation of _locale module by Victor Stinner, 2008-03-27 + +# ------------------------------------------------------------ +# Note that we also have our own interp-level implementation +# ------------------------------------------------------------ + """ Support for POSIX locales. 
""" From commits-noreply at bitbucket.org Thu Apr 28 17:46:48 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 17:46:48 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110428154648.C807D282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43727:2cc88a5940af Date: 2011-04-28 17:46 +0200 http://bitbucket.org/pypy/pypy/changeset/2cc88a5940af/ Log: merge heads diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -285,9 +285,10 @@ def match_op(self, op, (exp_opname, exp_res, exp_args, exp_descr)): self._assert(op.name == exp_opname, "operation mismatch") self.match_var(op.res, exp_res) - self._assert(len(op.args) == len(exp_args), "wrong number of arguments") - for arg, exp_arg in zip(op.args, exp_args): - self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) + if exp_args != ['...']: + self._assert(len(op.args) == len(exp_args), "wrong number of arguments") + for arg, exp_arg in zip(op.args, exp_args): + self._assert(self.match_var(arg, exp_arg), "variable mismatch: %r instead of %r" % (arg, exp_arg)) self.match_descr(op.descr, exp_descr) diff --git a/_pytest/assertion.py b/_pytest/assertion.py --- a/_pytest/assertion.py +++ b/_pytest/assertion.py @@ -16,7 +16,8 @@ # py._code._assertionnew to detect this plugin was loaded and in # turn call the hooks defined here as part of the # DebugInterpreter. 
- config._monkeypatch = m = monkeypatch() + m = monkeypatch() + config._cleanup.append(m.undo) warn_about_missing_assertion() if not config.getvalue("noassert") and not config.getvalue("nomagic"): def callbinrepr(op, left, right): @@ -29,9 +30,6 @@ 'AssertionError', py.code._AssertionError) m.setattr(py.code, '_reprcompare', callbinrepr) -def pytest_unconfigure(config): - config._monkeypatch.undo() - def warn_about_missing_assertion(): try: assert False diff --git a/lib-python/2.7/test/test_pyexpat.py b/lib-python/modified-2.7/test/test_pyexpat.py copy from lib-python/2.7/test/test_pyexpat.py copy to lib-python/modified-2.7/test/test_pyexpat.py --- a/lib-python/2.7/test/test_pyexpat.py +++ b/lib-python/modified-2.7/test/test_pyexpat.py @@ -559,6 +559,9 @@ self.assertEqual(self.n, 4) class MalformedInputText(unittest.TestCase): + # CPython seems to ship its own version of expat, they fixed it on this commit : + # http://svn.python.org/view?revision=74429&view=revision + @unittest.skipIf(sys.platform == "darwin", "Expat is broken on Mac OS X 10.6.6") def test1(self): xml = "\0\r\n" parser = expat.ParserCreate() @@ -568,6 +571,7 @@ except expat.ExpatError as e: self.assertEqual(str(e), 'unclosed token: line 2, column 0') + @unittest.skipIf(sys.platform == "darwin", "Expat is broken on Mac OS X 10.6.6") def test2(self): xml = "\r\n" parser = expat.ParserCreate() diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.0.3.dev3' +__version__ = '2.0.3' diff --git a/_pytest/main.py b/_pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -71,7 +71,7 @@ session.exitstatus = EXIT_INTERRUPTED except: excinfo = py.code.ExceptionInfo() - config.pluginmanager.notify_exception(excinfo) + config.pluginmanager.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") 
diff --git a/py/__init__.py b/py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -8,7 +8,7 @@ (c) Holger Krekel and others, 2004-2010 """ -__version__ = '1.4.3.dev0' +__version__ = '1.4.3' from py import _apipkg diff --git a/_pytest/core.py b/_pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -265,8 +265,15 @@ config.hook.pytest_unconfigure(config=config) config.pluginmanager.unregister(self) - def notify_exception(self, excinfo): - excrepr = excinfo.getrepr(funcargs=True, showlocals=True) + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr(funcargs=True, + showlocals=getattr(option, 'showlocals', False), + style=style, + ) res = self.hook.pytest_internalerror(excrepr=excrepr) if not py.builtin.any(res): for line in str(excrepr).split("\n"): diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -1,5 +1,6 @@ from pypy.conftest import gettestobjspace from pypy.module.pyexpat.interp_pyexpat import global_storage +from pytest import skip class AppTestPyexpat: def setup_class(cls): @@ -28,6 +29,15 @@ assert isinstance(pyexpat.version_info, tuple) assert isinstance(pyexpat.version_info[0], int) + def test_malformed_xml(self): + import sys + if sys.platform == "darwin": + skip("Fails with the version of expat on Mac OS 10.6.6") + import pyexpat + xml = "\0\r\n" + parser = pyexpat.ParserCreate() + raises(pyexpat.ExpatError, "parser.Parse(xml, True)") + def test_encoding(self): import pyexpat for encoding_arg in (None, 'utf-8', 'iso-8859-1'): diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -84,7 +84,8 @@ while len(text) > 32768: file.write(text[:32768]) text = text[32768:] - file.write(text) + if text: + file.write(text) 
SetConsoleTextAttribute(handle, oldcolors) else: file.write(text) diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -48,15 +48,12 @@ self.trace("finish") def pytest_configure(config): - config._mp = mp = monkeypatch() + mp = monkeypatch() t = TempdirHandler(config) + config._cleanup.extend([mp.undo, t.finish]) mp.setattr(config, '_tmpdirhandler', t, raising=False) mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) -def pytest_unconfigure(config): - config._tmpdirhandler.finish() - config._mp.undo() - def pytest_funcarg__tmpdir(request): """return a temporary directory path object which is unique to each test function invocation, diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -105,7 +105,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('call_rec', """ ... - p53 = call_assembler(p35, p7, ConstPtr(ptr21), ConstPtr(ptr49), 0, ConstPtr(ptr51), -1, ConstPtr(ptr52), ConstPtr(ptr52), ConstPtr(ptr52), ConstPtr(ptr52), ConstPtr(ptr48), descr=...) + p53 = call_assembler(..., descr=...) guard_not_forced(descr=...) guard_no_exception(descr=...) ... 
diff --git a/_pytest/pytester.py b/_pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -236,13 +236,14 @@ def _makefile(self, ext, args, kwargs): items = list(kwargs.items()) if args: - source = "\n".join(map(str, args)) + "\n" + source = py.builtin._totext("\n").join( + map(py.builtin._totext, args)) + py.builtin._totext("\n") basename = self.request.function.__name__ items.insert(0, (basename, source)) ret = None for name, value in items: p = self.tmpdir.join(name).new(ext=ext) - source = str(py.code.Source(value)).lstrip() + source = py.builtin._totext(py.code.Source(value)).lstrip() p.write(source.encode("utf-8"), "wb") if ret is None: ret = p diff --git a/pypy/module/pypyjit/test_pypy_c/test_model.py b/pypy/module/pypyjit/test_pypy_c/test_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_model.py @@ -251,6 +251,18 @@ """ assert self.match(loop, expected, ignore_ops=['force_token']) + def test_match_dots_in_arguments(self): + loop = """ + [i0] + i1 = int_add(0, 1) + jump(i4, descr=...) + """ + expected = """ + i1 = int_add(...) + jump(i4, descr=...) + """ + assert self.match(loop, expected) + class TestRunPyPyC(BaseTestPyPyC): diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -5,8 +5,42 @@ import py import os +import re +import sys import time + +# Python 2.X and 3.X compatibility +try: + unichr(65) +except NameError: + unichr = chr +try: + unicode('A') +except NameError: + unicode = str +try: + long(1) +except NameError: + long = int + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. 
The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19), + (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)] +_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high)) + for (low, high) in _illegal_unichrs + if low < sys.maxunicode] +illegal_xml_re = re.compile(unicode('[%s]') % + unicode('').join(_illegal_ranges)) +del _illegal_unichrs +del _illegal_ranges + + def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group.addoption('--junitxml', action="store", dest="xmlpath", @@ -28,6 +62,7 @@ del config._xml config.pluginmanager.unregister(xml) + class LogXML(object): def __init__(self, logfile, prefix): self.logfile = logfile @@ -55,7 +90,14 @@ self.test_logs.append("") def appendlog(self, fmt, *args): - args = tuple([py.xml.escape(arg) for arg in args]) + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return unicode('#x%02X') % i + else: + return unicode('#x%04X') % i + args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg)) + for arg in args]) self.test_logs.append(fmt % args) def append_pass(self, report): diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -74,7 +74,7 @@ elif report.failed: longrepr = str(report.longrepr) elif report.skipped: - longrepr = str(report.longrepr) + longrepr = str(report.longrepr[2]) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -12,6 +12,10 @@ config.trace.root.setwriter(sys.stderr.write) return config +def pytest_unconfigure(config): + for func in config._cleanup: + func() + class Parser: """ Parser for command line arguments. 
""" @@ -251,7 +255,8 @@ self._conftest = Conftest(onimport=self._onimportconftest) self.hook = self.pluginmanager.hook self._inicache = {} - + self._cleanup = [] + @classmethod def fromdictargs(cls, option_dict, args): """ constructor useable for subprocesses. """ From commits-noreply at bitbucket.org Thu Apr 28 18:02:48 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 18:02:48 +0200 (CEST) Subject: [pypy-svn] pypy default: "Fix" (??) _sre.getlower(x) to return an x>=256 unchanged Message-ID: <20110428160248.B85E1282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43728:bca4299358a0 Date: 2011-04-28 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/bca4299358a0/ Log: "Fix" (??) _sre.getlower(x) to return an x>=256 unchanged when SRE_FLAG_LOCALE is passed. That's obscure... diff --git a/pypy/rlib/rsre/rsre_char.py b/pypy/rlib/rsre/rsre_char.py --- a/pypy/rlib/rsre/rsre_char.py +++ b/pypy/rlib/rsre/rsre_char.py @@ -54,11 +54,13 @@ def getlower(char_ord, flags): - if flags & SRE_FLAG_UNICODE: + if flags & SRE_FLAG_LOCALE: + if char_ord < 256: # cheating! Well, CPython does too. + char_ord = tolower(char_ord) + return char_ord + elif flags & SRE_FLAG_UNICODE: assert unicodedb is not None char_ord = unicodedb.tolower(char_ord) - elif flags & SRE_FLAG_LOCALE: - return tolower(char_ord) else: if int_between(ord('A'), char_ord, ord('Z') + 1): # ASCII lower char_ord += ord('a') - ord('A') diff --git a/pypy/rlib/rsre/test/test_char.py b/pypy/rlib/rsre/test/test_char.py --- a/pypy/rlib/rsre/test/test_char.py +++ b/pypy/rlib/rsre/test/test_char.py @@ -27,6 +27,12 @@ assert rsre_char.getlower(ord('2'), SRE_FLAG_UNICODE) == ord('2') assert rsre_char.getlower(10, SRE_FLAG_UNICODE) == 10 assert rsre_char.getlower(UPPER_PI, SRE_FLAG_UNICODE) == LOWER_PI + # + # xxx the following cases are like CPython's. They are obscure. 
+ # (iko) that's a nice way to say "broken" + assert rsre_char.getlower(UPPER_PI, SRE_FLAG_LOCALE) == UPPER_PI + assert rsre_char.getlower(UPPER_PI, SRE_FLAG_LOCALE | SRE_FLAG_UNICODE) \ + == UPPER_PI def test_is_word(): assert rsre_char.is_word(ord('A')) From commits-noreply at bitbucket.org Thu Apr 28 18:20:49 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 18:20:49 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, arigo) Message-ID: <20110428162049.16C9C282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43729:031a4c9492b6 Date: 2011-04-28 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/031a4c9492b6/ Log: (iko, rguillebert, arigo) Don't use locale.resetlocale(), which explodes if invalid strings are set in some environment variables. diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -664,10 +664,11 @@ + [s.OPCODES["at"], s.ATCODES["at_loc_non_boundary"], s.OPCODES["success"]] s.assert_match(opcodes1, "bla\xFC") s.assert_no_match(opcodes2, "bla\xFC") + oldlocale = locale.setlocale(locale.LC_ALL) locale.setlocale(locale.LC_ALL, "de_DE") s.assert_no_match(opcodes1, "bla\xFC") s.assert_match(opcodes2, "bla\xFC") - locale.resetlocale() # is this the right way to rest the locale? + locale.setlocale(locale.LC_ALL, oldlocale) except locale.Error: # skip test skip("locale error") From commits-noreply at bitbucket.org Thu Apr 28 18:28:08 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 18:28:08 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, arigo) Message-ID: <20110428162808.EDC7D282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43730:1bcebfc05c2a Date: 2011-04-28 18:27 +0200 http://bitbucket.org/pypy/pypy/changeset/1bcebfc05c2a/ Log: (iko, rguillebert, arigo) We love locales. 
diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -323,7 +323,14 @@ def test_locale(self): import locale - locale.setlocale(locale.LC_NUMERIC, 'en_US.UTF8') + for name in ['en_US.UTF8', 'en_US', 'en']: + try: + locale.setlocale(locale.LC_NUMERIC, name) + break + except locale.Error: + pass + else: + skip("no 'en' or 'en_US' or 'en_US.UTF8' locale??") x = 1234.567890 try: assert locale.format('%g', x, grouping=True) == '1,234.57' From commits-noreply at bitbucket.org Thu Apr 28 18:33:21 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 18:33:21 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, arigo) Message-ID: <20110428163321.CE869282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43731:81fc10496fce Date: 2011-04-28 18:33 +0200 http://bitbucket.org/pypy/pypy/changeset/81fc10496fce/ Log: (iko, rguillebert, arigo) Fix the test by sleeping a bit, because the character just sent may not yet have arrived on the reading end. 
diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -48,6 +48,7 @@ assert rhandle.poll() == False assert rhandle.poll(1) == False whandle.send(1) + import time; time.sleep(0.1) # give it time to arrive :-) assert rhandle.poll() == True assert rhandle.poll(None) == True assert rhandle.recv() == 1 From commits-noreply at bitbucket.org Thu Apr 28 18:35:01 2011 From: commits-noreply at bitbucket.org (berdario) Date: Thu, 28 Apr 2011 18:35:01 +0200 (CEST) Subject: [pypy-svn] pypy default: Fixed indentation in the malformed_xml test Message-ID: <20110428163501.3BD4C282B9E@codespeak.net> Author: Dario Bertini Branch: Changeset: r43732:349cda0eface Date: 2011-04-28 18:30 +0200 http://bitbucket.org/pypy/pypy/changeset/349cda0eface/ Log: Fixed indentation in the malformed_xml test diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -33,10 +33,10 @@ import sys if sys.platform == "darwin": skip("Fails with the version of expat on Mac OS 10.6.6") - import pyexpat + import pyexpat xml = "\0\r\n" - parser = pyexpat.ParserCreate() - raises(pyexpat.ExpatError, "parser.Parse(xml, True)") + parser = pyexpat.ParserCreate() + raises(pyexpat.ExpatError, "parser.Parse(xml, True)") def test_encoding(self): import pyexpat From commits-noreply at bitbucket.org Thu Apr 28 18:35:02 2011 From: commits-noreply at bitbucket.org (berdario) Date: Thu, 28 Apr 2011 18:35:02 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110428163502.5A4A4282B9E@codespeak.net> Author: Dario Bertini Branch: Changeset: r43733:d1c4c2e6adc3 Date: 2011-04-28 18:33 +0200 http://bitbucket.org/pypy/pypy/changeset/d1c4c2e6adc3/ Log: merge heads diff --git 
a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -33,10 +33,10 @@ import sys if sys.platform == "darwin": skip("Fails with the version of expat on Mac OS 10.6.6") - import pyexpat + import pyexpat xml = "\0\r\n" - parser = pyexpat.ParserCreate() - raises(pyexpat.ExpatError, "parser.Parse(xml, True)") + parser = pyexpat.ParserCreate() + raises(pyexpat.ExpatError, "parser.Parse(xml, True)") def test_encoding(self): import pyexpat From commits-noreply at bitbucket.org Thu Apr 28 18:35:04 2011 From: commits-noreply at bitbucket.org (berdario) Date: Thu, 28 Apr 2011 18:35:04 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110428163504.0261E282C18@codespeak.net> Author: Dario Bertini Branch: Changeset: r43734:3bf6c8cf8c48 Date: 2011-04-28 18:34 +0200 http://bitbucket.org/pypy/pypy/changeset/3bf6c8cf8c48/ Log: merge heads diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -48,6 +48,7 @@ assert rhandle.poll() == False assert rhandle.poll(1) == False whandle.send(1) + import time; time.sleep(0.1) # give it time to arrive :-) assert rhandle.poll() == True assert rhandle.poll(None) == True assert rhandle.recv() == 1 From commits-noreply at bitbucket.org Thu Apr 28 18:49:51 2011 From: commits-noreply at bitbucket.org (lac) Date: Thu, 28 Apr 2011 18:49:51 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Say which version of sphinx is needed. Message-ID: <20110428164951.46B87282B9E@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43735:858eaea3c688 Date: 2011-04-28 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/858eaea3c688/ Log: Say which version of sphinx is needed. 
diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -10,7 +10,7 @@ `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral -differences to CPython and some missing extensions, for details see `CPython +differences with CPython and some missing extensions, for details see `CPython differences`_. .. _Django: http://djangoproject.com @@ -58,7 +58,7 @@ * ``libexpat1-dev`` (for the optional ``pyexpat`` module) * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) - * ``python-sphinx`` (for the optional documentation build) + * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) 2. Translation is somewhat time-consuming (30 min to From commits-noreply at bitbucket.org Thu Apr 28 18:50:06 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 18:50:06 +0200 (CEST) Subject: [pypy-svn] pypy default: (iko, rguillebert, dario for the last 5 minutes, arigo) Message-ID: <20110428165006.25290282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43736:483126b12f26 Date: 2011-04-28 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/483126b12f26/ Log: (iko, rguillebert, dario for the last 5 minutes, arigo) Use any available port instead of enforcing one random port (the same for all the tests). 
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -1,12 +1,10 @@ from pypy.conftest import gettestobjspace -import sys, random +import sys import py from pypy.tool.udir import udir from pypy.rlib import rsocket from pypy.rpython.lltypesystem import lltype, rffi -PORT_NUMBER = random.randrange(40000, 60000) - def setup_module(mod): mod.space = gettestobjspace(usemodules=['_socket', 'array']) global socket @@ -299,7 +297,6 @@ def setup_class(cls): cls.space = space cls.w_udir = space.wrap(str(udir)) - cls.w_PORT = space.wrap(PORT_NUMBER) def test_ntoa_exception(self): import _socket @@ -500,8 +497,7 @@ if not hasattr(socket.socket, 'dup'): skip('No dup() on this platform') s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - s.bind(('localhost', self.PORT)) + s.bind(('localhost', 0)) s2 = s.dup() assert s.fileno() != s2.fileno() assert s.getsockname() == s2.getsockname() @@ -557,17 +553,14 @@ def setup_class(cls): cls.space = space - PORT = PORT_NUMBER HOST = 'localhost' def setup_method(self, method): w_HOST = space.wrap(self.HOST) - w_PORT = space.wrap(self.PORT) - self.w_serv = space.appexec([w_socket, w_HOST, w_PORT], - '''(_socket, HOST, PORT): + self.w_serv = space.appexec([w_socket, w_HOST], + '''(_socket, HOST): serv = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) - serv.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) - serv.bind((HOST, PORT)) + serv.bind((HOST, 0)) serv.listen(1) return serv ''') From commits-noreply at bitbucket.org Thu Apr 28 18:50:07 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 18:50:07 +0200 (CEST) Subject: [pypy-svn] pypy default: merge heads Message-ID: <20110428165007.607A9282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43737:1cce6fa9dcf5 Date: 2011-04-28 18:49 +0200 
http://bitbucket.org/pypy/pypy/changeset/1cce6fa9dcf5/ Log: merge heads diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -33,10 +33,10 @@ import sys if sys.platform == "darwin": skip("Fails with the version of expat on Mac OS 10.6.6") - import pyexpat + import pyexpat xml = "\0\r\n" - parser = pyexpat.ParserCreate() - raises(pyexpat.ExpatError, "parser.Parse(xml, True)") + parser = pyexpat.ParserCreate() + raises(pyexpat.ExpatError, "parser.Parse(xml, True)") def test_encoding(self): import pyexpat From commits-noreply at bitbucket.org Thu Apr 28 19:05:51 2011 From: commits-noreply at bitbucket.org (lac) Date: Thu, 28 Apr 2011 19:05:51 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix minor english warts. Message-ID: <20110428170551.50E32282B9E@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43738:a120d9ae7af2 Date: 2011-04-28 19:05 +0200 http://bitbucket.org/pypy/pypy/changeset/a120d9ae7af2/ Log: fix minor english warts. diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst --- a/pypy/doc/cli-backend.rst +++ b/pypy/doc/cli-backend.rst @@ -198,12 +198,12 @@ int_add STORE v2 -The code produced works correctly but has some inefficiency issue that +The code produced works correctly but has some inefficiency issues that can be addressed during the optimization phase. The CLI Virtual Machine is fairly expressive, so the conversion between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the correspondent +simple: many operations maps directly to the corresponding instruction, e.g int_add and sub. 
By contrast some instructions do not have a direct correspondent and @@ -223,7 +223,7 @@ Mapping exceptions ------------------ -Both RPython and CLI have its own set of exception classes: some of +Both RPython and CLI have their own set of exception classes: some of these are pretty similar; e.g., we have OverflowError, ZeroDivisionError and IndexError on the first side and OverflowException, DivideByZeroException and IndexOutOfRangeException @@ -435,7 +435,7 @@ To do so, you can install `Python for .NET`_. Unfortunately, it does not work out of the box under Linux. -To make it working, download and unpack the source package of Python +To make it work, download and unpack the source package of Python for .NET; the only version tested with PyPy is the 1.0-rc2, but it might work also with others. Then, you need to create a file named Python.Runtime.dll.config at the root of the unpacked archive; put the From commits-noreply at bitbucket.org Thu Apr 28 19:11:10 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 19:11:10 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Document the minimark gc. Message-ID: <20110428171110.427B8282B9E@codespeak.net> Author: Armin Rigo Branch: documentation-cleanup Changeset: r43739:08e8a5d17a31 Date: 2011-04-28 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/08e8a5d17a31/ Log: Document the minimark gc. diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -8,10 +8,11 @@ Introduction ============ -**Warning**: The overview and description of our garbage collection -strategy and framework is not here but in the `EU-report on this -topic`_. The present document describes the specific garbage collectors -that we wrote in our framework. +The overview and description of our garbage collection strategy and +framework can be found in the `EU-report on this topic`_. 
Please refer +to that file for an old, but still more or less accurate, description. +The present document describes the specific garbage collectors that we +wrote in our framework. .. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf @@ -28,6 +29,9 @@ For more details, see the `overview of command line options for translation`_. +The following overview is written in chronological order, so the "best" +GC (which is the default when translating) is the last one below. + .. _`overview of command line options for translation`: config/commandline.html#translation Mark and Sweep @@ -126,4 +130,90 @@ More details are available as comments at the start of the source in `pypy/rpython/memory/gc/markcompact.py`_. +Minimark GC +----------- + +This is a simplification and rewrite of the ideas from the Hybrid GC. +It uses a nursery for the young objects, and mark-and-sweep for the old +objects. This is a moving GC, but objects may only move once (from +the nursery to the old stage). + +The main difference with the Hybrid GC is that the mark-and-sweep +objects (the "old stage") are directly handled by the GC's custom +allocator, instead of being handled by malloc() calls. The gain is that +it is then possible, during a major collection, to walk through all old +generation objects without needing to store a list of pointers to them. +So as a first approximation, when compared to the Hybrid GC, the +Minimark GC saves one word of memory per old object. + +There are a number of environment variables that can be tweaked to +influence the GC. (Their default value should be ok for most usages.) +You can read more about them at the start of +`rpython/memory/gc/minimark.py`_. + +In more details: + +- The small newly malloced objects are allocated in the nursery (case 1). + All objects living in the nursery are "young". + +- The big objects are always handled directly by the system malloc(). 
+ But the big newly malloced objects are still "young" when they are + allocated (case 2), even though they don't live in the nursery. + +- When the nursery is full, we do a minor collection, i.e. we find + which "young" objects are still alive (from cases 1 and 2). The + "young" flag is then removed. The surviving case 1 objects are moved + to the old stage. The dying case 2 objects are immediately freed. + +- The old stage is an area of memory containing old (small) objects. It + is handled by `rpython/memory/gc/minimarkpage.py`_. It is organized + as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. + Each page can either be free, or contain small objects of all the same + size. Furthermore at any point in time each object location can be + either allocated or freed. The basic design comes from ``obmalloc.c`` + from CPython (which itself comes from the same source as the Linux + system malloc()). + +- New objects are added to the old stage at every minor collection. + Immediately after a minor collection, when we reach some threshold, we + trigger a major collection. This is the mark-and-sweep step. It walks + over *all* objects (mark), and then frees some fraction of them (sweep). + This means that the only time when we want to free objects is while + walking over all of them; we never ask to free an object given just its + address. This allows some simplifications and memory savings when + compared to ``obmalloc.c``. + +- As with all generational collectors, this GC needs a write barrier to + record which old objects have a reference to young objects. + +- Additionally, we found out that it is useful to handle the case of + big arrays specially: when we allocate a big array (with the system + malloc()), we reserve a small number of bytes before. When the array + grows old, we use the extra bytes as a set of bits. Each bit + represents 128 entries in the array. 
Whenever the write barrier is + called to record a reference from the Nth entry of the array to some + young object, we set the bit number ``(N/128)`` to 1. This can + considerably speed up minor collections, because we then only have to + scan 128 entries of the array instead of all of them. + +- As usual, we need special care about weak references, and objects with + finalizers. Weak references are allocated in the nursery, and if they + survive they move to the old stage, as usual for all objects; the + difference is that the reference they contain must either follow the + object, or be set to NULL if the object dies. And the objects with + finalizers, considered rare enough, are immediately allocated old to + simplify the design. In particular their ``__del__`` method can only + be called just after a major collection. + +- The objects move once only, so we can use a trick to implement id() + and hash(). If the object is not in the nursery, it won't move any + more, so its id() and hash() are the object's address, cast to an + integer. If the object is in the nursery, and we ask for its id() + or its hash(), then we pre-reserve a location in the old stage, and + return the address of that location. If the object survives the + next minor collection, we move it there, and so its id() and hash() + are preserved. If the object dies then the pre-reserved location + becomes free garbage, to be collected at the next major collection. + + .. include:: _ref.txt From commits-noreply at bitbucket.org Thu Apr 28 19:11:11 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 19:11:11 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Add minimark here too. Message-ID: <20110428171111.7E743282B9E@codespeak.net> Author: Armin Rigo Branch: documentation-cleanup Changeset: r43740:a32f0bcddc37 Date: 2011-04-28 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/a32f0bcddc37/ Log: Add minimark here too. 
diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -116,11 +116,12 @@ * ``--stackless``: this produces a pypy-c that includes features inspired by `Stackless Python `__. - * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid``: + * ``--gc=boehm|ref|marknsweep|semispace|generation|hybrid|minimark``: choose between using the `Boehm-Demers-Weiser garbage collector`_, our reference - counting implementation or four of own collector implementations - (the default depends on the optimization level). + counting implementation or one of own collector implementations + (the default depends on the optimization level but is usually + ``minimark``). Find a more detailed description of the various options in our `configuration sections`_. From commits-noreply at bitbucket.org Thu Apr 28 19:59:37 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 19:59:37 +0200 (CEST) Subject: [pypy-svn] pypy default: "Fix" the tests on OSX -- actually skip some that give nonsensical Message-ID: <20110428175937.54624282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43741:8972cfbb1712 Date: 2011-04-28 19:59 +0200 http://bitbucket.org/pypy/pypy/changeset/8972cfbb1712/ Log: "Fix" the tests on OSX -- actually skip some that give nonsensical results, as long as they get the same nonsensical results on top of CPython 2.7. 
diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -29,7 +29,7 @@ assert isinstance(_ssl.SSL_ERROR_EOF, int) assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int) - assert isinstance(_ssl.OPENSSL_VERSION_NUMBER, int) + assert isinstance(_ssl.OPENSSL_VERSION_NUMBER, (int, long)) assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) @@ -64,6 +64,8 @@ def test_sslwrap(self): import _ssl, _socket, sys + if sys.platform == 'darwin': + skip("hangs indefinitely on OSX (also on CPython)") s = _socket.socket() ss = _ssl.sslwrap(s, 0) exc = raises(_socket.error, ss.do_handshake) @@ -147,7 +149,9 @@ self.s.close() def test_shutdown(self): - import socket, ssl + import socket, ssl, sys + if sys.platform == 'darwin': + skip("get also on CPython: error: [Errno 0]") ss = socket.ssl(self.s) ss.write("hello\n") assert ss.shutdown() is self.s._sock From commits-noreply at bitbucket.org Thu Apr 28 20:16:41 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 20:16:41 +0200 (CEST) Subject: [pypy-svn] pypy default: Copy the test here. Message-ID: <20110428181641.27ABC282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43742:3e20493c14a4 Date: 2011-04-28 20:15 +0200 http://bitbucket.org/pypy/pypy/changeset/3e20493c14a4/ Log: Copy the test here. diff --git a/lib-python/2.7/test/test_os.py b/lib-python/modified-2.7/test/test_os.py copy from lib-python/2.7/test/test_os.py copy to lib-python/modified-2.7/test/test_os.py From commits-noreply at bitbucket.org Thu Apr 28 20:16:42 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 20:16:42 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix this test to be on line with e6a6bec95962. 
Message-ID: <20110428181642.5A741282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r43743:14c5e285c79f Date: 2011-04-28 20:16 +0200 http://bitbucket.org/pypy/pypy/changeset/14c5e285c79f/ Log: Fix this test to be on line with e6a6bec95962. diff --git a/lib-python/modified-2.7/test/test_os.py b/lib-python/modified-2.7/test/test_os.py --- a/lib-python/modified-2.7/test/test_os.py +++ b/lib-python/modified-2.7/test/test_os.py @@ -626,32 +626,40 @@ def test_setuid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setuid, 0) - self.assertRaises(OverflowError, os.setuid, 1<<32) + self.assertRaises((OverflowError, OSError), os.setuid, 1<<32) + self.assertRaises(OverflowError, os.setuid, sys.maxint*3) if hasattr(os, 'setgid'): def test_setgid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setgid, 0) - self.assertRaises(OverflowError, os.setgid, 1<<32) + self.assertRaises((OverflowError, OSError), os.setgid, 1<<32) + self.assertRaises(OverflowError, os.setgid, sys.maxint*3) if hasattr(os, 'seteuid'): def test_seteuid(self): if os.getuid() != 0: self.assertRaises(os.error, os.seteuid, 0) - self.assertRaises(OverflowError, os.seteuid, 1<<32) + self.assertRaises((OverflowError, OSError), os.seteuid, 1<<32) + self.assertRaises(OverflowError, os.seteuid, sys.maxint*3) if hasattr(os, 'setegid'): def test_setegid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setegid, 0) - self.assertRaises(OverflowError, os.setegid, 1<<32) + self.assertRaises((OverflowError, OSError), os.setegid, 1<<32) + self.assertRaises(OverflowError, os.setegid, sys.maxint*3) if hasattr(os, 'setreuid'): def test_setreuid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setreuid, 0, 0) - self.assertRaises(OverflowError, os.setreuid, 1<<32, 0) - self.assertRaises(OverflowError, os.setreuid, 0, 1<<32) + self.assertRaises((OverflowError, OSError), + os.setreuid, 1<<32, 0) + self.assertRaises((OverflowError, OSError), + os.setreuid, 0, 1<<32) + 
self.assertRaises(OverflowError, os.setreuid, sys.maxint*3, 0) + self.assertRaises(OverflowError, os.setreuid, 0, sys.maxint*3) def test_setreuid_neg1(self): # Needs to accept -1. We run this in a subprocess to avoid @@ -664,8 +672,12 @@ def test_setregid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setregid, 0, 0) - self.assertRaises(OverflowError, os.setregid, 1<<32, 0) - self.assertRaises(OverflowError, os.setregid, 0, 1<<32) + self.assertRaises((OverflowError, OSError), + os.setregid, 1<<32, 0) + self.assertRaises((OverflowError, OSError), + os.setregid, 0, 1<<32) + self.assertRaises(OverflowError, os.setregid, sys.maxint*3, 0) + self.assertRaises(OverflowError, os.setregid, 0, sys.maxint*3) def test_setregid_neg1(self): # Needs to accept -1. We run this in a subprocess to avoid From commits-noreply at bitbucket.org Thu Apr 28 20:20:52 2011 From: commits-noreply at bitbucket.org (iko) Date: Thu, 28 Apr 2011 20:20:52 +0200 (CEST) Subject: [pypy-svn] pypy xapian: CPytthon imp.find_modules returns open fd even for extension modules. Message-ID: <20110428182052.D5B9A36C206@codespeak.net> Author: Anders Hammarquist Branch: xapian Changeset: r43744:2b6e6f1b4afd Date: 2011-04-28 20:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2b6e6f1b4afd/ Log: CPytthon imp.find_modules returns open fd even for extension modules. Do same, so xapian will load. 
WIP - the test segfaults when run with pypy-c diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -1,6 +1,31 @@ from __future__ import with_statement MARKER = 42 +try: + from pypy.conftest import gettestobjspace + from pypy.module.cpyext.state import State + from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + + class AppTestImpCModule(AppTestCpythonExtensionBase): + def setup_class(cls): + cls.space = gettestobjspace(usemodules=['cpyext', '_rawffi']) + cls.w_imp = cls.space.getbuiltinmodule('imp') + cls.w_file_module = cls.space.wrap(__file__) + state = cls.space.fromcache(State) + state.build_api(cls.space) + + def test_find_module_cpyext(self): + import os + mod = self.compile_module('test_import_module', + separate_module_files=[self.here + 'test_import_module.c']) + + fp, pathname, description = self.imp.find_module('test_import_module', + [os.path.dirname(mod)]) + assert fp is not None + assert pathname == mod +except ImportError: + pass + class AppTestImpModule: def setup_class(cls): cls.w_imp = cls.space.getbuiltinmodule('imp') diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -423,7 +423,7 @@ space.warn(msg, space.w_ImportWarning) modtype, suffix, filemode = find_modtype(space, filepart) try: - if modtype in (PY_SOURCE, PY_COMPILED): + if modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION): assert suffix is not None filename = filepart + suffix stream = streamio.open_file_as_stream(filename, filemode) @@ -432,9 +432,9 @@ except: stream.close() raise - if modtype == C_EXTENSION: - filename = filepart + suffix - return FindInfo(modtype, filename, None, suffix, filemode) + # if modtype == C_EXTENSION: + # filename = filepart + suffix + # return FindInfo(modtype, filename, None, suffix, filemode) except StreamErrors: pass From 
commits-noreply at bitbucket.org Thu Apr 28 22:36:31 2011 From: commits-noreply at bitbucket.org (arigo) Date: Thu, 28 Apr 2011 22:36:31 +0200 (CEST) Subject: [pypy-svn] pypy default: Redo some checking, partially reverting e6a6bec95962. Message-ID: <20110428203631.C3457282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r43745:c6390eb05bea Date: 2011-04-28 22:35 +0200 http://bitbucket.org/pypy/pypy/changeset/c6390eb05bea/ Log: Redo some checking, partially reverting e6a6bec95962. Now on 64-bit you can pass any value between -2**31 and 2**32, i.e. any value that is either a signed or an unsigned int. diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -507,9 +507,9 @@ if hasattr(os, 'setuid'): def test_os_setuid_error(self): - skip("overflow checking disabled for now") os = self.posix - raises((OSError, ValueError, OverflowError), os.setuid, -100000) + raises(OverflowError, os.setuid, -2**31-1) + raises(OverflowError, os.setuid, 2**32) if hasattr(os, 'getgid'): def test_os_getgid(self): @@ -529,9 +529,9 @@ if hasattr(os, 'setgid'): def test_os_setgid_error(self): - skip("overflow checking disabled for now") os = self.posix - raises((OSError, ValueError, OverflowError), os.setgid, -100000) + raises(OverflowError, os.setgid, -2**31-1) + raises(OverflowError, os.setgid, 2**32) if hasattr(os, 'getsid'): def test_os_getsid(self): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -18,9 +18,19 @@ # CPython 2.7 semantics are too messy to follow exactly, # e.g. setuid(-2) works on 32-bit but not on 64-bit. As a result, -# we decided to just accept any 'int', i.e. any C signed long. +# we decided to just accept any 'int', i.e. any C signed long, and +# check that they are in range(-2**31, 2**32). 
In other words, we +# accept any number that is either a signed or an unsigned C int. c_uid_t = int c_gid_t = int +if sys.maxint == 2147483647: + def check_uid_range(space, num): + pass +else: + def check_uid_range(space, num): + if num < -(1<<31) or num >= (1<<32): + raise OperationError(space.w_OverflowError, + space.wrap("integer out of range")) class FileEncoder(object): def __init__(self, space, w_obj): @@ -828,12 +838,13 @@ """ return space.wrap(os.getuid()) - at unwrap_spec(arg=int) + at unwrap_spec(arg=c_uid_t) def setuid(space, arg): """ setuid(uid) Set the current process's user id. """ + check_uid_range(space, arg) try: os.setuid(arg) except OSError, e: @@ -846,6 +857,7 @@ Set the current process's effective user id. """ + check_uid_range(space, arg) try: os.seteuid(arg) except OSError, e: @@ -858,6 +870,7 @@ Set the current process's group id. """ + check_uid_range(space, arg) try: os.setgid(arg) except OSError, e: @@ -870,6 +883,7 @@ Set the current process's effective group id. """ + check_uid_range(space, arg) try: os.setegid(arg) except OSError, e: @@ -971,6 +985,8 @@ Set the current process's real and effective user ids. """ + check_uid_range(space, ruid) + check_uid_range(space, euid) try: os.setreuid(ruid, euid) except OSError, e: @@ -983,6 +999,8 @@ Set the current process's real and effective group ids. 
""" + check_uid_range(space, rgid) + check_uid_range(space, egid) try: os.setregid(rgid, egid) except OSError, e: @@ -1063,6 +1081,8 @@ @unwrap_spec(path=str, uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): + check_uid_range(space, uid) + check_uid_range(space, gid) try: os.chown(path, uid, gid) except OSError, e: @@ -1071,6 +1091,8 @@ @unwrap_spec(path=str, uid=c_uid_t, gid=c_gid_t) def lchown(space, path, uid, gid): + check_uid_range(space, uid) + check_uid_range(space, gid) try: os.lchown(path, uid, gid) except OSError, e: diff --git a/lib-python/modified-2.7/test/test_os.py b/lib-python/modified-2.7/test/test_os.py deleted file mode 100644 --- a/lib-python/modified-2.7/test/test_os.py +++ /dev/null @@ -1,823 +0,0 @@ -# As a test suite for the os module, this is woefully inadequate, but this -# does add tests for a few functions which have been determined to be more -# portable than they had been thought to be. - -import os -import errno -import unittest -import warnings -import sys -import signal -import subprocess -import time -from test import test_support -import mmap -import uuid - -warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__) -warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__) - -# Tests creating TESTFN -class FileTests(unittest.TestCase): - def setUp(self): - if os.path.exists(test_support.TESTFN): - os.unlink(test_support.TESTFN) - tearDown = setUp - - def test_access(self): - f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR) - os.close(f) - self.assertTrue(os.access(test_support.TESTFN, os.W_OK)) - - def test_closerange(self): - first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR) - # We must allocate two consecutive file descriptors, otherwise - # it will mess up other file descriptors (perhaps even the three - # standard ones). 
- second = os.dup(first) - try: - retries = 0 - while second != first + 1: - os.close(first) - retries += 1 - if retries > 10: - # XXX test skipped - self.skipTest("couldn't allocate two consecutive fds") - first, second = second, os.dup(second) - finally: - os.close(second) - # close a fd that is open, and one that isn't - os.closerange(first, first + 2) - self.assertRaises(OSError, os.write, first, "a") - - @test_support.cpython_only - def test_rename(self): - path = unicode(test_support.TESTFN) - old = sys.getrefcount(path) - self.assertRaises(TypeError, os.rename, path, 0) - new = sys.getrefcount(path) - self.assertEqual(old, new) - - -class TemporaryFileTests(unittest.TestCase): - def setUp(self): - self.files = [] - os.mkdir(test_support.TESTFN) - - def tearDown(self): - for name in self.files: - os.unlink(name) - os.rmdir(test_support.TESTFN) - - def check_tempfile(self, name): - # make sure it doesn't already exist: - self.assertFalse(os.path.exists(name), - "file already exists for temporary file") - # make sure we can create the file - open(name, "w") - self.files.append(name) - - def test_tempnam(self): - if not hasattr(os, "tempnam"): - return - warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, - r"test_os$") - self.check_tempfile(os.tempnam()) - - name = os.tempnam(test_support.TESTFN) - self.check_tempfile(name) - - name = os.tempnam(test_support.TESTFN, "pfx") - self.assertTrue(os.path.basename(name)[:3] == "pfx") - self.check_tempfile(name) - - def test_tmpfile(self): - if not hasattr(os, "tmpfile"): - return - # As with test_tmpnam() below, the Windows implementation of tmpfile() - # attempts to create a file in the root directory of the current drive. - # On Vista and Server 2008, this test will always fail for normal users - # as writing to the root directory requires elevated privileges. 
With - # XP and below, the semantics of tmpfile() are the same, but the user - # running the test is more likely to have administrative privileges on - # their account already. If that's the case, then os.tmpfile() should - # work. In order to make this test as useful as possible, rather than - # trying to detect Windows versions or whether or not the user has the - # right permissions, just try and create a file in the root directory - # and see if it raises a 'Permission denied' OSError. If it does, then - # test that a subsequent call to os.tmpfile() raises the same error. If - # it doesn't, assume we're on XP or below and the user running the test - # has administrative privileges, and proceed with the test as normal. - if sys.platform == 'win32': - name = '\\python_test_os_test_tmpfile.txt' - if os.path.exists(name): - os.remove(name) - try: - fp = open(name, 'w') - except IOError, first: - # open() failed, assert tmpfile() fails in the same way. - # Although open() raises an IOError and os.tmpfile() raises an - # OSError(), 'args' will be (13, 'Permission denied') in both - # cases. - try: - fp = os.tmpfile() - except OSError, second: - self.assertEqual(first.args, second.args) - else: - self.fail("expected os.tmpfile() to raise OSError") - return - else: - # open() worked, therefore, tmpfile() should work. Close our - # dummy file and proceed with the test as normal. - fp.close() - os.remove(name) - - fp = os.tmpfile() - fp.write("foobar") - fp.seek(0,0) - s = fp.read() - fp.close() - self.assertTrue(s == "foobar") - - def test_tmpnam(self): - if not hasattr(os, "tmpnam"): - return - warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, - r"test_os$") - name = os.tmpnam() - if sys.platform in ("win32",): - # The Windows tmpnam() seems useless. 
From the MS docs: - # - # The character string that tmpnam creates consists of - # the path prefix, defined by the entry P_tmpdir in the - # file STDIO.H, followed by a sequence consisting of the - # digit characters '0' through '9'; the numerical value - # of this string is in the range 1 - 65,535. Changing the - # definitions of L_tmpnam or P_tmpdir in STDIO.H does not - # change the operation of tmpnam. - # - # The really bizarre part is that, at least under MSVC6, - # P_tmpdir is "\\". That is, the path returned refers to - # the root of the current drive. That's a terrible place to - # put temp files, and, depending on privileges, the user - # may not even be able to open a file in the root directory. - self.assertFalse(os.path.exists(name), - "file already exists for temporary file") - else: - self.check_tempfile(name) - -# Test attributes on return values from os.*stat* family. -class StatAttributeTests(unittest.TestCase): - def setUp(self): - os.mkdir(test_support.TESTFN) - self.fname = os.path.join(test_support.TESTFN, "f1") - f = open(self.fname, 'wb') - f.write("ABC") - f.close() - - def tearDown(self): - os.unlink(self.fname) - os.rmdir(test_support.TESTFN) - - def test_stat_attributes(self): - if not hasattr(os, "stat"): - return - - import stat - result = os.stat(self.fname) - - # Make sure direct access works - self.assertEqual(result[stat.ST_SIZE], 3) - self.assertEqual(result.st_size, 3) - - # Make sure all the attributes are there - members = dir(result) - for name in dir(stat): - if name[:3] == 'ST_': - attr = name.lower() - if name.endswith("TIME"): - def trunc(x): return int(x) - else: - def trunc(x): return x - self.assertEqual(trunc(getattr(result, attr)), - result[getattr(stat, name)]) - self.assertIn(attr, members) - - try: - result[200] - self.fail("No exception thrown") - except IndexError: - pass - - # Make sure that assignment fails - try: - result.st_mode = 1 - self.fail("No exception thrown") - except (AttributeError, TypeError): - 
pass - - try: - result.st_rdev = 1 - self.fail("No exception thrown") - except (AttributeError, TypeError): - pass - - try: - result.parrot = 1 - self.fail("No exception thrown") - except AttributeError: - pass - - # Use the stat_result constructor with a too-short tuple. - try: - result2 = os.stat_result((10,)) - self.fail("No exception thrown") - except TypeError: - pass - - # Use the constructr with a too-long tuple. - try: - result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)) - except TypeError: - pass - - - def test_statvfs_attributes(self): - if not hasattr(os, "statvfs"): - return - - try: - result = os.statvfs(self.fname) - except OSError, e: - # On AtheOS, glibc always returns ENOSYS - if e.errno == errno.ENOSYS: - return - - # Make sure direct access works - self.assertEqual(result.f_bfree, result[3]) - - # Make sure all the attributes are there. - members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files', - 'ffree', 'favail', 'flag', 'namemax') - for value, member in enumerate(members): - self.assertEqual(getattr(result, 'f_' + member), result[value]) - - # Make sure that assignment really fails - try: - result.f_bfree = 1 - self.fail("No exception thrown") - except TypeError: - pass - - try: - result.parrot = 1 - self.fail("No exception thrown") - except AttributeError: - pass - - # Use the constructor with a too-short tuple. - try: - result2 = os.statvfs_result((10,)) - self.fail("No exception thrown") - except TypeError: - pass - - # Use the constructr with a too-long tuple. - try: - result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)) - except TypeError: - pass - - def test_utime_dir(self): - delta = 1000000 - st = os.stat(test_support.TESTFN) - # round to int, because some systems may support sub-second - # time stamps in stat, but not in utime. 
- os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta))) - st2 = os.stat(test_support.TESTFN) - self.assertEqual(st2.st_mtime, int(st.st_mtime-delta)) - - # Restrict test to Win32, since there is no guarantee other - # systems support centiseconds - if sys.platform == 'win32': - def get_file_system(path): - root = os.path.splitdrive(os.path.abspath(path))[0] + '\\' - import ctypes - kernel32 = ctypes.windll.kernel32 - buf = ctypes.create_string_buffer("", 100) - if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)): - return buf.value - - if get_file_system(test_support.TESTFN) == "NTFS": - def test_1565150(self): - t1 = 1159195039.25 - os.utime(self.fname, (t1, t1)) - self.assertEqual(os.stat(self.fname).st_mtime, t1) - - def test_1686475(self): - # Verify that an open file can be stat'ed - try: - os.stat(r"c:\pagefile.sys") - except WindowsError, e: - if e.errno == 2: # file does not exist; cannot run test - return - self.fail("Could not stat pagefile.sys") - -from test import mapping_tests - -class EnvironTests(mapping_tests.BasicTestMappingProtocol): - """check that os.environ object conform to mapping protocol""" - type2test = None - def _reference(self): - return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"} - def _empty_mapping(self): - os.environ.clear() - return os.environ - def setUp(self): - self.__save = dict(os.environ) - os.environ.clear() - def tearDown(self): - os.environ.clear() - os.environ.update(self.__save) - - # Bug 1110478 - def test_update2(self): - if os.path.exists("/bin/sh"): - os.environ.update(HELLO="World") - with os.popen("/bin/sh -c 'echo $HELLO'") as popen: - value = popen.read().strip() - self.assertEqual(value, "World") - -class WalkTests(unittest.TestCase): - """Tests for os.walk().""" - - def test_traversal(self): - import os - from os.path import join - - # Build: - # TESTFN/ - # TEST1/ a file kid and two directory kids - # tmp1 - # SUB1/ a file kid and a directory kid - # tmp2 - 
# SUB11/ no kids - # SUB2/ a file kid and a dirsymlink kid - # tmp3 - # link/ a symlink to TESTFN.2 - # TEST2/ - # tmp4 a lone file - walk_path = join(test_support.TESTFN, "TEST1") - sub1_path = join(walk_path, "SUB1") - sub11_path = join(sub1_path, "SUB11") - sub2_path = join(walk_path, "SUB2") - tmp1_path = join(walk_path, "tmp1") - tmp2_path = join(sub1_path, "tmp2") - tmp3_path = join(sub2_path, "tmp3") - link_path = join(sub2_path, "link") - t2_path = join(test_support.TESTFN, "TEST2") - tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4") - - # Create stuff. - os.makedirs(sub11_path) - os.makedirs(sub2_path) - os.makedirs(t2_path) - for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path: - f = file(path, "w") - f.write("I'm " + path + " and proud of it. Blame test_os.\n") - f.close() - if hasattr(os, "symlink"): - os.symlink(os.path.abspath(t2_path), link_path) - sub2_tree = (sub2_path, ["link"], ["tmp3"]) - else: - sub2_tree = (sub2_path, [], ["tmp3"]) - - # Walk top-down. - all = list(os.walk(walk_path)) - self.assertEqual(len(all), 4) - # We can't know which order SUB1 and SUB2 will appear in. - # Not flipped: TESTFN, SUB1, SUB11, SUB2 - # flipped: TESTFN, SUB2, SUB1, SUB11 - flipped = all[0][1][0] != "SUB1" - all[0][1].sort() - self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) - self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"])) - self.assertEqual(all[2 + flipped], (sub11_path, [], [])) - self.assertEqual(all[3 - 2 * flipped], sub2_tree) - - # Prune the search. - all = [] - for root, dirs, files in os.walk(walk_path): - all.append((root, dirs, files)) - # Don't descend into SUB1. - if 'SUB1' in dirs: - # Note that this also mutates the dirs we appended to all! - dirs.remove('SUB1') - self.assertEqual(len(all), 2) - self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"])) - self.assertEqual(all[1], sub2_tree) - - # Walk bottom-up. 
- all = list(os.walk(walk_path, topdown=False)) - self.assertEqual(len(all), 4) - # We can't know which order SUB1 and SUB2 will appear in. - # Not flipped: SUB11, SUB1, SUB2, TESTFN - # flipped: SUB2, SUB11, SUB1, TESTFN - flipped = all[3][1][0] != "SUB1" - all[3][1].sort() - self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) - self.assertEqual(all[flipped], (sub11_path, [], [])) - self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"])) - self.assertEqual(all[2 - 2 * flipped], sub2_tree) - - if hasattr(os, "symlink"): - # Walk, following symlinks. - for root, dirs, files in os.walk(walk_path, followlinks=True): - if root == link_path: - self.assertEqual(dirs, []) - self.assertEqual(files, ["tmp4"]) - break - else: - self.fail("Didn't follow symlink with followlinks=True") - - def tearDown(self): - # Tear everything down. This is a decent use for bottom-up on - # Windows, which doesn't have a recursive delete command. The - # (not so) subtlety is that rmdir will fail unless the dir's - # kids are removed first, so bottom up is essential. - for root, dirs, files in os.walk(test_support.TESTFN, topdown=False): - for name in files: - os.remove(os.path.join(root, name)) - for name in dirs: - dirname = os.path.join(root, name) - if not os.path.islink(dirname): - os.rmdir(dirname) - else: - os.remove(dirname) - os.rmdir(test_support.TESTFN) - -class MakedirTests (unittest.TestCase): - def setUp(self): - os.mkdir(test_support.TESTFN) - - def test_makedir(self): - base = test_support.TESTFN - path = os.path.join(base, 'dir1', 'dir2', 'dir3') - os.makedirs(path) # Should work - path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4') - os.makedirs(path) - - # Try paths with a '.' 
in them - self.assertRaises(OSError, os.makedirs, os.curdir) - path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir) - os.makedirs(path) - path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4', - 'dir5', 'dir6') - os.makedirs(path) - - - - - def tearDown(self): - path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3', - 'dir4', 'dir5', 'dir6') - # If the tests failed, the bottom-most directory ('../dir6') - # may not have been created, so we look for the outermost directory - # that exists. - while not os.path.exists(path) and path != test_support.TESTFN: - path = os.path.dirname(path) - - os.removedirs(path) - -class DevNullTests (unittest.TestCase): - def test_devnull(self): - f = file(os.devnull, 'w') - f.write('hello') - f.close() - f = file(os.devnull, 'r') - self.assertEqual(f.read(), '') - f.close() - -class URandomTests (unittest.TestCase): - def test_urandom(self): - try: - self.assertEqual(len(os.urandom(1)), 1) - self.assertEqual(len(os.urandom(10)), 10) - self.assertEqual(len(os.urandom(100)), 100) - self.assertEqual(len(os.urandom(1000)), 1000) - # see http://bugs.python.org/issue3708 - self.assertRaises(TypeError, os.urandom, 0.9) - self.assertRaises(TypeError, os.urandom, 1.1) - self.assertRaises(TypeError, os.urandom, 2.0) - except NotImplementedError: - pass - - def test_execvpe_with_bad_arglist(self): - self.assertRaises(ValueError, os.execvpe, 'notepad', [], None) - -class Win32ErrorTests(unittest.TestCase): - def test_rename(self): - self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak") - - def test_remove(self): - self.assertRaises(WindowsError, os.remove, test_support.TESTFN) - - def test_chdir(self): - self.assertRaises(WindowsError, os.chdir, test_support.TESTFN) - - def test_mkdir(self): - f = open(test_support.TESTFN, "w") - try: - self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN) - finally: - f.close() - os.unlink(test_support.TESTFN) - - def 
test_utime(self): - self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None) - - def test_chmod(self): - self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0) - -class TestInvalidFD(unittest.TestCase): - singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat", - "fstatvfs", "fsync", "tcgetpgrp", "ttyname"] - #singles.append("close") - #We omit close because it doesn'r raise an exception on some platforms - def get_single(f): - def helper(self): - if hasattr(os, f): - self.check(getattr(os, f)) - return helper - for f in singles: - locals()["test_"+f] = get_single(f) - - def check(self, f, *args): - try: - f(test_support.make_bad_fd(), *args) - except OSError as e: - self.assertEqual(e.errno, errno.EBADF) - else: - self.fail("%r didn't raise a OSError with a bad file descriptor" - % f) - - def test_isatty(self): - if hasattr(os, "isatty"): - self.assertEqual(os.isatty(test_support.make_bad_fd()), False) - - def test_closerange(self): - if hasattr(os, "closerange"): - fd = test_support.make_bad_fd() - # Make sure none of the descriptors we are about to close are - # currently valid (issue 6542). 
- for i in range(10): - try: os.fstat(fd+i) - except OSError: - pass - else: - break - if i < 2: - raise unittest.SkipTest( - "Unable to acquire a range of invalid file descriptors") - self.assertEqual(os.closerange(fd, fd + i-1), None) - - def test_dup2(self): - if hasattr(os, "dup2"): - self.check(os.dup2, 20) - - def test_fchmod(self): - if hasattr(os, "fchmod"): - self.check(os.fchmod, 0) - - def test_fchown(self): - if hasattr(os, "fchown"): - self.check(os.fchown, -1, -1) - - def test_fpathconf(self): - if hasattr(os, "fpathconf"): - self.check(os.fpathconf, "PC_NAME_MAX") - - def test_ftruncate(self): - if hasattr(os, "ftruncate"): - self.check(os.ftruncate, 0) - - def test_lseek(self): - if hasattr(os, "lseek"): - self.check(os.lseek, 0, 0) - - def test_read(self): - if hasattr(os, "read"): - self.check(os.read, 1) - - def test_tcsetpgrpt(self): - if hasattr(os, "tcsetpgrp"): - self.check(os.tcsetpgrp, 0) - - def test_write(self): - if hasattr(os, "write"): - self.check(os.write, " ") - -if sys.platform != 'win32': - class Win32ErrorTests(unittest.TestCase): - pass - - class PosixUidGidTests(unittest.TestCase): - if hasattr(os, 'setuid'): - def test_setuid(self): - if os.getuid() != 0: - self.assertRaises(os.error, os.setuid, 0) - self.assertRaises((OverflowError, OSError), os.setuid, 1<<32) - self.assertRaises(OverflowError, os.setuid, sys.maxint*3) - - if hasattr(os, 'setgid'): - def test_setgid(self): - if os.getuid() != 0: - self.assertRaises(os.error, os.setgid, 0) - self.assertRaises((OverflowError, OSError), os.setgid, 1<<32) - self.assertRaises(OverflowError, os.setgid, sys.maxint*3) - - if hasattr(os, 'seteuid'): - def test_seteuid(self): - if os.getuid() != 0: - self.assertRaises(os.error, os.seteuid, 0) - self.assertRaises((OverflowError, OSError), os.seteuid, 1<<32) - self.assertRaises(OverflowError, os.seteuid, sys.maxint*3) - - if hasattr(os, 'setegid'): - def test_setegid(self): - if os.getuid() != 0: - self.assertRaises(os.error, os.setegid, 
0) - self.assertRaises((OverflowError, OSError), os.setegid, 1<<32) - self.assertRaises(OverflowError, os.setegid, sys.maxint*3) - - if hasattr(os, 'setreuid'): - def test_setreuid(self): - if os.getuid() != 0: - self.assertRaises(os.error, os.setreuid, 0, 0) - self.assertRaises((OverflowError, OSError), - os.setreuid, 1<<32, 0) - self.assertRaises((OverflowError, OSError), - os.setreuid, 0, 1<<32) - self.assertRaises(OverflowError, os.setreuid, sys.maxint*3, 0) - self.assertRaises(OverflowError, os.setreuid, 0, sys.maxint*3) - - def test_setreuid_neg1(self): - # Needs to accept -1. We run this in a subprocess to avoid - # altering the test runner's process state (issue8045). - subprocess.check_call([ - sys.executable, '-c', - 'import os,sys;os.setreuid(-1,-1);sys.exit(0)']) - - if hasattr(os, 'setregid'): - def test_setregid(self): - if os.getuid() != 0: - self.assertRaises(os.error, os.setregid, 0, 0) - self.assertRaises((OverflowError, OSError), - os.setregid, 1<<32, 0) - self.assertRaises((OverflowError, OSError), - os.setregid, 0, 1<<32) - self.assertRaises(OverflowError, os.setregid, sys.maxint*3, 0) - self.assertRaises(OverflowError, os.setregid, 0, sys.maxint*3) - - def test_setregid_neg1(self): - # Needs to accept -1. We run this in a subprocess to avoid - # altering the test runner's process state (issue8045). - subprocess.check_call([ - sys.executable, '-c', - 'import os,sys;os.setregid(-1,-1);sys.exit(0)']) -else: - class PosixUidGidTests(unittest.TestCase): - pass - - at unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") -class Win32KillTests(unittest.TestCase): - def _kill(self, sig): - # Start sys.executable as a subprocess and communicate from the - # subprocess to the parent that the interpreter is ready. When it - # becomes ready, send *sig* via os.kill to the subprocess and check - # that the return code is equal to *sig*. 
- import ctypes - from ctypes import wintypes - import msvcrt - - # Since we can't access the contents of the process' stdout until the - # process has exited, use PeekNamedPipe to see what's inside stdout - # without waiting. This is done so we can tell that the interpreter - # is started and running at a point where it could handle a signal. - PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe - PeekNamedPipe.restype = wintypes.BOOL - PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle - ctypes.POINTER(ctypes.c_char), # stdout buf - wintypes.DWORD, # Buffer size - ctypes.POINTER(wintypes.DWORD), # bytes read - ctypes.POINTER(wintypes.DWORD), # bytes avail - ctypes.POINTER(wintypes.DWORD)) # bytes left - msg = "running" - proc = subprocess.Popen([sys.executable, "-c", - "import sys;" - "sys.stdout.write('{}');" - "sys.stdout.flush();" - "input()".format(msg)], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE) - self.addCleanup(proc.stdout.close) - self.addCleanup(proc.stderr.close) - self.addCleanup(proc.stdin.close) - - count, max = 0, 100 - while count < max and proc.poll() is None: - # Create a string buffer to store the result of stdout from the pipe - buf = ctypes.create_string_buffer(len(msg)) - # Obtain the text currently in proc.stdout - # Bytes read/avail/left are left as NULL and unused - rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()), - buf, ctypes.sizeof(buf), None, None, None) - self.assertNotEqual(rslt, 0, "PeekNamedPipe failed") - if buf.value: - self.assertEqual(msg, buf.value) - break - time.sleep(0.1) - count += 1 - else: - self.fail("Did not receive communication from the subprocess") - - os.kill(proc.pid, sig) - self.assertEqual(proc.wait(), sig) - - def test_kill_sigterm(self): - # SIGTERM doesn't mean anything special, but make sure it works - self._kill(signal.SIGTERM) - - def test_kill_int(self): - # os.kill on Windows can take an int which gets set as the exit code - self._kill(100) - - 
def _kill_with_event(self, event, name): - tagname = "test_os_%s" % uuid.uuid1() - m = mmap.mmap(-1, 1, tagname) - m[0] = '0' - # Run a script which has console control handling enabled. - proc = subprocess.Popen([sys.executable, - os.path.join(os.path.dirname(__file__), - "win_console_handler.py"), tagname], - creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) - # Let the interpreter startup before we send signals. See #3137. - count, max = 0, 20 - while count < max and proc.poll() is None: - if m[0] == '1': - break - time.sleep(0.5) - count += 1 - else: - self.fail("Subprocess didn't finish initialization") - os.kill(proc.pid, event) - # proc.send_signal(event) could also be done here. - # Allow time for the signal to be passed and the process to exit. - time.sleep(0.5) - if not proc.poll(): - # Forcefully kill the process if we weren't able to signal it. - os.kill(proc.pid, signal.SIGINT) - self.fail("subprocess did not stop on {}".format(name)) - - @unittest.skip("subprocesses aren't inheriting CTRL+C property") - def test_CTRL_C_EVENT(self): - from ctypes import wintypes - import ctypes - - # Make a NULL value by creating a pointer with no argument. - NULL = ctypes.POINTER(ctypes.c_int)() - SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler - SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int), - wintypes.BOOL) - SetConsoleCtrlHandler.restype = wintypes.BOOL - - # Calling this with NULL and FALSE causes the calling process to - # handle CTRL+C, rather than ignore it. This property is inherited - # by subprocesses. 
- SetConsoleCtrlHandler(NULL, 0) - - self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT") - - def test_CTRL_BREAK_EVENT(self): - self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT") - - -def test_main(): - test_support.run_unittest( - FileTests, - TemporaryFileTests, - StatAttributeTests, - EnvironTests, - WalkTests, - MakedirTests, - DevNullTests, - URandomTests, - Win32ErrorTests, - TestInvalidFD, - PosixUidGidTests, - Win32KillTests - ) - -if __name__ == "__main__": - test_main() From commits-noreply at bitbucket.org Fri Apr 29 06:32:55 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Fri, 29 Apr 2011 06:32:55 +0200 (CEST) Subject: [pypy-svn] pypy default: Fixed test (missing import). Message-ID: <20110429043255.9F2F0282B9E@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43746:aefc70438132 Date: 2011-04-29 00:32 -0400 http://bitbucket.org/pypy/pypy/changeset/aefc70438132/ Log: Fixed test (missing import). diff --git a/pypy/rlib/rsre/test/test_char.py b/pypy/rlib/rsre/test/test_char.py --- a/pypy/rlib/rsre/test/test_char.py +++ b/pypy/rlib/rsre/test/test_char.py @@ -1,5 +1,5 @@ from pypy.rlib.rsre import rsre_char -from pypy.rlib.rsre.rsre_char import SRE_FLAG_UNICODE +from pypy.rlib.rsre.rsre_char import SRE_FLAG_LOCALE, SRE_FLAG_UNICODE def setup_module(mod): from pypy.module.unicodedata import unicodedb From commits-noreply at bitbucket.org Fri Apr 29 10:09:39 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 10:09:39 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: yet another 'call this the Rpython toolchain' edit Message-ID: <20110429080939.84528282B9E@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43747:258cd23ba117 Date: 2011-04-29 10:09 +0200 http://bitbucket.org/pypy/pypy/changeset/258cd23ba117/ Log: yet another 'call this the Rpython toolchain' edit diff --git a/pypy/doc/configuration.rst b/pypy/doc/configuration.rst --- 
a/pypy/doc/configuration.rst +++ b/pypy/doc/configuration.rst @@ -1,25 +1,26 @@ -============================= + ============================= PyPy's Configuration Handling ============================= Due to more and more available configuration options it became quite annoying to hand the necessary options to where they are actually used and even more -annoying to add new options. To circumvent these problems the configuration -management was introduced. There all the necessary options are stored into an -configuration object, which is available nearly everywhere in the translation -toolchain and in the standard interpreter so that adding new options becomes +annoying to add new options. To circumvent these problems configuration +management was introduced. There all the necessary options are stored in a +configuration object, which is available nearly everywhere in the `RPython +toolchain`_ and in the standard interpreter so that adding new options becomes trivial. Options are organized into a tree. Configuration objects can be created in different ways, there is support for creating an optparse command line parser automatically. +_`RPython toolchain`: translation.html Main Assumption =============== Configuration objects are produced at the entry points and handed down to where they are actually used. This keeps configuration local but available -everywhere and consistent. The configuration values can be created using the -command line (already implemented) or a file (still to be done). +everywhere and consistent. The configuration values are created using the +command line. API Details @@ -183,11 +184,12 @@ The usage of config objects in PyPy =================================== -The two large parts of PyPy, the standard interpreter and the translation +The two large parts of PyPy, the Python interpreter_ and the `RPython +toolchain`_ toolchain, have two separate sets of options. 
The translation toolchain options can be found on the ``config`` attribute of all ``TranslationContext`` instances and are described in `pypy/config/translationoption.py`_. The interpreter options are attached to the object space, also under the name ``config`` and are described in `pypy/config/pypyoption.py`_. - +_interpreter: interpreter.html .. include:: _ref.txt diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -1,6 +1,6 @@ -===================================== +==================================== Coding Guide -===================================== +==================================== .. contents:: From commits-noreply at bitbucket.org Fri Apr 29 11:11:49 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 11:11:49 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: add sections about downloading a prebuilt pypy and installing it; also, move the virtualenv instructions before the cloning the source section Message-ID: <20110429091149.B3506282B9E@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43748:f324f4204faa Date: 2011-04-29 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/f324f4204faa/ Log: add sections about downloading a prebuilt pypy and installing it; also, move the virtualenv instructions before the cloning the source section diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -24,14 +24,76 @@ Just the facts ============== +Download a pre-built PyPy +------------------------- + +The quickest way to start using PyPy is to download a prebuilt binary for your +OS and architecture. You can either use the `most recent release`_ or one of +our `development nightly build`_. Please note that the nightly builds are not +guaranteed to be as stable as official releases, use them at your own risk. + +.. 
_`most recent release`: http://pypy.org/download.html +.. _`development nightly build`: http://buildbot.pypy.org/nightly/trunk/ + +Installing PyPy +--------------- + +PyPy is ready to be executed as soon as you unpack the tarball or the zip +file, with no need install it in any specific location:: + + $ tar xf pypy-1.5-linux.tar.bz2 + + $ ./pypy-1.5-linux/bin/pypy + Python 2.7.1 (?, Apr 27 2011, 12:44:21) + [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + And now for something completely different: ``implementing LOGO in LOGO: + "turtles all the way down"'' + >>>> + +If you want to make PyPy available system-wide, you can put a symlink to the +``pypy`` executable in ``/usr/local/bin``. It is important to put a symlink +and not move the binary there, else PyPy would not be able to find its +library. + +If you want to install 3rd party libraries, the most convenient way is to +install setuptools_, which will bring ``easy_install`` to you:: + + $ wget http://peak.telecommunity.com/dist/ez_setup.py + + $ ./pypy-1.5-linux/bin/pypy ez_setup.py + + $ ls ./pypy-1.5-linux/bin/ + easy_install easy_install-2.7 pypy + +3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and +the scripts in ``pypy-1.5-linux/bin``. + +Installing using virtualenv +--------------------------- + +It is often convenient to run pypy inside a virtualenv. To do this +you need a recent version of virtualenv -- 1.6.1 or greater. You can +then install PyPy both from a precompiled tarball or from a mercurial +checkout:: + + # from a tarball + $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env + + # from the mercurial checkout + $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env + +Note that bin/python is now a symlink to bin/pypy. + + Clone the repository -------------------- -Before you can play with PyPy, you will need to obtain a copy -of the sources. 
This can be done either by `downloading them -from the download page`_ or by checking them out from the -repository using mercurial. We suggest using mercurial if one -wants to access the current development. +If you prefer to `compile PyPy by yourself`_, or if you want to modify it, you +will need to obtain a copy of the sources. This can be done either by +`downloading them from the download page`_ or by checking them out from the +repository using mercurial. We suggest using mercurial if one wants to access +the current development. .. _`downloading them from the download page`: http://pypy.org/download.html @@ -55,28 +117,10 @@ where XXXXX is the revision id. + +.. _`compile PyPy by yourself`: getting-started-python.html .. _`our nightly tests:`: http://buildbot.pypy.org/summary?branch= -If you want to commit to our repository on bitbucket, you will have to -install subversion in addition to mercurial. - -Installing using virtualenv ---------------------------- - -It is often convenient to run pypy inside a virtualenv. To do this -you need a recent version of virtualenv -- 1.5 or greater. You can -then install PyPy both from a precompiled tarball or from a mercurial -checkout:: - - # from a tarball - $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env - - # from the mercurial checkout - $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env - -Note that bin/python is now a symlink to bin/pypy. 
- - Where to go from here ---------------------- From commits-noreply at bitbucket.org Fri Apr 29 11:30:23 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 29 Apr 2011 11:30:23 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (all): planning for today Message-ID: <20110429093023.3C0F0282B9E@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3542:26c2ca116486 Date: 2011-04-29 11:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/26c2ca116486/ Log: (all): planning for today diff --git a/sprintinfo/gothenburg-2011/planning.txt b/sprintinfo/gothenburg-2011/planning.txt --- a/sprintinfo/gothenburg-2011/planning.txt +++ b/sprintinfo/gothenburg-2011/planning.txt @@ -14,17 +14,20 @@ - release 1.5 - merge in 2.7.1 stuff DONE - documentation (Anto, Laura) MORE PROGRESS - - document minimark (Armin) + - document minimark DONE - document __builtins__ behaviour in cpython-differences - - mercurial in the coding-guide - - release announcement (Anto, Laura) + - mercurial in the coding-guide (Anto) + - release announcement (Anto, Carl Friedrich) - look at the tracker - - investigate Mac problems IN PROGRESS (Iko, Dario, Romain) + - start the Mac build (Dario, Romain) + - investigate Mac problems DONE? 
- find out whether cProfile is giving interesting information (Anto, Lukas) - - add a warning about Windows 64 (Kristján, Armin) + - add a warning about Windows 64 (Kristján, Carl Friedrich) SHOULD-BE-PUSHED + - do a full build on Windows (Armin, Kristján) -- discuss stackless+jit integration (Armin, Kristján) -- discuss embedding issues (Armin, Kristján) +- discuss stackless+jit integration HAPPENED +- discuss embedding issues HAPPENED +- start playing with stack slicing (Armin, Kristján) - branches to be integrated/finished afterwards - 32-on-64 From commits-noreply at bitbucket.org Fri Apr 29 11:37:25 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 11:37:25 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix minor english warts Message-ID: <20110429093725.58BF936C20B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43749:b3a9ff62cdb7 Date: 2011-04-29 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/b3a9ff62cdb7/ Log: fix minor english warts diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -93,7 +93,7 @@ Running application examples ============================== -`pyglet`_ is known to run. We had some success also with pygame-ctypes which is not maintained anymore and with a snapshot of the experimental pysqlite-ctypes. We will only describe how to run the pyglet examples. +`pyglet`_ is known to run. We also had some success with pygame-ctypes (which is no longer maintained) and with a snapshot of the experimental pysqlite-ctypes. We will only describe how to run the pyglet examples. 
pyglet ------- From commits-noreply at bitbucket.org Fri Apr 29 11:37:26 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 11:37:26 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110429093726.A712A36C20B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43750:8466a8fdb62c Date: 2011-04-29 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/8466a8fdb62c/ Log: merge heads diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -24,14 +24,76 @@ Just the facts ============== +Download a pre-built PyPy +------------------------- + +The quickest way to start using PyPy is to download a prebuilt binary for your +OS and architecture. You can either use the `most recent release`_ or one of +our `development nightly build`_. Please note that the nightly builds are not +guaranteed to be as stable as official releases, use them at your own risk. + +.. _`most recent release`: http://pypy.org/download.html +.. _`development nightly build`: http://buildbot.pypy.org/nightly/trunk/ + +Installing PyPy +--------------- + +PyPy is ready to be executed as soon as you unpack the tarball or the zip +file, with no need install it in any specific location:: + + $ tar xf pypy-1.5-linux.tar.bz2 + + $ ./pypy-1.5-linux/bin/pypy + Python 2.7.1 (?, Apr 27 2011, 12:44:21) + [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + And now for something completely different: ``implementing LOGO in LOGO: + "turtles all the way down"'' + >>>> + +If you want to make PyPy available system-wide, you can put a symlink to the +``pypy`` executable in ``/usr/local/bin``. It is important to put a symlink +and not move the binary there, else PyPy would not be able to find its +library. 
+ +If you want to install 3rd party libraries, the most convenient way is to +install setuptools_, which will bring ``easy_install`` to you:: + + $ wget http://peak.telecommunity.com/dist/ez_setup.py + + $ ./pypy-1.5-linux/bin/pypy ez_setup.py + + $ ls ./pypy-1.5-linux/bin/ + easy_install easy_install-2.7 pypy + +3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and +the scripts in ``pypy-1.5-linux/bin``. + +Installing using virtualenv +--------------------------- + +It is often convenient to run pypy inside a virtualenv. To do this +you need a recent version of virtualenv -- 1.6.1 or greater. You can +then install PyPy both from a precompiled tarball or from a mercurial +checkout:: + + # from a tarball + $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env + + # from the mercurial checkout + $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env + +Note that bin/python is now a symlink to bin/pypy. + + Clone the repository -------------------- -Before you can play with PyPy, you will need to obtain a copy -of the sources. This can be done either by `downloading them -from the download page`_ or by checking them out from the -repository using mercurial. We suggest using mercurial if one -wants to access the current development. +If you prefer to `compile PyPy by yourself`_, or if you want to modify it, you +will need to obtain a copy of the sources. This can be done either by +`downloading them from the download page`_ or by checking them out from the +repository using mercurial. We suggest using mercurial if one wants to access +the current development. .. _`downloading them from the download page`: http://pypy.org/download.html @@ -55,28 +117,10 @@ where XXXXX is the revision id. + +.. _`compile PyPy by yourself`: getting-started-python.html .. 
_`our nightly tests:`: http://buildbot.pypy.org/summary?branch= -If you want to commit to our repository on bitbucket, you will have to -install subversion in addition to mercurial. - -Installing using virtualenv ---------------------------- - -It is often convenient to run pypy inside a virtualenv. To do this -you need a recent version of virtualenv -- 1.5 or greater. You can -then install PyPy both from a precompiled tarball or from a mercurial -checkout:: - - # from a tarball - $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env - - # from the mercurial checkout - $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env - -Note that bin/python is now a symlink to bin/pypy. - - Where to go from here ---------------------- From commits-noreply at bitbucket.org Fri Apr 29 11:52:35 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 11:52:35 +0200 (CEST) Subject: [pypy-svn] buildbot default: Build this one nightly. Message-ID: <20110429095235.AF166282B9E@codespeak.net> Author: Armin Rigo Branch: Changeset: r495:361f7d2cc0bb Date: 2011-04-29 11:51 +0200 http://bitbucket.org/pypy/buildbot/changeset/361f7d2cc0bb/ Log: Build this one nightly. 
diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -207,7 +207,7 @@ JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on tannit64, uses 1 core OJITLINUX32, # on tannit32, uses 1 core - APPLVLWIN32, # on bigboard + JITWIN32, # on bigboard STACKLESSAPPLVLFREEBSD64, # on headless JITMACOSX64, # on mvt's machine ], hour=4, minute=0), From commits-noreply at bitbucket.org Fri Apr 29 11:52:40 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 11:52:40 +0200 (CEST) Subject: [pypy-svn] buildbot default: merge heads Message-ID: <20110429095240.B9C74282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r496:c5111efe6dd9 Date: 2011-04-29 11:52 +0200 http://bitbucket.org/pypy/buildbot/changeset/c5111efe6dd9/ Log: merge heads diff --git a/bitbucket_hook/hook.py b/bitbucket_hook/hook.py --- a/bitbucket_hook/hook.py +++ b/bitbucket_hook/hook.py @@ -1,307 +1,53 @@ import os.path import py -import smtplib -import socket import subprocess import sys -from subprocess import Popen, PIPE +import time -LOCAL_REPOS = py.path.local(__file__).dirpath('repos') -REMOTE_BASE = 'http://bitbucket.org' +from .main import app +from . import scm +# +from . import stdoutlog +from . import irc +from . 
import mail -if socket.gethostname() == 'viper': - # for debugging, antocuni's settings - SMTP_SERVER = "out.alice.it" - SMTP_PORT = 25 - ADDRESS = 'anto.cuni at gmail.com' - # - CHANNEL = '#test' - BOT = '/tmp/commit-bot/message' -else: - # real settings, (they works on codespeak at least) - SMTP_SERVER = 'localhost' - SMTP_PORT = 25 - ADDRESS = 'pypy-svn at codespeak.net' - # - CHANNEL = '#pypy' - BOT = '/svn/hooks/commit-bot/message' -hgexe = str(py.path.local.sysfind('hg')) +HANDLERS = [ + stdoutlog.handle_commit, + irc.handle_commit, + mail.handle_commit + ] -TEMPLATE = u"""\ -Author: {author} -Branch: {branches} -Changeset: r{rev}:{node|short} -Date: {date|isodate} -%(url)s +def check_for_local_repo(local_repo, remote_repo, owner): + if local_repo.check(dir=True): + return True + if owner == app.config['DEFAULT_USER']: + print >> sys.stderr, 'Automatic initial clone of %s' % remote_repo + scm.hg('clone', str(remote_repo), str(local_repo)) + return True + return False -Log:\t{desc|fill68|tabindent} +def get_commits(payload, seen_nodes=set()): + import operator + commits = sorted(payload['commits'], + key=operator.itemgetter('revision')) + for commit in commits: + node = commit['raw_node'] + if node in seen_nodes: + continue + seen_nodes.add(node) + yield commit -""" -def getpaths(files, listfiles=False): - - # Handle empty input - if not files: - return '', '' - files = [f['file'] for f in files] - if not any(files): - return '', '' - - dirname = os.path.dirname - basename = os.path.basename - - common_prefix = [dirname(f) for f in files] - - # Single file, show its full path - if len(files) == 1: - common_prefix = files[0] - listfiles = False - - else: - common_prefix = [path.split(os.sep) for path in common_prefix] - common_prefix = os.sep.join(os.path.commonprefix(common_prefix)) - if common_prefix and not common_prefix.endswith('/'): - common_prefix += '/' - - if listfiles: - # XXX Maybe should return file paths relative to prefix? Or TMI? 
- filenames = [basename(f) for f in files if f and basename(f)] - filenames = ' M(%s)' % ', '.join(filenames) - else: - filenames = '' - return common_prefix, filenames - - -class BitbucketHookHandler(object): - Popen, PIPE = Popen, PIPE - def _hgexe(self, argv): - proc = self.Popen([hgexe] + list(argv), stdout=self.PIPE, - stderr=self.PIPE) - stdout, stderr = proc.communicate() - ret = proc.wait() - return stdout, stderr, ret - - def hg(self, *argv): - argv = map(str, argv) - stdout, stderr, ret = self._hgexe(argv) - if ret != 0: - print >> sys.stderr, 'error: hg', ' '.join(argv) - print >> sys.stderr, stderr - raise Exception('error when executing hg') - return unicode(stdout, encoding='utf-8', errors='replace') - - SMTP = smtplib.SMTP - def send(self, from_, to, subject, body, test=False): - from email.mime.text import MIMEText - # Is this a valid workaround for unicode errors? - body = body.encode('ascii', 'xmlcharrefreplace') - msg = MIMEText(body, _charset='utf-8') - msg['From'] = from_ - msg['To'] = to - msg['Subject'] = subject - if test: - print '#' * 20 - print "Email contents:\n" - print from_ - print to - print msg.get_payload(decode=True) - else: - smtp = self.SMTP(SMTP_SERVER, SMTP_PORT) - smtp.sendmail(from_, [to], msg.as_string()) - - call_subprocess = staticmethod(subprocess.call) - - def send_irc_message(self, message, test=False): - if test: - print message + '\n' - else: - return self.call_subprocess([BOT, CHANNEL, message]) - - def handle(self, payload, test=False): - path = payload['repository']['absolute_url'] - self.payload = payload - self.local_repo = LOCAL_REPOS.join(path) - self.remote_repo = REMOTE_BASE + path - if not self.local_repo.check(dir=True): - print >> sys.stderr, 'Ignoring unknown repo', path - return - self.hg('pull', '-R', self.local_repo) - self.handle_irc_message(test) - self.handle_diff_email(test) - - USE_COLOR_CODES = True - LISTFILES = False - def handle_irc_message(self, test=False): - import operator - commits = 
sorted(self.payload['commits'], - key=operator.itemgetter('revision')) - if test: - print "#" * 20 - print "IRC messages:" - - for commit in commits: - author = commit['author'] - branch = commit['branch'] - node = commit['node'] - - files = commit.get('files', []) - common_prefix, filenames = getpaths(files, self.LISTFILES) - pathlen = len(common_prefix) + len(filenames) + 2 - common_prefix = '/' + common_prefix - - if self.USE_COLOR_CODES: - author = '\x0312%s\x0F' % author # in blue - branch = '\x02%s\x0F' % branch # in bold - node = '\x0311%s\x0F' % node # in azure - common_prefix = '\x0315%s\x0F' % common_prefix # in gray - - message = commit['message'].replace('\n', ' ') - fields = (author, branch, node, common_prefix, filenames) - part1 = '%s %s %s %s%s: ' % fields - totallen = 160 + pathlen - if len(message) + len(part1) <= totallen: - irc_msg = part1 + message - else: - maxlen = totallen - (len(part1) + 3) - irc_msg = part1 + message[:maxlen] + '...' - self.send_irc_message(irc_msg, test) - - def handle_diff_email(self, test=False): - import operator - commits = sorted(self.payload['commits'], - key=operator.itemgetter('revision')) - for commit in commits: - self.send_diff_for_commit(commit, test) - - def send_diff_for_commit(self, commit, test=False): - hgid = commit['raw_node'] - sender = commit['author'] + ' ' - lines = commit['message'].splitlines() - line0 = lines and lines[0] or '' - reponame = self.payload['repository']['name'] - # TODO: maybe include the modified paths in the subject line? 
- url = self.remote_repo + 'changeset/' + commit['node'] + '/' - template = TEMPLATE % {'url': url} - subject = '%s %s: %s' % (reponame, commit['branch'], line0) - body = self.hg('-R', self.local_repo, 'log', '-r', hgid, - '--template', template) - diff = self.get_diff(hgid, commit['files']) - body = body+diff - self.send(sender, ADDRESS, subject, body, test) - - def get_diff(self, hgid, files): - import re - binary = re.compile('^GIT binary patch$', re.MULTILINE) - files = [item['file'] for item in files] - lines = [] - for filename in files: - out = self.hg('-R', self.local_repo, 'diff', '--git', '-c', hgid, - self.local_repo.join(filename)) - match = binary.search(out) - if match: - # it's a binary patch, omit the content - out = out[:match.end()] - out += u'\n[cut]' - lines.append(out) - return u'\n'.join(lines) - - -if __name__ == '__main__': - import hook as hookfile - repopath = os.path.dirname(os.path.dirname(hookfile.__file__)) - print 'Repository path:', repopath - test_payload = {u'repository': {u'absolute_url': '', - u'name': u'test', - u'owner': u'antocuni', - u'slug': u'test', - u'website': u''}, - u'user': u'antocuni'} - - commits = [{u'author': u'arigo', - u'branch': u'default', - u'files': [], - u'message': u'Merge heads.', - u'node': u'00ae063c6b8c', - u'parents': [u'278760e9c560', u'29f1ff96548d'], - u'raw_author': u'Armin Rigo ', - u'raw_node': u'00ae063c6b8c13d873d92afc5485671f6a944077', - u'revision': 403, - u'size': 0, - u'timestamp': u'2011-01-09 13:07:24'}, - - {u'author': u'antocuni', - u'branch': u'default', - u'files': [{u'file': u'bitbucket_hook/hook.py', u'type': u'modified'}], - u'message': u"don't send newlines to irc", - u'node': u'e17583fbfa5c', - u'parents': [u'69e9eac01cf6'], - u'raw_author': u'Antonio Cuni ', - u'raw_node': u'e17583fbfa5c5636b5375a5fc81f3d388ce1b76e', - u'revision': 399, - u'size': 19, - u'timestamp': u'2011-01-07 17:42:13'}, - - {u'author': u'antocuni', - u'branch': u'default', - u'files': [{u'file': 
u'.hgignore', u'type': u'added'}], - u'message': u'ignore irrelevant files', - u'node': u'5cbd6e289c04', - u'parents': [u'3a7c89443fc8'], - u'raw_author': u'Antonio Cuni ', - u'raw_node': u'5cbd6e289c043c4dd9b6f55b5ec1c8d05711c6ad', - u'revision': 362, - u'size': 658, - u'timestamp': u'2010-11-04 16:34:31'}, - - {u'author': u'antocuni', - u'branch': u'default', - u'files': [{u'file': u'bitbucket_hook/hook.py', u'type': u'modified'}, - {u'file': u'bitbucket_hook/__init__.py', u'type': u'added'}, - {u'file': u'bitbucket_hook/test/__init__.py', - u'type': u'added'}, - {u'file': u'bitbucket_hook/test/test_hook.py', - u'type': u'added'}], - u'message': u'partially refactor the hook to be more testable, and write a test for the fix in 12cc0caf054d', - u'node': u'9c7bc068df88', - u'parents': [u'12cc0caf054d'], - u'raw_author': u'Antonio Cuni ', - u'raw_node': u'9c7bc068df8850f4102c610d2bee3cdef67b30e6', - u'revision': 391, - u'size': 753, - u'timestamp': u'2010-12-19 14:45:44'}] - - - test_payload[u'commits'] = commits - -## # To regenerate: -## try: -## from json import loads # 2.6 -## except ImportError: -## from simplejson import loads -## -## from urllib2 import urlopen -## url = ("https://api.bitbucket.org/1.0/repositories/pypy/buildbot/" -## "changesets/%s/") -## -## # Representative changesets -## mergeheads = u'00ae063c6b8c' -## singlefilesub = u'e17583fbfa5c' -## root = u'5cbd6e289c04' -## multiadd = u'9c7bc068df88' -## test_nodes = mergeheads, singlefilesub, root, multiadd -## -## commits = [] -## for commit in test_nodes: -## req = urlopen(url % commit) -## payload = req.read() -## req.close() -## commits.append(loads(payload)) -## -## test_payload['commits'] = commits - - LOCAL_REPOS = py.path.local(repopath) - - hook = BitbucketHookHandler() - hook.USE_COLOR_CODES = False - hook.handle(test_payload, test=True) +def handle(payload, test=False): + path = payload['repository']['absolute_url'] + owner = payload['repository']['owner'] + local_repo = 
app.config['LOCAL_REPOS'].join(path) + remote_repo = app.config['REMOTE_BASE'] + path + if not check_for_local_repo(local_repo, remote_repo, owner): + print >> sys.stderr, 'Ignoring unknown repo', path + return + scm.hg('pull', '-R', local_repo) + for commit in get_commits(payload): + for handler in HANDLERS: + handler(payload, commit, test) diff --git a/bitbucket_hook/test/test_hook.py b/bitbucket_hook/test/test_hook.py --- a/bitbucket_hook/test/test_hook.py +++ b/bitbucket_hook/test/test_hook.py @@ -1,144 +1,31 @@ # -*- encoding: utf-8 -*- +import py +import pytest +from bitbucket_hook import hook, scm, mail, irc -from bitbucket_hook.hook import BitbucketHookHandler, getpaths +#XXX +hook.app.config['USE_COLOR_CODES'] = False -class BaseHandler(BitbucketHookHandler): - - def __init__(self): - self.mails = [] - - def send(self, from_, to, subject, body, test=False): - self.mails.append((from_, to, subject, body)) - - -def test_non_ascii_encoding_guess_utf8(): - class MyHandler(BaseHandler): - def _hgexe(self, argv): - return u'späm'.encode('utf-8'), '', 0 - # - handler = MyHandler() - stdout = handler.hg('foobar') - assert type(stdout) is unicode - assert stdout == u'späm' - -def test_non_ascii_encoding_invalid_utf8(): - class MyHandler(BaseHandler): - def _hgexe(self, argv): - return '\xe4aa', '', 0 # invalid utf-8 string - # - handler = MyHandler() - stdout = handler.hg('foobar') - assert type(stdout) is unicode - assert stdout == u'\ufffdaa' def test_sort_commits(): - class MyHandler(BaseHandler): - def __init__(self): - self.sent_commits = [] - def send_diff_for_commit(self, commit, test=False): - self.sent_commits.append(commit['node']) # - handler = MyHandler() - handler.payload = { - 'commits': [{'revision': 43, 'node': 'second'}, - {'revision': 42, 'node': 'first'}] - } - handler.handle_diff_email() - assert handler.sent_commits == ['first', 'second'] + seen_nodes = set() + payload = { + 'commits': [ + {'revision': 43, 'node': 'second', 'raw_node': 
'first'}, + {'revision': 42, 'node': 'first', 'raw_node': 'second'}, + ], + } + commits = hook.get_commits(payload, seen_nodes) + commits = [x['node'] for x in commits] -def test_getpaths(): - d = dict + assert commits == ['first', 'second'] - barefile = [d(file='file')] - distinct = [d(file='path1/file1'), d(file='path2/file2'), - d(file='path3/file')] - shared = [d(file='path/file1'), d(file='path/file2'), - d(file='path/file')] - deepfile = [d(file='a/long/path/to/deepfile.py')] - slashesfile = [d(file='/slashesfile/')] - slashleft = [d(file='/slashleft')] - slashright = [d(file='slashright/')] +LONG_MESSAGE = u'This is a test with a long message: ' + 'x' * 1000 +LONG_CUT = LONG_MESSAGE[:160 - 29] - nocommon = distinct + [d(file='path4/file')] - nocommonplusroot = distinct + barefile - - common = [d(file='some/path/to/file'), d(file='some/path/to/deeper/file'), - d(file='some/path/to/anotherfile'), d(file='some/path/to/afile')] - commonplusroot = shared + barefile - - empty = d(file='') - nocommonplusempty = distinct + [empty] - commonplusempty = shared + [empty] - nocommonplusslash = distinct + [d(file='path4/dir/')] - commonplusslash = shared + [d(file='path/dir/')] - - pypydoubleslash = [d(file='pypy/jit/metainterp/opt/u.py'), - d(file='pypy/jit/metainterp/test/test_c.py'), - d(file='pypy/jit/metainterp/test/test_o.py')] - - pypyempty = [d(file='pypy/rlib/rdtoa.py'), - d(file='pypy/rlib/test/test_rdtoa.py')] - - nothing = ('', '') - - # (input, expected output) for listfiles=False - files_expected = [([], nothing), - ([empty], nothing), - ([empty, empty], nothing), - (barefile, ('file', '')), - (deepfile, ('a/long/path/to/deepfile.py', '')), - (slashesfile, ('/slashesfile/', '')), - (slashleft, ('/slashleft', '')), - (slashright, ('slashright/', '')), - (nocommon, nothing), - (nocommonplusroot, nothing), - (nocommonplusempty, nothing), - (common, ('some/path/to/', '')), - (commonplusroot, nothing), - (commonplusempty, nothing), - (nocommonplusslash, nothing), 
- (commonplusslash, ('path/', '')), - (pypydoubleslash, ('pypy/jit/metainterp/', '')), - (pypyempty, ('pypy/rlib/', '')), - ] - - for f, wanted in files_expected: - assert getpaths(f) == wanted - - # (input, expected output) for listfiles=True - files_expected = [([], nothing), - ([empty], nothing), - ([empty, empty], nothing), - (barefile, ('file', '')), - (deepfile, ('a/long/path/to/deepfile.py', '')), - (slashesfile, ('/slashesfile/', '')), - (slashleft, ('/slashleft', '')), - (slashright, ('slashright/', '')), - (nocommon, ('', ' M(file1, file2, file, file)')), - (nocommonplusroot, ('', ' M(file1, file2, file, file)')), - (nocommonplusempty, ('',' M(file1, file2, file)')), - (common, ('some/path/to/', - ' M(file, file, anotherfile, afile)')), - (commonplusroot, ('', ' M(file1, file2, file, file)')), - (commonplusempty, ('',' M(file1, file2, file)')), - (nocommonplusslash, ('',' M(file1, file2, file)')), - (commonplusslash, ('path/',' M(file1, file2, file)')), - (pypydoubleslash, ('pypy/jit/metainterp/', - ' M(u.py, test_c.py, test_o.py)')), - (pypyempty, ('pypy/rlib/', - ' M(rdtoa.py, test_rdtoa.py)')), - ] - - for f, wanted in files_expected: - assert getpaths(f, listfiles=True) == wanted - - - -LONG_MESSAGE = u'This is a test with a long message: ' + 'x'*1000 -LONG_CUT = LONG_MESSAGE[:160-29] - def irc_cases(payload=None): if payload is None: @@ -154,12 +41,12 @@ d(file='my/file3')] single_file_deep = [d(file='path/to/single')] - cases = [(no_file, ''), # No diff - (single_file,'single'), # Single file + cases = [(no_file, ''), # No diff + (single_file, 'single'), # Single file (multiple_files, ''), # No common prefix - (multiple_files_subdir, 'path/'), # Common prefix - (multiple_files_subdir_root, ''), # No common subdir, file in root - (single_file_deep,'path/to/single') # Single file in deep path + (multiple_files_subdir, 'path/'), # Common prefix + (multiple_files_subdir_root, ''), # No common subdir file in root + (single_file_deep, 'path/to/single'), # 
Single file in deep path ] author = u'antocuni' @@ -171,7 +58,7 @@ for i, (case, snippet) in enumerate(cases): rev = 44 + i - node = chr(97+i) + 'xxyyy' + node = chr(97 + i) + 'xxyyy' raw_node = node * 2 expected.append(expected_template % (node, snippet, LONG_CUT)) commits.append(d(revision=rev, files=case, author=author, @@ -181,51 +68,64 @@ return payload, expected -def test_irc_message(): - class MyHandler(BaseHandler): - USE_COLOR_CODES = False - def __init__(self): - self.messages = [] - def send_irc_message(self, message, test=False): - self.messages.append(message) +def test_irc_message(monkeypatch, messages): + payload = { + 'repository': { + 'owner': 'pypy', + 'name': 'pypy', + }, + 'commits': [ + { + 'revision': 42, + 'branch': u'default', + 'author': u'antocuni', + 'message': u'this is a test', + 'node': 'abcdef', + 'raw_node': 'abcdef', + }, + { + 'revision': 43, + 'author': u'antocuni', + 'branch': u'mybranch', + 'message': LONG_MESSAGE, + 'node': 'xxxyyy', + 'raw_node': 'xxxyyy', + }, + ] + } - handler = MyHandler() - handler.payload = { - 'commits': [{'revision': 42, - 'branch': u'default', - 'author': u'antocuni', - 'message': u'this is a test', - 'node': 'abcdef' - }, - {'revision': 43, - 'author': u'antocuni', - 'branch': u'mybranch', - 'message': LONG_MESSAGE, - 'node': 'xxxyyy' - } - ]} + payload, expected = irc_cases(payload) + commits = payload['commits'] + irc.handle_commit(payload, commits[0]) + irc.handle_commit(payload, commits[1]) - handler.payload, expected = irc_cases(handler.payload) - handler.handle_irc_message() - - msg1, msg2 = handler.messages[:2] + msg1, msg2 = messages[:2] assert msg1 == 'antocuni default abcdef /: this is a test' x = 'antocuni mybranch xxxyyy /: %s...' 
% LONG_CUT assert msg2 == x - for got, wanted in zip(handler.messages[2:], expected): + for got, wanted in zip(messages[2:], expected): assert got == wanted -def noop(*args, **kwargs): pass + +def noop(*args, **kwargs): + pass + + class mock: __init__ = noop - def communicate(*args, **kwargs): return '1', 2 - def wait(*args, **kwargs): return 0 + + def communicate(*args, **kwargs): + return '1', 2 + + def wait(*args, **kwargs): + return 0 + sendmail = noop -def test_handle(): - handler = BitbucketHookHandler() + +def test_handle(monkeypatch): commits, _ = irc_cases() test_payload = {u'repository': {u'absolute_url': '', u'name': u'test', @@ -235,14 +135,57 @@ u'user': u'antocuni', 'commits': commits['commits']} - handler.call_subprocess = noop - handler.Popen = mock - handler.SMTP = mock + monkeypatch.setattr(scm, 'Popen', mock) + monkeypatch.setattr(irc.subprocess, 'call', noop) + monkeypatch.setattr(mail, 'SMTP', mock) - handler.handle(test_payload) - handler.handle(test_payload, test=True) + hook.handle(test_payload) + hook.handle(test_payload, test=True) - handler.LISTFILES = True - handler.handle(test_payload) - handler.handle(test_payload, test=True) + hook.app.config['LISTFILES'] = True + hook.handle(test_payload) + hook.handle(test_payload, test=True) + +def test_handle_unknown(monkeypatch): + hgcalls = [] + def hgraise(*args): + hgcalls.append(args) + + monkeypatch.setattr(scm, 'hg', hgraise) + hook.handle({ + u'repository': { + u'absolute_url': '/foobar/myrepo', + u'owner': 'foobar', + }, + }) + assert hgcalls == [] + + hook.handle({ + u'repository': { + u'absolute_url': '/pypy/myrepo', + u'owner': 'pypy' + }, + u'commits': [], + }) + assert hgcalls[0][:2] == ('clone', 'http://bitbucket.org/pypy/myrepo',) + local_repo = hgcalls[0][-1] + assert hgcalls[1] == ('pull', '-R', local_repo) + + +def test_ignore_duplicate_commits(monkeypatch, mails, messages): + commits, _ = irc_cases() + payload = {u'repository': {u'absolute_url': '', + u'name': u'test', + 
u'owner': u'antocuni', + u'slug': u'test', + u'website': u''}, + u'user': u'antocuni', + 'commits': commits['commits']} + seen_nodes = set() + commits_listed = list(hook.get_commits(payload, seen_nodes)) + commits_again = list(hook.get_commits(payload, seen_nodes)) + num_commits = len(commits['commits']) + assert len(commits_listed) == num_commits + assert not commits_again + diff --git a/bitbucket_hook/test/test_main.py b/bitbucket_hook/test/test_main.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/test_main.py @@ -0,0 +1,32 @@ +from bitbucket_hook.main import app +from bitbucket_hook import hook + +def test_get(): + client = app.test_client() + response = client.get('/') + + +def test_post(monkeypatch): + client = app.test_client() + def handle(payload, test): + assert payload=={} + assert test==app.config['TESTING'] + monkeypatch.setattr(hook, 'handle', handle) + + app.config['TESTING'] = True + response = client.post('/', data={'payload':"{}"}) + + app.config['TESTING'] = False + response = client.post('/', data={'payload':"{}"}) + + assert response.status_code == 200 + +def test_post_error(monkeypatch): + client = app.test_client() + def handle(payload, test): + raise Exception('omg') + monkeypatch.setattr(hook, 'handle', handle) + response = client.post('/', data={'payload':"{}"}) + assert response.status_code == 500 + + diff --git a/bitbucket_hook/test/conftest.py b/bitbucket_hook/test/conftest.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/conftest.py @@ -0,0 +1,27 @@ +#XXX imports in conftest globals dont sow in coverage reports + + +def pytest_funcarg__mails(request): + return [] + + +def pytest_funcarg__messages(request): + return [] + + +def pytest_funcarg__monkeypatch(request): + from bitbucket_hook import irc, mail + mp = request.getfuncargvalue('monkeypatch') + mails = request.getfuncargvalue('mails') + + def send(from_, to, subject, body, test=False, mails=mails): + mails.append((from_, to, subject, body)) + 
mp.setattr(mail, 'send', send) + + messages = request.getfuncargvalue('messages') + + def send_irc_message(message, test=False): + messages.append(message) + mp.setattr(irc, 'send_message', send_irc_message) + + return mp diff --git a/bitbucket_hook/test_hook_testcall.py b/bitbucket_hook/test_hook_testcall.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test_hook_testcall.py @@ -0,0 +1,109 @@ +import os +import py + + +def test_handlecall(): + from bitbucket_hook.hook import handle + from bitbucket_hook.main import app + repopath = os.path.dirname(os.path.dirname(__file__)) + print 'Repository path:', repopath + test_payload = {u'repository': {u'absolute_url': '', + u'name': u'test', + u'owner': u'antocuni', + u'slug': u'test', + u'website': u''}, + u'user': u'antocuni'} + + commits = [{u'author': u'arigo', + u'branch': u'default', + u'files': [], + u'message': u'Merge heads.', + u'node': u'00ae063c6b8c', + u'parents': [u'278760e9c560', u'29f1ff96548d'], + u'raw_author': u'Armin Rigo ', + u'raw_node': u'00ae063c6b8c13d873d92afc5485671f6a944077', + u'revision': 403, + u'size': 0, + u'timestamp': u'2011-01-09 13:07:24'}, + + {u'author': u'antocuni', + u'branch': u'default', + u'files': [{u'file': u'bitbucket_hook/hook.py', + u'type': u'modified'}], + u'message': u"don't send newlines to irc", + u'node': u'e17583fbfa5c', + u'parents': [u'69e9eac01cf6'], + u'raw_author': u'Antonio Cuni ', + u'raw_node': u'e17583fbfa5c5636b5375a5fc81f3d388ce1b76e', + u'revision': 399, + u'size': 19, + u'timestamp': u'2011-01-07 17:42:13'}, + + {u'author': u'antocuni', + u'branch': u'default', + u'files': [{u'file': u'.hgignore', u'type': u'added'}], + u'message': u'ignore irrelevant files', + u'node': u'5cbd6e289c04', + u'parents': [u'3a7c89443fc8'], + u'raw_author': u'Antonio Cuni ', + u'raw_node': u'5cbd6e289c043c4dd9b6f55b5ec1c8d05711c6ad', + u'revision': 362, + u'size': 658, + u'timestamp': u'2010-11-04 16:34:31'}, + + {u'author': u'antocuni', + u'branch': u'default', + 
u'files': [{u'file': u'bitbucket_hook/hook.py', + u'type': u'modified'}, + {u'file': u'bitbucket_hook/__init__.py', + u'type': u'added'}, + {u'file': u'bitbucket_hook/test/__init__.py', + u'type': u'added'}, + {u'file': u'bitbucket_hook/test/test_hook.py', + u'type': u'added'}], + u'message': u'partially refactor the hook to be more testable,' + u' and write a test for the fix in 12cc0caf054d', + u'node': u'9c7bc068df88', + u'parents': [u'12cc0caf054d'], + u'raw_author': u'Antonio Cuni ', + u'raw_node': u'9c7bc068df8850f4102c610d2bee3cdef67b30e6', + u'revision': 391, + u'size': 753, + u'timestamp': u'2010-12-19 14:45:44'}] + + test_payload[u'commits'] = commits + +## # To regenerate: +## try: +## from json import loads # 2.6 +## except ImportError: +## from simplejson import loads +## +## from urllib2 import urlopen +## url = ("https://api.bitbucket.org/1.0/repositories/pypy/buildbot/" +## "changesets/%s/") +## +## # Representative changesets +## mergeheads = u'00ae063c6b8c' +## singlefilesub = u'e17583fbfa5c' +## root = u'5cbd6e289c04' +## multiadd = u'9c7bc068df88' +## test_nodes = mergeheads, singlefilesub, root, multiadd +## +## commits = [] +## for commit in test_nodes: +## req = urlopen(url % commit) +## payload = req.read() +## req.close() +## commits.append(loads(payload)) +## +## test_payload['commits'] = commits + + app.config['LOCAL_REPOS'] = py.path.local(repopath) + app.config['USE_COLOR_CODES'] = False + + handle(test_payload, test=True) + + +if __name__ == '__main__': + test_handlecall() diff --git a/bitbucket_hook/test/test_scm.py b/bitbucket_hook/test/test_scm.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/test_scm.py @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +import py +import pytest + +from bitbucket_hook import scm + + +def test_non_ascii_encoding_guess_utf8(monkeypatch): + + def _hgexe(argv): + return u'späm'.encode('utf-8'), '', 0 + monkeypatch.setattr(scm, '_hgexe', _hgexe) + stdout = scm.hg('foobar') + assert type(stdout) 
is unicode + assert stdout == u'späm' + + +def test_non_ascii_encoding_invalid_utf8(monkeypatch): + + def _hgexe(argv): + return '\xe4aa', '', 0 # invalid utf-8 string + monkeypatch.setattr(scm, '_hgexe', _hgexe) + stdout = scm.hg('foobar') + assert type(stdout) is unicode + assert stdout == u'\ufffdaa' + + + at pytest.mark.skip_if("not py.path.local.sysfind('hg')", + reason='hg binary missing') +def test_hg(): + scm.hg('help') + with pytest.raises(Exception): + print scm.hg + scm.hg('uhmwrong') diff --git a/bitbucket_hook/run.py b/bitbucket_hook/run.py new file mode 100755 --- /dev/null +++ b/bitbucket_hook/run.py @@ -0,0 +1,22 @@ +#!/usr/bin/python + +""" +To start the server in production mode, run this command:: + + ./run.py deploy +""" + +import py +import sys +import argparse +main = py.path.local(__file__).dirpath().join('main.py').pyimport() + + +if __name__ == '__main__': + HOST_NAME = 'codespeak.net' + PORT_NUMBER = 9237 + main.app.run( + host = HOST_NAME if 'deploy' in sys.argv else 'localhost', + debug = 'debug' in sys.argv, + port=PORT_NUMBER) + diff --git a/bitbucket_hook/irc.py b/bitbucket_hook/irc.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/irc.py @@ -0,0 +1,108 @@ +''' +utilities for interacting with the irc bot (via cli) +''' + +import os +import subprocess + +def getpaths(files, listfiles=False): + + # Handle empty input + if not files: + return '', '' + files = [f['file'] for f in files] + if not any(files): + return '', '' + + dirname = os.path.dirname + basename = os.path.basename + + common_prefix = [dirname(f) for f in files] + + # Single file, show its full path + if len(files) == 1: + common_prefix = files[0] + listfiles = False + + else: + common_prefix = [path.split(os.sep) for path in common_prefix] + common_prefix = os.sep.join(os.path.commonprefix(common_prefix)) + if common_prefix and not common_prefix.endswith('/'): + common_prefix += '/' + + if listfiles: + # XXX Maybe should return file paths relative to prefix? 
Or TMI? + filenames = [basename(f) for f in files if f and basename(f)] + filenames = ' M(%s)' % ', '.join(filenames) + else: + filenames = '' + return common_prefix, filenames + + +def send_message(message, test=False): + if test: + print message + '\n' + else: + from .main import app + return subprocess.call([ + app.config['BOT'], + app.config['CHANNEL'], + message, + ]) + +def get_short_id(owner, repo, branch): + """ + Custom rules to get a short string that identifies a repo/branch in a + useful way, for IRC messages. Look at test_irc.test_get_short_id for what + we expect. + """ + from .main import app + repo_parts = [] + if owner != app.config['DEFAULT_USER']: + repo_parts.append('%s' % owner) + if repo_parts or repo != app.config['DEFAULT_REPO']: + repo_parts.append(repo) + repo_id = '/'.join(repo_parts) + # + if repo_id == '': + return branch + elif branch == 'default': + return repo_id + elif repo_id == branch: + return repo_id # e.g., pypy/extradoc has a branch extradoc, just return 'extradoc' + else: + return '%s[%s]' % (repo_id, branch) + return branch + + +def handle_commit(payload, commit, test=False): + from .main import app + + repo_owner = payload['repository']['owner'] + repo_name = payload['repository']['name'] + author = commit['author'] + branch = commit['branch'] + node = commit['node'] + short_id = get_short_id(repo_owner, repo_name, branch) + + files = commit.get('files', []) + common_prefix, filenames = getpaths(files, app.config['LISTFILES']) + pathlen = len(common_prefix) + len(filenames) + 2 + common_prefix = '/' + common_prefix + + if app.config['USE_COLOR_CODES']: + author = '\x0312%s\x0F' % author # in blue + short_id = '\x02%s\x0F' % short_id # in bold + node = '\x0311%s\x0F' % node # in azure + common_prefix = '\x0315%s\x0F' % common_prefix # in gray + + message = commit['message'].replace('\n', ' ') + fields = (author, short_id, node, common_prefix, filenames) + part1 = '%s %s %s %s%s: ' % fields + totallen = 160 + pathlen + if 
len(message) + len(part1) <= totallen: + irc_msg = part1 + message + else: + maxlen = totallen - (len(part1) + 3) + irc_msg = part1 + message[:maxlen] + '...' + send_message(irc_msg, test) diff --git a/bitbucket_hook/stdoutlog.py b/bitbucket_hook/stdoutlog.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/stdoutlog.py @@ -0,0 +1,21 @@ +import time + +RED = 31 +GREEN = 32 +YELLOW = 33 +BLUE = 34 +MAGENTA = 35 +CYAN = 36 +GRAY = 37 + +def color(s, fg=1, bg=1): + template = '\033[%02d;%02dm%s\033[0m' + return template % (bg, fg, s) + +def handle_commit(payload, commit, test=False): + author = commit['author'] + node = commit['node'] + timestamp = commit.get('timestamp') + curtime = time.strftime('[%Y-%m-%d %H:%M]') + log = '%s %s %s %s' % (curtime, node, timestamp, author) + print color(log, fg=GREEN) diff --git a/bitbucket_hook/mail.py b/bitbucket_hook/mail.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/mail.py @@ -0,0 +1,57 @@ +from . import scm +from smtplib import SMTP + + +TEMPLATE = u"""\ +Author: {author} +Branch: {branches} +Changeset: r{rev}:{node|short} +Date: {date|isodate} +%(url)s + +Log:\t{desc|fill68|tabindent} + +""" + +def handle_commit(payload, commit, test=False): + from .main import app + + path = payload['repository']['absolute_url'] + local_repo = app.config['LOCAL_REPOS'].join(path) + remote_repo = app.config['REMOTE_BASE'] + path + + hgid = commit['raw_node'] + sender = commit['author'] + ' ' + lines = commit['message'].splitlines() + line0 = lines and lines[0] or '' + reponame = payload['repository']['name'] + # TODO: maybe include the modified paths in the subject line? 
+ url = remote_repo + 'changeset/' + commit['node'] + '/' + template = TEMPLATE % {'url': url} + subject = '%s %s: %s' % (reponame, commit['branch'], line0) + body = scm.hg('-R', local_repo, 'log', '-r', hgid, + '--template', template) + diff = scm.get_diff(local_repo, hgid, commit['files']) + body = body + diff + send(sender, app.config['ADDRESS'], subject, body, test) + + +def send(from_, to, subject, body, test=False): + from .main import app + from email.mime.text import MIMEText + # Is this a valid workaround for unicode errors? + body = body.encode('ascii', 'xmlcharrefreplace') + msg = MIMEText(body, _charset='utf-8') + msg['From'] = from_ + msg['To'] = to + msg['Subject'] = subject + if test: + print '#' * 20 + print "Email contents:\n" + print from_ + print to + print msg.get_payload(decode=True) + else: + smtp = SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT']) + smtp.sendmail(from_, [to], msg.as_string()) + diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -202,7 +202,9 @@ logfiles={'pytestLog': 'cpython.log'})) if pypyjit: - # upload nightly build, if we're running jit tests + # kill this step when the transition to test_pypy_c_new has been + # completed + # "old" test_pypy_c self.addStep(PytestCmd( description="pypyjit tests", command=["python", "pypy/test_all.py", @@ -210,6 +212,15 @@ "--resultlog=pypyjit.log", "pypy/module/pypyjit/test"], logfiles={'pytestLog': 'pypyjit.log'})) + # + # "new" test_pypy_c + self.addStep(PytestCmd( + description="pypyjit tests", + command=["pypy/translator/goal/pypy-c", "pypy/test_all.py", + "--resultlog=pypyjit_new.log", + "pypy/module/pypyjit/test_pypy_c"], + logfiles={'pytestLog': 'pypyjit_new.log'})) + if pypyjit: kind = 'jit' else: diff --git a/bitbucket_hook/test/test_irc.py b/bitbucket_hook/test/test_irc.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/test/test_irc.py @@ -0,0 +1,103 @@ +from bitbucket_hook 
import irc +import subprocess + +def fl(*paths): + return [{'file': x} for x in paths] + + +def pytest_generate_tests(metafunc): + + barefile = fl('file') + distinct = fl('path1/file1', 'path2/file2', 'path3/file') + shared = fl('path/file1', 'path/file2', 'path/file') + + deepfile = fl('a/long/path/to/deepfile.py') + slashesfile = fl('/slashesfile/') + slashleft = fl('/slashleft') + slashright = fl('slashright/') + + nocommon = distinct + fl('path4/file') + nocommonplusroot = distinct + barefile + + common = fl('some/path/to/file', 'some/path/to/deeper/file', + 'some/path/to/anotherfile', 'some/path/to/afile') + commonplusroot = shared + barefile + + empty = fl('') + nocommonplusempty = distinct + empty + commonplusempty = shared + empty + nocommonplusslash = distinct + fl('path4/dir/') + commonplusslash = shared + fl('path/dir/') + + pypydoubleslash = fl('pypy/jit/metainterp/opt/u.py', + 'pypy/jit/metainterp/test/test_c.py', + 'pypy/jit/metainterp/test/test_o.py') + + pypyempty = fl('pypy/rlib/rdtoa.py', 'pypy/rlib/test/test_rdtoa.py') + + nothing = ('', '') + + expectations = [ + ('null', [], nothing), + ('empty', empty, nothing), + ('empty*2', empty * 2, nothing), + ('bare', barefile, ('file', '')), + ('deep', deepfile, ('a/long/path/to/deepfile.py', '')), + ('slashes', slashesfile, ('/slashesfile/', '')), + ('slashleft', slashleft, ('/slashleft', '')), + ('slashright', slashright, ('slashright/', '')), + ('nocommon', nocommon, ('', ' M(file1, file2, file, file)')), + ('nocommon+root', nocommonplusroot, + ('', ' M(file1, file2, file, file)')), + ('nocommon+empty', nocommonplusempty, ('', ' M(file1, file2, file)')), + ('common', common, ('some/path/to/', + ' M(file, file, anotherfile, afile)')), + ('common+root', commonplusroot, ('', ' M(file1, file2, file, file)')), + ('common+empty', commonplusempty, ('', ' M(file1, file2, file)')), + ('nocommon+slash', nocommonplusslash, ('', ' M(file1, file2, file)')), + ('common+slash', commonplusslash, ('path/', ' M(file1, 
file2, file)')), + ('pypydoubledash', pypydoubleslash, ('pypy/jit/metainterp/', + ' M(u.py, test_c.py, test_o.py)')), + ('pypyempty', pypyempty, ('pypy/rlib/', + ' M(rdtoa.py, test_rdtoa.py)')), + ] + + if metafunc.function.__name__ == 'test_getpaths': + for name, files, (common, listfiles) in expectations: + metafunc.addcall(id='list/' + name, funcargs={ + 'files': files, + 'expected_common': common, + 'expected_listfiles': listfiles, + }) + metafunc.addcall(id='nolist/' + name, funcargs={ + 'files': files, + 'expected_common': common, + 'expected_listfiles': listfiles, + }) + + +def test_getpaths(files, expected_common, expected_listfiles): + common, files = irc.getpaths(files, listfiles=bool(expected_listfiles)) + assert common == expected_common + assert files == expected_listfiles + +def test_send_message(monkeypatch): + monkeypatch.undo() # hack to get at the functions + + # gets called in normal mode + monkeypatch.setattr(subprocess, 'call', lambda *k, **kw: None) + irc.send_message('test') + + # doesnt get called in test mode + monkeypatch.setattr(subprocess, 'call', lambda: None) + irc.send_message('test', test=True) + +def test_get_short_id(): + assert irc.get_short_id('pypy', 'pypy', 'default') == 'default' + assert irc.get_short_id('pypy', 'pypy', 'mybranch') == 'mybranch' + assert irc.get_short_id('pypy', 'buildbot', 'default') == 'buildbot' + assert irc.get_short_id('pypy', 'buildbot', 'mybranch') == 'buildbot[mybranch]' + assert irc.get_short_id('pypy', 'extradoc', 'extradoc') == 'extradoc' + # + assert irc.get_short_id('anto', 'pypy', 'default') == 'anto/pypy' + assert irc.get_short_id('anto', 'pypy', 'mybranch') == 'anto/pypy[mybranch]' diff --git a/bitbucket_hook/scm.py b/bitbucket_hook/scm.py new file mode 100644 --- /dev/null +++ b/bitbucket_hook/scm.py @@ -0,0 +1,36 @@ +import sys +from subprocess import Popen, PIPE + + +def _hgexe(argv): + proc = Popen(['hg'] + list(argv), stdout=PIPE, stderr=PIPE) + stdout, stderr = proc.communicate() + ret = 
proc.wait() + return stdout, stderr, ret + + +def hg(*argv): + argv = map(str, argv) + stdout, stderr, ret = _hgexe(argv) + if ret != 0: + print >> sys.stderr, 'error: hg', ' '.join(argv) + print >> sys.stderr, stderr + raise Exception('error when executing hg') + return unicode(stdout, encoding='utf-8', errors='replace') + + +def get_diff(local_repo, hgid, files): + import re + binary = re.compile('^GIT binary patch$', re.MULTILINE) + files = [item['file'] for item in files] + lines = [] + for filename in files: + out = hg('-R', local_repo, 'diff', '--git', '-c', hgid, + local_repo.join(filename)) + match = binary.search(out) + if match: + # it's a binary patch, omit the content + out = out[:match.end()] + out += u'\n[cut]' + lines.append(out) + return u'\n'.join(lines) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -2,6 +2,9 @@ *.pyc *~ +# test coverage files +.coverage + # master/slaveinfo.py contains the passwords, so it should never be tracked master/slaveinfo.py diff --git a/bot2/pypybuildbot/summary.py b/bot2/pypybuildbot/summary.py --- a/bot2/pypybuildbot/summary.py +++ b/bot2/pypybuildbot/summary.py @@ -67,12 +67,18 @@ def populate_one(self, name, shortrepr, longrepr=None): if shortrepr == '!': namekey = [name, ''] - else: - namekey = name.split(':', 1) + else: + # pytest2 and pytest1 use different separators/test id + # syntax support both here for now + if '.py::' in name: + namekey = name.split('::', 1) + else: + namekey = name.split(':', 1) if namekey[0].endswith('.py'): namekey[0] = namekey[0][:-3].replace('/', '.') if len(namekey) == 1: namekey.append('') + namekey[1] = namekey[1].replace("::", ".") namekey = tuple(namekey) self._outcomes[namekey] = shortrepr @@ -106,7 +112,7 @@ kind = None def add_one(): if kind is not None: - self.populate_one(name, kind, ''.join(longrepr)) + self.populate_one(name, kind, ''.join(longrepr)) for line in log.readlines(): first = line[0] if first == ' ': @@ -570,7 +576,7 @@ mod, testname = 
self.get_namekey(request) if mod is None: return "no such test" - return "%s %s" % (mod, testname) + return "%s %s" % (mod, testname) def body(self, request): t0 = time.time() @@ -660,7 +666,7 @@ request.site.buildbot_service.head_elements = old_head_elements def getTitle(self, request): - status = self.getStatus(request) + status = self.getStatus(request) return "%s: summaries of last %d revisions" % (status.getProjectName(), N) diff --git a/bitbucket_hook/main.py b/bitbucket_hook/main.py --- a/bitbucket_hook/main.py +++ b/bitbucket_hook/main.py @@ -7,57 +7,81 @@ codespeak. """ -import time -import BaseHTTPServer import json -import cgi import traceback import pprint import sys +import flask +import py -from hook import BitbucketHookHandler -HOST_NAME = 'codespeak.net' -PORT_NUMBER = 9237 +app = flask.Flask(__name__) -class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler): - def do_GET(self): - """Respond to a GET request.""" - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - self.wfile.write(""" - -

This is the pypy bitbucket hook. Use the following form only for testing

- - payload:
- submit: - - - """) - def do_POST(self): - length = int(self.headers['Content-Length']) - query_string = self.rfile.read(length) - data = dict(cgi.parse_qsl(query_string)) - payload = json.loads(data['payload']) - handler = BitbucketHookHandler() - try: - handler.handle(payload) - except: - traceback.print_exc() - print >> sys.stderr, 'payload:' - pprint.pprint(payload, sys.stderr) - print >> sys.stderr + at app.route('/', methods=['GET']) +def test_form(): + """Respond to a GET request.""" + return """ + +

+ This is the pypy bitbucket hook. + Use the following form only for testing +

+
+ payload:
+ submit: +
+ + """ -if __name__ == '__main__': - server_class = BaseHTTPServer.HTTPServer - httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler) - print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER) + + at app.route('/', methods=['POST']) +def handle_payload(): + payload = json.loads(flask.request.form['payload']) try: - httpd.serve_forever() - except KeyboardInterrupt: - pass - httpd.server_close() - print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER) + from . import hook + hook.handle(payload, test=app.testing) + except: + traceback.print_exc() + print >> sys.stderr, 'payload:' + pprint.pprint(payload, sys.stderr) + print >> sys.stderr + raise + return 'ok' + + +class DefaultConfig(object): + LOCAL_REPOS = py.path.local(__file__).dirpath('repos') + REMOTE_BASE = 'http://bitbucket.org' + USE_COLOR_CODES = True + LISTFILES = False + # + DEFAULT_USER = 'pypy' + DEFAULT_REPO = 'pypy' + + +class CodeSpeakConfig(DefaultConfig): + SMTP_SERVER = 'localhost' + SMTP_PORT = 25 + ADDRESS = 'pypy-svn at codespeak.net' + # + CHANNEL = '#pypy' + BOT = '/svn/hooks/commit-bot/message' + + +class ViperConfig(DefaultConfig): + SMTP_SERVER = "out.alice.it" + SMTP_PORT = 25 + ADDRESS = 'anto.cuni at gmail.com' + # + CHANNEL = '#test' + BOT = '/tmp/commit-bot/message' + + +if py.std.socket.gethostname() == 'viper': + # for debugging, antocuni's settings + app.config.from_object(ViperConfig) +else: + # real settings, (they works on codespeak at least) + app.config.from_object(CodeSpeakConfig) From commits-noreply at bitbucket.org Fri Apr 29 11:57:17 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 11:57:17 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Kill XXX, armin already documented minimark Message-ID: <20110429095717.CD7D1282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43751:ed49df41d8f0 Date: 2011-04-29 11:56 +0200 
http://bitbucket.org/pypy/pypy/changeset/ed49df41d8f0/ Log: Kill XXX, armin already documented minimark diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -20,10 +20,6 @@ Garbage collectors currently written for the GC framework ========================================================= -XXX we need to add something about minimark - -(Very rough sketch only for now.) - Reminder: to select which GC you want to include in a translated RPython program, use the ``--gc=NAME`` option of ``translate.py``. For more details, see the `overview of command line options for From commits-noreply at bitbucket.org Fri Apr 29 12:05:49 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 12:05:49 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: RPython toolchain again Message-ID: <20110429100549.04815282BEC@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43752:9e4aa458f145 Date: 2011-04-29 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/9e4aa458f145/ Log: RPython toolchain again diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -15,7 +15,7 @@ .. glossary:: annotator - The component of the :term:`translator`\ 's :term:`toolchain` that performs a form + The component of the :term:`RPython toolchain` that performs a form of :term:`type inference` on the flow graph. See the `annotator pass`_ in the documentation. @@ -26,7 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the PyPy :term:`toolchain`. A backend uses either the + language`_ using the :term:`RPython toolchain`. A backend uses either the :term:`lltypesystem` or the :term:`ootypesystem`. compile-time @@ -100,12 +100,16 @@ value at :term:`compile-time`, essentially by deferring compilation until the run-time value is known. 
See if `the jit docs`_ help. - rpython + RPython `Restricted Python`_, a limited subset of the Python_ language. The limitations make :term:`type inference` possible. It is also the language that the PyPy interpreter itself is written in. + RPython toolchain + The `annotator pass`_, `The RPython Typer`_, and various + :term:`backend`\ s. + rtyper Based on the type annotations, the `RPython Typer`_ turns the flow graph into one that fits the model of the target platform/:term:`backend` @@ -130,10 +134,6 @@ It is the `subsystem implementing the Python language`_, composed of the bytecode interpreter and of the standard objectspace. - toolchain - The `annotator pass`_, `The RPython Typer`_, and various - :term:`backend`\ s. - transformation Code that modifies flowgraphs to weave in translation aspects @@ -151,7 +151,7 @@ type inference Deduces either partially or fully the type of expressions as described in this `type inference article on Wikipedia`_. - PyPy's tool-chain own flavour of type inference is described + The :term:`RPython toolchain`'s flavour of type inference is described in the `annotator pass`_ section. .. _applevel: coding-guide.html#application-level From commits-noreply at bitbucket.org Fri Apr 29 12:07:11 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 29 Apr 2011 12:07:11 +0200 (CEST) Subject: [pypy-svn] pypy mingw32-build: A branch to support again the mingw32 compiler. Message-ID: <20110429100711.AFF78282BEC@codespeak.net> Author: Amaury Forgeot d'Arc Branch: mingw32-build Changeset: r43753:aba9eb5b22f5 Date: 2011-04-29 12:00 +0200 http://bitbucket.org/pypy/pypy/changeset/aba9eb5b22f5/ Log: A branch to support again the mingw32 compiler. 
From commits-noreply at bitbucket.org Fri Apr 29 12:07:13 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 29 Apr 2011 12:07:13 +0200 (CEST) Subject: [pypy-svn] pypy mingw32-build: On Windows when --cc=mingw32, set the _WIN32_WINNT value as soon as possible, Message-ID: <20110429100713.21541282BEC@codespeak.net> Author: Amaury Forgeot d'Arc Branch: mingw32-build Changeset: r43754:e0a0b98ccbe2 Date: 2011-04-29 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/e0a0b98ccbe2/ Log: On Windows when --cc=mingw32, set the _WIN32_WINNT value as soon as possible, otherwise getaddrinfo() may be undefined. 0x0501 = windows XP is the minimum supported version for mingw32 builds diff --git a/pypy/rlib/_rsocket_rffi.py b/pypy/rlib/_rsocket_rffi.py --- a/pypy/rlib/_rsocket_rffi.py +++ b/pypy/rlib/_rsocket_rffi.py @@ -17,6 +17,8 @@ _SOLARIS = sys.platform == "sunos5" _MACOSX = sys.platform == "darwin" +pre_include_bits = [] + if _POSIX: includes = ('sys/types.h', 'sys/socket.h', @@ -69,11 +71,11 @@ ]) else: # MINGW includes = ('stdint.h',) + pre_include_bits = [ + '#ifndef _WIN32_WINNT\n' + + '#define _WIN32_WINNT 0x0501\n' + + '#endif'] header_lines.extend([ - '''\ - #ifndef _WIN32_WINNT - #define _WIN32_WINNT 0x0501 - #endif''', '#define SIO_RCVALL _WSAIOW(IOC_VENDOR,1)', '#define SIO_KEEPALIVE_VALS _WSAIOW(IOC_VENDOR,4)', '#define RCVALL_OFF 0', @@ -110,6 +112,7 @@ """] eci = ExternalCompilationInfo( + pre_include_bits = pre_include_bits, post_include_bits = [HEADER, COND_HEADER], includes = includes, libraries = libraries, From commits-noreply at bitbucket.org Fri Apr 29 12:07:16 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 29 Apr 2011 12:07:16 +0200 (CEST) Subject: [pypy-svn] pypy mingw32-build: Add support for shared modules with the mingw32 compiler. 
Message-ID: <20110429100716.15105282C1D@codespeak.net> Author: Amaury Forgeot d'Arc Branch: mingw32-build Changeset: r43755:330fe353a7c1 Date: 2011-04-29 11:56 +0200 http://bitbucket.org/pypy/pypy/changeset/330fe353a7c1/ Log: Add support for shared modules with the mingw32 compiler. I needed to pass the "target" file name to _exportsymbols_link_flags(), hence the changes for other platforms. diff --git a/pypy/translator/platform/posix.py b/pypy/translator/platform/posix.py --- a/pypy/translator/platform/posix.py +++ b/pypy/translator/platform/posix.py @@ -38,10 +38,10 @@ cwd=str(cfile.dirpath())) return oname - def _link_args_from_eci(self, eci, standalone): - return Platform._link_args_from_eci(self, eci, standalone) + def _link_args_from_eci(self, eci, target, standalone): + return Platform._link_args_from_eci(self, eci, target, standalone) - def _exportsymbols_link_flags(self, eci, relto=None): + def _exportsymbols_link_flags(self, eci, target, relto=None): if not eci.export_symbols: return [] @@ -95,14 +95,14 @@ if shared: linkflags = self._args_for_shared(linkflags) - linkflags += self._exportsymbols_link_flags(eci, relto=path) - if shared: libname = exe_name.new(ext='').basename target_name = 'lib' + exe_name.new(ext=self.so_ext).basename else: target_name = exe_name.basename + linkflags += self._exportsymbols_link_flags(eci, target_name, relto=path) + if shared: cflags = self.cflags + self.shared_only else: diff --git a/pypy/translator/platform/__init__.py b/pypy/translator/platform/__init__.py --- a/pypy/translator/platform/__init__.py +++ b/pypy/translator/platform/__init__.py @@ -170,16 +170,16 @@ def _preprocess_library_dirs(self, library_dirs): return library_dirs - def _link_args_from_eci(self, eci, standalone): + def _link_args_from_eci(self, eci, target, standalone): library_dirs = self.preprocess_library_dirs(eci.library_dirs) library_dirs = self._libdirs(library_dirs) libraries = self._libs(eci.libraries) link_files = 
self._linkfiles(eci.link_files) - export_flags = self._exportsymbols_link_flags(eci) + export_flags = self._exportsymbols_link_flags(eci, target) return (library_dirs + list(self.link_flags) + export_flags + link_files + list(eci.link_extra) + libraries) - def _exportsymbols_link_flags(self, eci, relto=None): + def _exportsymbols_link_flags(self, eci, target, relto=None): if eci.export_symbols: raise ValueError("This platform does not support export symbols") return [] @@ -201,7 +201,7 @@ cc_link = 'g++' # XXX hard-coded so far else: cc_link = self.cc - largs = self._link_args_from_eci(eci, standalone) + largs = self._link_args_from_eci(eci, exe_name, standalone) return self._link(cc_link, ofiles, largs, standalone, exe_name) # below are some detailed informations for platforms diff --git a/pypy/translator/platform/darwin.py b/pypy/translator/platform/darwin.py --- a/pypy/translator/platform/darwin.py +++ b/pypy/translator/platform/darwin.py @@ -46,13 +46,13 @@ args.append(f) return args - def _link_args_from_eci(self, eci, standalone): - args = super(Darwin, self)._link_args_from_eci(eci, standalone) + def _link_args_from_eci(self, eci, target, standalone): + args = super(Darwin, self)._link_args_from_eci(eci, target, standalone) frameworks = self._frameworks(eci.frameworks) include_dirs = self._includedirs(eci.include_dirs) return (args + frameworks + include_dirs) - def _exportsymbols_link_flags(self, eci, relto=None): + def _exportsymbols_link_flags(self, eci, target, relto=None): if not eci.export_symbols: return [] diff --git a/pypy/translator/platform/windows.py b/pypy/translator/platform/windows.py --- a/pypy/translator/platform/windows.py +++ b/pypy/translator/platform/windows.py @@ -145,11 +145,12 @@ # see src/thread_nt.h return False - def _link_args_from_eci(self, eci, standalone): + def _link_args_from_eci(self, eci, target, standalone): # Windows needs to resolve all symbols even for DLLs - return super(MsvcPlatform, self)._link_args_from_eci(eci, 
standalone=True) + return super(MsvcPlatform, self)._link_args_from_eci(eci, target, + standalone=True) - def _exportsymbols_link_flags(self, eci, relto=None): + def _exportsymbols_link_flags(self, eci, target, relto=None): if not eci.export_symbols: return [] @@ -226,19 +227,18 @@ m.exe_name = exe_name m.eci = eci - linkflags = list(self.link_flags) if shared: linkflags = self._args_for_shared(linkflags) + [ '/EXPORT:$(PYPY_MAIN_FUNCTION)'] - linkflags += self._exportsymbols_link_flags(eci, relto=path) - - if shared: so_name = exe_name.new(purebasename='lib' + exe_name.purebasename, ext=self.so_ext) target_name = so_name.basename else: + linkflags = list(self.link_flags) target_name = exe_name.basename + linkflags += self._exportsymbols_link_flags(eci, target_name, relto=path) + def pypyrel(fpath): rel = py.path.local(fpath).relto(pypypath) if rel: @@ -367,3 +367,26 @@ # Mingw tools write compilation errors to stdout super(MingwPlatform, self)._handle_error( returncode, '', stderr + stdout, outname) + + def _exportsymbols_link_flags(self, eci, target, relto=None): + if not eci.export_symbols: + return [] + + def_file = self._make_response_file("dynamic-symbols-") + f = def_file.open("w") + f.write("EXPORTS\n") + for sym in eci.export_symbols: + f.write("%s\n" % (sym,)) + f.close() + + exp_file = def_file.new(ext='.exp') + self._execute_c_compiler('dlltool', + ['--dllname', str(target), + '--output-exp', str(exp_file), + '--def', str(def_file)], + exp_file) + + if relto: + exp_file = relto.bestrelpath(exp_file) + return ["-Wl,%s" % (exp_file,)] + From commits-noreply at bitbucket.org Fri Apr 29 12:07:17 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 29 Apr 2011 12:07:17 +0200 (CEST) Subject: [pypy-svn] pypy mingw32-build: Add the "--cc" option to py.test Message-ID: <20110429100717.EB7EB282BF2@codespeak.net> Author: Amaury Forgeot d'Arc Branch: mingw32-build Changeset: r43756:0e04cf951e16 Date: 2011-04-29 11:57 +0200 
http://bitbucket.org/pypy/pypy/changeset/0e04cf951e16/ Log: Add the "--cc" option to py.test For example: --cc=mingw32 diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -33,6 +33,10 @@ raise ValueError("%s not in %s" % (value, PLATFORMS)) set_platform(value, None) +def _set_compiler(opt, opt_str, value, parser): + from pypy.translator.platform import set_platform + set_platform('host', value) + def pytest_addoption(parser): group = parser.getgroup("pypy options") group.addoption('--view', action="store_true", dest="view", default=False, @@ -46,6 +50,9 @@ group.addoption('-P', '--platform', action="callback", type="string", default="host", callback=_set_platform, help="set up tests to use specified platform as compile/run target") + group.addoption('--cc', action="callback", type="string", + default="host", callback=_set_compiler, + help="set up tests to use specified compiler") def pytest_sessionstart(): # have python subprocesses avoid startup customizations by default From commits-noreply at bitbucket.org Fri Apr 29 12:07:19 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 29 Apr 2011 12:07:19 +0200 (CEST) Subject: [pypy-svn] pypy mingw32-build: cpyext fixes for the mingw32 compiler Message-ID: <20110429100719.E63D0282BF2@codespeak.net> Author: Amaury Forgeot d'Arc Branch: mingw32-build Changeset: r43757:e935245ba765 Date: 2011-04-29 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/e935245ba765/ Log: cpyext fixes for the mingw32 compiler diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -184,8 +184,9 @@ api_library = state.api_lib if sys.platform == 'win32': kwds["libraries"] = [api_library] - # '%s' undefined; assuming extern returning int - kwds["compile_extra"] = ["/we4013"] + if platform.platform.name == "msvc": + # '%s' undefined; assuming extern returning 
int + kwds["compile_extra"] = ["/we4013"] else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform == 'linux2': diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -12,46 +12,50 @@ appleveldefs = { } + def __init__(self, space, w_name): + MixedModule.__init__(self, space, w_name) + + # import these modules to register api functions by side-effect + import pypy.module.cpyext.thread + import pypy.module.cpyext.pyobject + import pypy.module.cpyext.boolobject + import pypy.module.cpyext.floatobject + import pypy.module.cpyext.modsupport + import pypy.module.cpyext.pythonrun + import pypy.module.cpyext.pyerrors + import pypy.module.cpyext.typeobject + import pypy.module.cpyext.object + import pypy.module.cpyext.stringobject + import pypy.module.cpyext.tupleobject + import pypy.module.cpyext.dictobject + import pypy.module.cpyext.intobject + import pypy.module.cpyext.longobject + import pypy.module.cpyext.listobject + import pypy.module.cpyext.sequence + import pypy.module.cpyext.eval + import pypy.module.cpyext.import_ + import pypy.module.cpyext.mapping + import pypy.module.cpyext.iterator + import pypy.module.cpyext.unicodeobject + import pypy.module.cpyext.sysmodule + import pypy.module.cpyext.number + import pypy.module.cpyext.sliceobject + import pypy.module.cpyext.stubsactive + import pypy.module.cpyext.pystate + import pypy.module.cpyext.cdatetime + import pypy.module.cpyext.complexobject + import pypy.module.cpyext.weakrefobject + import pypy.module.cpyext.funcobject + import pypy.module.cpyext.frameobject + import pypy.module.cpyext.classobject + import pypy.module.cpyext.pypyintf + import pypy.module.cpyext.memoryobject + import pypy.module.cpyext.codecs + import pypy.module.cpyext.pyfile + + # now that all rffi_platform.Struct types are registered, configure them + api.configure_types() + def startup(self, space): 
space.fromcache(State).startup(space) -# import these modules to register api functions by side-effect -import pypy.module.cpyext.thread -import pypy.module.cpyext.pyobject -import pypy.module.cpyext.boolobject -import pypy.module.cpyext.floatobject -import pypy.module.cpyext.modsupport -import pypy.module.cpyext.pythonrun -import pypy.module.cpyext.pyerrors -import pypy.module.cpyext.typeobject -import pypy.module.cpyext.object -import pypy.module.cpyext.stringobject -import pypy.module.cpyext.tupleobject -import pypy.module.cpyext.dictobject -import pypy.module.cpyext.intobject -import pypy.module.cpyext.longobject -import pypy.module.cpyext.listobject -import pypy.module.cpyext.sequence -import pypy.module.cpyext.eval -import pypy.module.cpyext.import_ -import pypy.module.cpyext.mapping -import pypy.module.cpyext.iterator -import pypy.module.cpyext.unicodeobject -import pypy.module.cpyext.sysmodule -import pypy.module.cpyext.number -import pypy.module.cpyext.sliceobject -import pypy.module.cpyext.stubsactive -import pypy.module.cpyext.pystate -import pypy.module.cpyext.cdatetime -import pypy.module.cpyext.complexobject -import pypy.module.cpyext.weakrefobject -import pypy.module.cpyext.funcobject -import pypy.module.cpyext.frameobject -import pypy.module.cpyext.classobject -import pypy.module.cpyext.pypyintf -import pypy.module.cpyext.memoryobject -import pypy.module.cpyext.codecs -import pypy.module.cpyext.pyfile - -# now that all rffi_platform.Struct types are registered, configure them -api.configure_types() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -843,9 +843,10 @@ compile_extra=['-DPy_BUILD_CORE'] if building_bridge: - if sys.platform == "win32": + if platform.platform.name == "msvc": # '%s' undefined; assuming extern returning int compile_extra.append("/we4013") + if sys.platform == "win32": # Sometimes the library is wrapped into another DLL, ensure that # the 
correct bootstrap code is installed kwds["link_extra"] = ["msvcrt.lib"] From commits-noreply at bitbucket.org Fri Apr 29 12:09:25 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 12:09:25 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: update the coding guide Message-ID: <20110429100925.7A64F282BEC@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43758:4180022b96dd Date: 2011-04-29 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4180022b96dd/ Log: update the coding guide diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -131,7 +131,7 @@ whether a particular function is implemented at application or interpreter level. -our runtime interpreter is "restricted python" +Our runtime interpreter is "RPython" ---------------------------------------------- In order to make a C code generator feasible all code on interpreter level has @@ -172,7 +172,7 @@ enables the code generator to emit efficient machine level replacements for pure integer objects, for instance. -Restricted Python +RPython ================= RPython Definition, not @@ -454,43 +454,6 @@ .. _`wrapped`: -RPylint -------- - -Pylint_ is a static code checker for Python. Recent versions -(>=0.13.0) can be run with the ``--rpython-mode`` command line option. This option -enables the RPython checker which will checks for some of the -restrictions RPython adds on standard Python code (and uses a -more aggressive type inference than the one used by default by -pylint). The full list of checks is available in the documentation of -Pylint. - -RPylint can be a nice tool to get some information about how much work -will be needed to convert a piece of Python code to RPython, or to get -started with RPython. 
While this tool will not guarantee that the -code it checks will be translate successfully, it offers a few nice -advantages over running a translation: - -* it is faster and therefore provides feedback faster than ``translate.py`` - -* it does not stop at the first problem it finds, so you can get more - feedback on the code in one run - -* the messages tend to be a bit less cryptic - -* you can easily run it from emacs, vi, eclipse or visual studio. - -Note: if pylint is not prepackaged for your OS/distribution, or if -only an older version is available, you will need to install from -source. In that case, there are a couple of dependencies, -logilab-common_ and astng_ that you will need to install too before -you can use the tool. - -.. _Pylint: http://www.logilab.org/project/pylint -.. _logilab-common: http://www.logilab.org/project/logilab-common -.. _astng: http://www.logilab.org/project/logilab-astng - - Wrapping rules ============== @@ -644,21 +607,19 @@ >>>> import sys >>>> sys.__file__ - '/home/hpk/pypy-dist/pypy/module/sys/*.py' + '/home/hpk/pypy-dist/pypy/module/sys' - >>>> import operator - >>>> operator.__file__ - '/home/hpk/pypy-dist/lib_pypy/operator.py' + >>>> import cPickle + >>>> cPickle.__file__ + '/home/hpk/pypy-dist/lib_pypy/cPickle..py' >>>> import opcode >>>> opcode.__file__ - '/home/hpk/pypy-dist/lib-python/modified-2.5.2/opcode.py' + '/home/hpk/pypy-dist/lib-python/modified-2.7/opcode.py' >>>> import os - faking - faking >>>> os.__file__ - '/home/hpk/pypy-dist/lib-python/2.5.2/os.py' + '/home/hpk/pypy-dist/lib-python/2.7/os.py' >>>> Module directories / Import order @@ -681,11 +642,11 @@ contains pure Python reimplementation of modules. -*lib-python/modified-2.5.2/* +*lib-python/modified-2.7/* The files and tests that we have modified from the CPython library. -*lib-python/2.5.2/* +*lib-python/2.7/* The unmodified CPython library. **Never ever check anything in there**. 
@@ -700,14 +661,14 @@ by default and CPython has a number of places where it relies on some classes being old-style. -If you want to change a module or test contained in ``lib-python/2.5.2`` -then make sure that you copy the file to our ``lib-python/modified-2.5.2`` +If you want to change a module or test contained in ``lib-python/2.7`` +then make sure that you copy the file to our ``lib-python/modified-2.7`` directory first. In mercurial commandline terms this reads:: - hg cp lib-python/2.5.2/somemodule.py lib-python/modified-2.5.2/ + $ hg cp lib-python/2.7/somemodule.py lib-python/modified-2.7/ and subsequently you edit and commit -``lib-python/modified-2.5.2/somemodule.py``. This copying operation is +``lib-python/modified-2.7/somemodule.py``. This copying operation is important because it keeps the original CPython tree clean and makes it obvious what we had to change. @@ -889,17 +850,8 @@ use your codespeak login or register ------------------------------------ -If you already committed to the PyPy source code, chances -are that you can simply use your codespeak login that -you use for subversion or for shell access. - -If you are not a commiter then you can still `register with -the tracker`_ easily. - -modifying Issues from hg commit messages ------------------------------------------ - -XXX: to be written after migrating the issue tracker away from codespeak.net +If you have an existing codespeak account, you can use it to login within the +tracker. Else, you can `register with the tracker`_ easily. .. _`register with the tracker`: https://codespeak.net/issue/pypy-dev/user?@template=register @@ -912,7 +864,7 @@ Testing in PyPy =============== -Our tests are based on the new `py.test`_ tool which lets you write +Our tests are based on the `py.test`_ tool which lets you write unittests without boilerplate. All tests of modules in a directory usually reside in a subdirectory **test**. 
There are basically two types of unit tests: @@ -923,12 +875,6 @@ - **Application Level tests**. They run at application level which means that they look like straight python code but they are interpreted by PyPy. -Both types of tests need an `objectspace`_ they can run with (the interpreter -dispatches operations on objects to an objectspace). If you run a test you -can usually give the '-o' switch to select an object space. E.g. '-o thunk' -will select the thunk object space. The default is the `Standard Object Space`_ -which aims to implement unmodified Python semantics. - .. _`standard object space`: objspace.html#standard-object-space .. _`objectspace`: objspace.html .. _`py.test`: http://pytest.org/ @@ -999,7 +945,7 @@ python test_all.py file_or_directory which is a synonym for the general `py.test`_ utility -located in the ``pypy`` directory. For switches to +located in the ``py/bin/`` directory. For switches to modify test execution pass the ``-h`` option. Test conventions @@ -1013,10 +959,6 @@ which contain unittests. Such scripts can usually be executed directly or are collectively run by pypy/test_all.py -- each test directory needs a copy of pypy/tool/autopath.py which - upon import will make sure that sys.path contains the directory - where 'pypy' is in. - .. _`change documentation and website`: Changing documentation and website @@ -1033,8 +975,7 @@ .. _`ReST quickstart`: http://docutils.sourceforge.net/docs/user/rst/quickref.html Note that the web site of http://pypy.org/ is maintained separately. -For now it is in the repository https://bitbucket.org/pypy/extradoc -in the directory ``pypy.org``. 
+For now it is in the repository https://bitbucket.org/pypy/pypy.org Automatically test documentation/website changes ------------------------------------------------ From commits-noreply at bitbucket.org Fri Apr 29 12:10:56 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 12:10:56 +0200 (CEST) Subject: [pypy-svn] buildbot default: General Windows disliking. Message-ID: <20110429101056.1BFCC282BEC@codespeak.net> Author: Armin Rigo Branch: Changeset: r497:07e17d302e69 Date: 2011-04-29 12:10 +0200 http://bitbucket.org/pypy/buildbot/changeset/07e17d302e69/ Log: General Windows disliking. diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -102,11 +102,17 @@ class CheckGotRevision(ShellCmd): description = 'got_revision' - command = ['hg', 'parents', '--template', '{rev}:{node|short}'] + command = ['hg', 'parents', '--template', '{rev}:{node}'] def commandComplete(self, cmd): if cmd.rc == 0: got_revision = cmd.logs['stdio'].getText() + # manually get the effect of {node|short} without using a + # '|' in the command-line, because it doesn't work on Windows + num = got_revision.find(':') + if num > 0: + got_revision = got_revision[:num+13] + # final_file_name = got_revision.replace(':', '-') # ':' should not be part of filenames --- too many issues self.build.setProperty('got_revision', got_revision, 'got_revision') From commits-noreply at bitbucket.org Fri Apr 29 12:32:15 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 12:32:15 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: more cleanup Message-ID: <20110429103215.4B0AC36C053@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43762:b6f5304180f4 Date: 2011-04-29 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/b6f5304180f4/ Log: more cleanup diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- 
a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -17,7 +17,6 @@ translation process - as opposed to encoding low level details into the language implementation itself. `more...`_ - .. _Python: http://docs.python.org/reference/ .. _`more...`: architecture.html @@ -132,6 +131,7 @@ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html .. _`Learning more about the translation toolchain and how to develop (with) PyPy`: getting-started-dev.html +.. _setuptools: http://pypi.python.org/pypi/setuptools Understanding PyPy's architecture --------------------------------- diff --git a/pypy/doc/configuration.rst b/pypy/doc/configuration.rst --- a/pypy/doc/configuration.rst +++ b/pypy/doc/configuration.rst @@ -1,4 +1,4 @@ - ============================= +============================= PyPy's Configuration Handling ============================= @@ -184,12 +184,12 @@ The usage of config objects in PyPy =================================== -The two large parts of PyPy, the Python interpreter_ and the `RPython +The two large parts of PyPy, the Python interpreter and the `RPython toolchain`_ toolchain, have two separate sets of options. The translation toolchain options can be found on the ``config`` attribute of all ``TranslationContext`` instances and are described in `pypy/config/translationoption.py`_. The interpreter options are attached to the object space, also under the name ``config`` and are described in `pypy/config/pypyoption.py`_. -_interpreter: interpreter.html + .. include:: _ref.txt diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -27,7 +27,7 @@ `getting started`_ provides hands-on instructions including a two-liner to run the PyPy Python interpreter on your system, examples on advanced features and -entry points for using PyPy's translation tool chain. +entry points for using the `RPython toolchain`_. `FAQ`_ contains some frequently asked questions. 
@@ -86,7 +86,7 @@ .. _`Videos`: video-index.html .. _`Release 1.4`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org - +.. _`RPython toolchain`: translation.html Project Documentation From commits-noreply at bitbucket.org Fri Apr 29 12:36:48 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 29 Apr 2011 12:36:48 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: run makeref and fix links Message-ID: <20110429103648.86DBC282B9E@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43763:8ffb9dffd742 Date: 2011-04-29 12:36 +0200 http://bitbucket.org/pypy/pypy/changeset/8ffb9dffd742/ Log: run makeref and fix links diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -145,7 +145,7 @@ There are a number of environment variables that can be tweaked to influence the GC. (Their default value should be ok for most usages.) You can read more about them at the start of -`rpython/memory/gc/minimark.py`_. +`pypy/rpython/memory/gc/minimark.py`_. In more details: @@ -162,7 +162,7 @@ to the old stage. The dying case 2 objects are immediately freed. - The old stage is an area of memory containing old (small) objects. It - is handled by `rpython/memory/gc/minimarkpage.py`_. It is organized + is handled by `pypy/rpython/memory/gc/minimarkpage.py`_. It is organized as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. Each page can either be free, or contain small objects of all the same size. Furthermore at any point in time each object location can be diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -100,6 +100,8 @@ .. _`pypy/rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/hybrid.py .. 
_`pypy/rpython/memory/gc/markcompact.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/markcompact.py .. _`pypy/rpython/memory/gc/marksweep.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/marksweep.py +.. _`pypy/rpython/memory/gc/minimark.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/minimark.py +.. _`pypy/rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/minimarkpage.py .. _`pypy/rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/semispace.py .. _`pypy/rpython/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ .. _`pypy/rpython/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ootype.py From commits-noreply at bitbucket.org Fri Apr 29 12:56:08 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 29 Apr 2011 12:56:08 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (antocuni, cfbolz): the logo in large and small variants (without the word pypy) Message-ID: <20110429105608.86381282B9E@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3543:763d72d329e8 Date: 2011-04-29 12:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/763d72d329e8/ Log: (antocuni, cfbolz): the logo in large and small variants (without the word pypy) diff --git a/logo/pypy_small.svg b/logo/pypy_small.svg new file mode 100644 --- /dev/null +++ b/logo/pypy_small.svg @@ -0,0 +1,493 @@ + + + +image/svg+xml + + + pypy logo - by samuel reis + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/logo/pypy_fin.svg b/logo/pypy_fin.svg deleted file mode 100644 --- a/logo/pypy_fin.svg +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/logo/pypy_fin.jpg b/logo/pypy_fin.jpg deleted file mode 100644 Binary file logo/pypy_fin.jpg has changed diff --git a/logo/pypy_logo.png b/logo/pypy_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..612ab0a779a91415dd22848715c13735d50e13a3 GIT binary patch [cut] diff --git a/logo/pypy_small.png b/logo/pypy_small.png new file mode 100644 index 0000000000000000000000000000000000000000..e19809ff08cff28dd5457095078f665873277759 GIT binary patch [cut] diff --git a/logo/pypy_fin.svg b/logo/pypy_logo.svg copy from logo/pypy_fin.svg copy to logo/pypy_logo.svg --- a/logo/pypy_fin.svg +++ b/logo/pypy_logo.svg @@ -1,108 +1,511 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +image/svg+xml + + + pypy logo - by samuel reis + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file From commits-noreply at bitbucket.org Fri Apr 29 12:58:09 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 12:58:09 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: minor english wart Message-ID: <20110429105809.6A8C736C204@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43764:8bc8876751b6 Date: 2011-04-29 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/8bc8876751b6/ Log: minor english wart diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -4,7 +4,7 @@ :abstract: - When PyPy is translated into an executable like ``pypy-c``, the + When PyPy is translated into an executable such as ``pypy-c``, the executable contains a full virtual machine that can optionally include a Just-In-Time compiler. 
This JIT compiler is **generated automatically from the interpreter** that we wrote in RPython. From commits-noreply at bitbucket.org Fri Apr 29 12:58:10 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 12:58:10 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110429105810.C83D936C204@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43765:590ceb16ac9f Date: 2011-04-29 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/590ceb16ac9f/ Log: merge heads diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -145,7 +145,7 @@ There are a number of environment variables that can be tweaked to influence the GC. (Their default value should be ok for most usages.) You can read more about them at the start of -`rpython/memory/gc/minimark.py`_. +`pypy/rpython/memory/gc/minimark.py`_. In more details: @@ -162,7 +162,7 @@ to the old stage. The dying case 2 objects are immediately freed. - The old stage is an area of memory containing old (small) objects. It - is handled by `rpython/memory/gc/minimarkpage.py`_. It is organized + is handled by `pypy/rpython/memory/gc/minimarkpage.py`_. It is organized as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. Each page can either be free, or contain small objects of all the same size. Furthermore at any point in time each object location can be diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -100,6 +100,8 @@ .. _`pypy/rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/hybrid.py .. _`pypy/rpython/memory/gc/markcompact.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/markcompact.py .. _`pypy/rpython/memory/gc/marksweep.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/marksweep.py +.. 
_`pypy/rpython/memory/gc/minimark.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/minimark.py +.. _`pypy/rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/minimarkpage.py .. _`pypy/rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/semispace.py .. _`pypy/rpython/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ .. _`pypy/rpython/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ootype.py From commits-noreply at bitbucket.org Fri Apr 29 13:01:18 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 13:01:18 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: RPython toolchain again Message-ID: <20110429110118.6CAE436C206@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43766:a9048c5cc0f7 Date: 2011-04-29 13:00 +0200 http://bitbucket.org/pypy/pypy/changeset/a9048c5cc0f7/ Log: RPython toolchain again diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -10,8 +10,8 @@ The JIT's `theory`_ is great in principle, but the actual code is a different story. This section tries to give a high level overview of how PyPy's JIT is -implemented. It's helpful to have an understanding of how the PyPy `translation -tool chain`_ works before digging into the sources. +implemented. It's helpful to have an understanding of how the `RPython translation +toolchain`_ works before digging into the sources. Almost all JIT specific code is found in pypy/jit subdirectories. Translation time code is in the codewriter directory. The metainterp directory holds @@ -19,7 +19,7 @@ the backend directory is responsible for generating machine code. .. _`theory`: overview.html -.. _`translation tool chain`: ../translation.html +.. 
_`RPython translation toolchain`: ../translation.html JIT hints From commits-noreply at bitbucket.org Fri Apr 29 13:12:19 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 13:12:19 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: Grrr. Bad fijal. Message-ID: <20110429111219.9811C2A202B@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r3544:5a2db73e47c3 Date: 2011-04-29 13:12 +0200 http://bitbucket.org/pypy/extradoc/changeset/5a2db73e47c3/ Log: Grrr. Bad fijal. diff --git a/pypy.org/image/pypy-logo.png b/pypy.org/image/pypy-logo.png deleted file mode 100644 Binary file pypy.org/image/pypy-logo.png has changed diff --git a/pypy.org/source/_layouts/site.genshi b/pypy.org/source/_layouts/site.genshi deleted file mode 100644 --- a/pypy.org/source/_layouts/site.genshi +++ /dev/null @@ -1,83 +0,0 @@ - - - - - PyPy :: ${title} - ${site_title} - - - - - - - - - - - - - - - - - - -
- - - -
-${Markup(content)} -
- -
- - - diff --git a/pypy.org/links.txt b/pypy.org/links.txt deleted file mode 100644 --- a/pypy.org/links.txt +++ /dev/null @@ -1,49 +0,0 @@ - -Related EU projects and EU pages: -================================= - - * `6th Framework Programme`_ - - * `The Calibre Project`_ - - * `Edos`_ - - * `Amigo`_ - - * `tOSSad`_ - - * `Cospa`_ - - * `Floss`_ - -.. _`6th Framework Programme`: http://fp6.cordis.lu/index.cfm?fuseaction=UserSite.FP6HomePage - -.. _`The Calibre Project`: http://www.calibre.ie - -.. _`Edos`: http://www.pps.jussieu.fr/~dicosmo/EDOS/index.html - -.. _`Amigo`: http://www.hitech-projects.com/euprojects/amigo/ - -.. _`tOSSad`: http://www.tossad.org/ - -.. _`Cospa`: http://www.cospa-project.org/ - -.. _`Floss`: http://www.infonomics.nl/FLOSS/ - -Related Python F/OSS projects: -============================== - - * `CPython`_ - -.. _`CPython`: http://www.python.org - -.. bea put the following into the list as well - Ironpython? - Jython? - Twisted? - - Related F/OSS projects: - Oz? - ..? - ..? - diff --git a/pypy.org/revision_march2006.txt b/pypy.org/revision_march2006.txt deleted file mode 100644 --- a/pypy.org/revision_march2006.txt +++ /dev/null @@ -1,20 +0,0 @@ -pypy.org;revision March 2006 --------------------------------- - -In order to have an external website servicing information in connection -to pressreleases and more external newsletter the pypy org page needs updating. - -The following changes/adds needs to be done: - -- change navigation to links about: - Home, News, Consortium, Links, Documentation, Community/Coding - - Links being a page with links to sisterprojects, to commission pages etc - - Documentation being a page to access published reports, pressreleases, talks, papers, - diagrams, tutorials, pictures, film? - -- info regarding industrial impact (why PyPy, gains? what will it provide, which industries/languages - do we target - who will have use of PyPy???? 
This info could be on the main page (HOME) - - -Do we want to have a link to universities, conferences and companies we are in touch with/cooperating with? -Such as UIB, UCD, Iona, x, y, z... diff --git a/pypy.org/css/site.css b/pypy.org/css/site.css deleted file mode 100644 --- a/pypy.org/css/site.css +++ /dev/null @@ -1,1360 +0,0 @@ -/* No Copyright (-) 2010 The Ampify Authors. This file is under the */ -/* Public Domain license that can be found in the root LICENSE file. */ - -/* Element styles */ - -* { - margin: 0; - padding: 0; -} - -html { - height: 100%; -} - -body { - background-color: #efefef; - background-repeat: repeat; - font: 90%/1em 'Lucida Grande', 'Lucida Sans Unicode', Optima, Verdana, sans-serif; - height: 100%; - width: 100%; -} - -a, a:visited, a:hover, a:active, a:hover { - color: blue; - text-decoration: underline; -} - -a:hover { - text-decoration: none; -} - -a.promote-fb, a.promote-fb:active, a.promote-fb:hover, a.promote-fb:visited { - background: url(http://static.ampify.it/gfx.share-facebook-sprite.png) no-repeat; - color: transparent; - display: block; - float: left; - height: 22px; - margin: 0 16px 0 0px; - outline; none; - overflow: hidden; - position: relative; - text-indent: 9999px !important; - top: -2px; - vertical-align: middle; - width: 146px; -} - -a.promote-fb:hover { - background-position: 0 -22px; -} - -a.promote-fb:active { - background-position: 0 -44px; -} - -blockquote, p, dl, h1, h2, h3, h4, h5, h6, ol, ul { - padding-top: 0.5em; - padding-bottom: 0.5em; -} - -code, .literal { - background-color: #f0f0f0; - border: 1px solid #dadada; - padding: 1px 3px; - font-family: Monaco, "DejaVu Sans Mono", "Bitstream Vera Sans Mono", monospace; -} - -dl, ol, ul { - padding-left: 20px; -} - -h1 { - font-size: 2.25em; -} - -h1, h2 { - color: #c32528; - font-family: "museo-1", "museo-2", Verdana; - letter-spacing: 1px; - text-shadow: #eee 2px 2px 3px; -} - -img { - border: 0; -} - -label, input, select, textarea { - cursor: pointer; 
-} - -pre { - background-color: #fff; - border: 1px solid #cacaca; - color: #101010; - font: 12px/1.4em Monaco, "DejaVu Sans Mono", "Bitstream Vera Sans Mono", monospace; - overflow: auto; - padding: 7px 0 8px 7px; - margin: 10px 30px 0 30px; - -moz-border-radius: 5px; - -webkit-border-radius: 5px; - border-radius: 5px; - -webkit-box-shadow: 0px 0px 7px #cacaca; -} - -pre.ascii-art { - line-height: 1em; - overflow: visible; -} - -pre code { - background-color: transparent; - border: 0; - padding: 0; -} - -select { - max-width: 200px; -} - - -strong { - font-family: Museo, Verdana; - font-size: 1.1em; -} - -table, table.docutils { - border: 0; - border-collapse: collapse; - margin: 0 auto; - text-align: left; -} - -table.docutils { - margin: 5px 40px 10px 30px; -} - -/* -table a { - text-decoration: none; -} -*/ - -th { - border-bottom: 2px solid #808096; - padding: 8px; -} - -table.docutils td { - border-bottom: 1px solid #ccc; - padding: 9px 8px 8px 8px; -} - -ul { - list-style-type: circle; -} - -/* ID styles */ - -#body { - background-color: #fff; - border-radius: 15px; - -webkit-border-radius: 15px; - -moz-border-radius: 15px; - padding: 5px; -} - -#body-inner { - margin: 0 auto; - padding: 10px 20px; - width: 950px; -} - -#body-outer { - height: 100%; -} - -body > #body-outer { - height: auto; - min-height: 100%; -} - -#content { - line-height: 1.55em; -} - -#footer-espians { - margin: 0 auto; - text-align: center; -} - -#footer { - background-color: #161616; - background-image: url(http://static.ampify.it/gfx.footer-background.png); - background-position: top left; - background-repeat: repeat-x; - clear: both; - color: #fff; - height: 50px; - margin-top: -200px; - position: relative; - text-align: center; - width: 100%; -} - -#footer-content { - margin: 0 auto; - padding-top: 15px; - text-align: center; -} - -#header { - width: 100%; - border-bottom: 1px dashed #9e9e9e; - margin-bottom: 10px; -} - -#logo { - float: left; -} - -#main { - float: left; - 
padding: 10px 30px 0 10px; - width: 630px; - line-height: 2em; - font-size: 0.9em; -} - -#main blockquote { - padding-left: 20px; -} - -#main h1 { - font-size: 1.5em; - padding-top: 20px; - padding-bottom: 0; -} - -#main h1.title { - font-size: 1.8em; - padding-top: 0; - padding-bottom: 10px; -} - -#main p { - padding-top: 10px; -} - -#main pre { - font-size: 15px; - font-family: "inconsolata-2", "inconsolata-1", Monaco, "DejaVu Sans Mono", "Bitstream Vera Sans Mono", monospace; -} - -#main ul, #main ol { - padding-left: 35px; -} - -#main li { - padding-top: 5px; -} - -#sidebar { - float: left; - width: 270px; - font-size: 0.9em; - line-height: 1.6em; -} - -#sidebar a, #sidebar a:hover, #sidebar a:active, #sidebar a:visited { - text-decoration: none; -} - -#sidebar a:hover { - text-decoration: underline; -} - -#sidebar h3 { - text-align: right; - border-bottom: 1px solid #cacaca; - margin-bottom: 10px; -} - -#sidebar ul { - list-style-type: none; - padding: 0; - margin: 0; -} - -#sidebar li { - padding-bottom: 12px; -} - -#sidebar li div, .sidebar-text { - color: #6e6e6e; - font-size: 0.9em; -} - -#table-of-contents { - float: right; - margin: 10px 0 10px 20px; - border: 1px solid #cacaca; -} - -#table-of-contents .topic-title { - display: none; -} - -#table-of-contents ul { - padding: 0 20px 5px 20px; -} - -#main-support-page { - width: 540px; - margin: 0 auto; -} - -#main-support-page pre { - width: 350px; -} - -#menu ul { - list-style-type: none; - padding: 0; -} - -#menu ul li { - background-color: #fff; - float: left; - margin-left: 12px; - text-align: center; -} - -#menu a { - border-top: 2px solid #ccc; - color: #000; - display: block; - font-size: 18px; - padding-top: 6px; - padding-bottom: 8px; - text-decoration: none; -} - -#menu a.selected, #menu a.selected:hover { - background-color: #f6f6f6; - border-top: 2px solid #a9151b; -} - -#menu a:hover { - border-top: 2px solid #969696; -} - -#menu-follow { - float: right; -} - -#menu-follow div { - padding: 0 
5px 5px 0; -} - -#menu-lang { - margin: 0 105px 0 20px; - float: left; - text-align: right; - width: 140px; -} - -#menu-lang select { - max-width: 120px; -} - -#menu-lang-form { - display: none; - margin-top: 5px; -} - -#menu-sub { - font-size: 1em; - padding-bottom: 10px; - text-align: center; -} - -.menu-sub-sep { - color: #9f9f9f; - padding: 2px; -} - -#spread-button { - background-color: #ffffff; - border-radius: 5px; - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - padding: 5px; -} - -/* Support site IDs */ - -#help-spread-the-word { - float: left; -} - -/* Class styles */ - -.absmiddle { - vertical-align: middle; -} - -.boxed { - border: 1px solid #cacaca; - -moz-border-radius: 5px; - -webkit-border-radius: 5px; - border-radius: 5px; - -webkit-box-shadow: 0px 0px 7px #cacaca; -} - -.center { - text-align: center; -} - -/* clear utility classes */ - -.clear { - background-color: transparent; - border: 0px solid; - clear: both; - height: 0; - margin: 0; - padding: 0; - width: 0; -} - -.clear-left { - background-color: transparent; - border: 0px solid; - clear: left; - height: 0; - margin: 0; - padding: 0; - width: 0; -} - -.clear-right { - background-color: transparent; - border: 0px solid; - clear: right; - height: 0; - margin: 0; - padding: 0; - width: 0; -} - -/* clearfix */ - -.clearfix:after { - content: "."; - display: block; - height: 0; - clear: both; - visibility: hidden; -} - -.clearfix { - display: inline-block; -} - -/* Hides from IE-mac \*/ -* html .clearfix { - height: 1%; -} -.clearfix { - display: block; -} -/* End hide from IE-mac */ - -/* float utility classes */ - -.float-left { - float: left; - padding-bottom: 7px; - padding-right: 7px; -} - -.float-right { - float: right; - padding-bottom: 7px; - padding-left: 7px; -} - -/* footer classes */ - -.footer-follow { - margin: 0 45px 15px 0; - float: left; -} - -.footer-follow a { - color: #fff; - text-decoration: underline; - font-size: 10px; -} - -.footer-follow a:hover { - 
text-decoration: none; -} - -.footer-follow div { - white-space: nowrap; -} - -.footer-menu { - float: right; - margin: 17px 50px 0 0; - font-size: 10px; -} - -.footer-menu a { - color: #fff; - padding-left: 5px; - padding-right: 5px; -} - -.footer-text { - text-align: left; -} - -/* quoted blocks with attribution */ - -.quote { - background: transparent url(http://static.ampify.it/gfx.blockquote.gif) no-repeat 0 0; - padding: 6px 12px 0 40px; - color: #575757; - font-size: 22px; - line-height: 28px; -} - -.quote-attribution { - color: #575757; - font-size: 14px; - text-align: right; - padding: 10px 12px 10px 0; -} - -.quote-attribution a, .quote-attribution a:active, .quote-attribution a:visited { - color: #575757; - text-decoration: underline; -} - -.quote-attribution a:hover { - text-decoration: none; -} - -/* Source code syntax highlighting */ - -.syntax .c { color: #919191 } /* Comment */ -.syntax .cm { color: #919191 } /* Comment.Multiline */ -.syntax .cp { color: #919191 } /* Comment.Preproc */ -.syntax .cs { color: #919191 } /* Comment.Special */ -.syntax .c1 { color: #919191 } /* Comment.Single */ - -.syntax .err { color: #a61717; background-color: #e3d2d2 } /* Error */ - -.syntax .g { color: #101010 } /* Generic */ -.syntax .gd { color: #d22323 } /* Generic.Deleted */ -.syntax .ge { color: #101010; font-style: italic } /* Generic.Emph */ -.syntax .gh { color: #101010 } /* Generic.Heading */ -.syntax .gi { color: #589819 } /* Generic.Inserted */ -.syntax .go { color: #6a6a6a } /* Generic.Output */ -.syntax .gp { color: #6a6a6a } /* Generic.Prompt */ -.syntax .gr { color: #d22323 } /* Generic.Error */ -.syntax .gs { color: #101010 } /* Generic.Strong */ -.syntax .gt { color: #d22323 } /* Generic.Traceback */ -.syntax .gu { color: #101010 } /* Generic.Subheading */ - -.syntax .k { color: #c32528 } /* Keyword */ /* espian red */ -.syntax .k { color: #ff5600 } /* Keyword */ /* orangy */ -.syntax .kc { color: #ff5600 } /* Keyword.Constant */ -.syntax .kd { 
color: #ff5600 } /* Keyword.Declaration */ -.syntax .kd { color: #ff5600 } /* Keyword.Declaration */ -.syntax .kn { color: #ff5600 } /* Keyword */ -.syntax .kp { color: #ff5600 } /* Keyword.Pseudo */ -.syntax .kr { color: #ff5600 } /* Keyword.Reserved */ -.syntax .kt { color: #ff5600 } /* Keyword.Type */ - -.syntax .l { color: #101010 } /* Literal */ -.syntax .ld { color: #101010 } /* Literal.Date */ - -.syntax .m { color: #3677a9 } /* Literal.Number */ /* darkish pastely blue */ -.syntax .m { color: #00a33f } /* Literal.Number */ /* brightish green */ -.syntax .m { color: #1550a2 } /* Literal.Number */ /* darker blue */ -.syntax .m { color: #5d90cd } /* Literal.Number */ /* pastely blue */ -.syntax .mf { color: #5d90cd } /* Literal.Number.Float */ -.syntax .mh { color: #5d90cd } /* Literal.Number.Hex */ -.syntax .mi { color: #5d90cd } /* Literal.Number.Integer */ -.syntax .il { color: #5d90cd } /* Literal.Number.Integer.Long */ -.syntax .mo { color: #5d90cd } /* Literal.Number.Oct */ - -.syntax .bp { color: #a535ae } /* Name.Builtin.Pseudo */ -.syntax .n { color: #101010 } /* Name */ -.syntax .na { color: #bbbbbb } /* Name.Attribute */ -.syntax .nb { color: #bf78cc } /* Name.Builtin */ /* pastely purple */ -.syntax .nb { color: #af956f } /* Name.Builtin */ /* pastely light brown */ -.syntax .nb { color: #a535ae } /* Name.Builtin */ /* brightish pastely purple */ -.syntax .nc { color: #101010 } /* Name.Class */ -.syntax .nd { color: #6d8091 } /* Name.Decorator */ -.syntax .ne { color: #af956f } /* Name.Exception */ -.syntax .nf { color: #3677a9 } /* Name.Function */ -.syntax .nf { color: #1550a2 } /* Name.Function */ -.syntax .ni { color: #101010 } /* Name.Entity */ -.syntax .nl { color: #101010 } /* Name.Label */ -.syntax .nn { color: #101010 } /* Name.Namespace */ -.syntax .nn { color: #101010 } /* Name.Namespace */ -.syntax .no { color: #101010 } /* Name.Constant */ -.syntax .nx { color: #101010 } /* Name.Other */ -.syntax .nt { color: #6d8091 } /* Name.Tag */ 
-.syntax .nv { color: #101010 } /* Name.Variable */ -.syntax .vc { color: #101010 } /* Name.Variable.Class */ -.syntax .vg { color: #101010 } /* Name.Variable.Global */ -.syntax .vi { color: #101010 } /* Name.Variable.Instance */ -.syntax .py { color: #101010 } /* Name.Property */ - -.syntax .o { color: #ff5600 } /* Operator */ /* orangy */ -.syntax .o { color: #101010 } /* Operator */ -.syntax .ow { color: #101010 } /* Operator.Word */ - -.syntax .p { color: #101010 } /* Punctuation */ - -.syntax .s { color: #dd1144 } /* Literal.String */ /* darkish red */ -.syntax .s { color: #c32528 } /* Literal.String */ /* espian red */ -.syntax .s { color: #39946a } /* Literal.String */ /* pastely greeny */ -.syntax .s { color: #5d90cd } /* Literal.String */ /* pastely blue */ -.syntax .s { color: #00a33f } /* Literal.String */ /* brightish green */ -.syntax .sb { color: #00a33f } /* Literal.String.Backtick */ -.syntax .sc { color: #00a33f } /* Literal.String.Char */ -.syntax .sd { color: #00a33f } /* Literal.String.Doc */ -.syntax .se { color: #00a33f } /* Literal.String.Escape */ -.syntax .sh { color: #00a33f } /* Literal.String.Heredoc */ -.syntax .si { color: #00a33f } /* Literal.String.Interpol */ -.syntax .sr { color: #00a33f } /* Literal.String.Regex */ -.syntax .ss { color: #00a33f } /* Literal.String.Symbol */ -.syntax .sx { color: #00a33f } /* Literal.String.Other */ -.syntax .s1 { color: #00a33f } /* Literal.String.Single */ -.syntax .s2 { color: #00a33f } /* Literal.String.Double */ - -.syntax .w { color: #101010 } /* Text.Whitespace */ -.syntax .x { color: #101010 } /* Other */ - -.syntax.bash .nb { color: #101010 } -.syntax.bash .nv { color: #c32528 } - -.syntax.css .k { color: #606060 } -.syntax.css .nc { color: #c32528 } -.syntax.css .nf { color: #c32528 } -.syntax.css .nt { color: #c32528 } - -.syntax.rst .k { color: #5d90cd } -.syntax.rst .ow { color: #5d90cd } -.syntax.rst .p { color: #5d90cd } - -.syntax.yaml .l-Scalar-Plain { color: #5d90cd } 
-.syntax.yaml .p-Indicator { color: #101010 } - -/* classes for support pages */ - -.community-section { - margin: 10px auto 10px auto; - width: 500px; -} - -.community-section-heading { - font-size: 1.6em; - margin-top: 10px; -} - -.community-section-heading span { - background-color: #efd7d7; - line-height: 1.7em; -} - -.community-section-text { - line-height: 1.6em; - padding: 10px 0 0 50px; -} - -.support-page-banner { - font-size: 1.2em; - line-height: 1.6em; - margin: 20px 0; -} - -.support-page-banner-text { - background-color: #f0f0f0; -} - -/* classes for the main support page */ - -.promote { - margin: 5px 0 0px 26px; -} - -.promote-retweet { - margin-top: 2px; - float: left; -} - -.promote-yahoo-buzz { - margin: -2px 15px 0 0; - float: left; - padding-bottom: 5px; -} - -.promote-google-buzz, .promote-google-buzz:active, .promote-google-buzz:visited, .promote-google-buzz:hover { - background: url(http://static.ampify.it/icon.google-buzz.png) no-repeat 0 3px; - color: #666; - float: left; - font-size: 0.9em; - font-weight: bold; - margin-top: -2px; - padding: 0 10px 5px 20px; - text-decoration: none; -} - -.promote-google-buzz:hover { - text-decoration: underline; -} - -.share-text { - font-size: 18px; - line-height: 24px; - margin: 0px 0 12px 26px; - color: #aaa; -} - -/* thanks to mashable.com for these wonderful share icons + css!! thanks guys -- you rock!! 
*/ - -ul.share-buttons { - line-height: normal; - margin: 12px 0 0 24px; - padding: 0; -} - -.share-buttons li { - display: inline; - float: left; - list-style-type: none; - margin: 0 18px 12px 0; - padding-top: 1px; -} - -.share-buttons a { - background: url(http://static.ampify.it/gfx.mashable-syndication.gif) no-repeat; - color: #575757; - display: block; - font-size: 7px; - height: 14px; - overflow: hidden; - padding: 33px 0 0; - text-align: center; - text-decoration: none; - white-space: nowrap; - width: 46px; -} - -.share-buttons a:hover { - text-decoration:none; -} - -.share-buttons .delicious a { - background-position: -138px 0; -} - -.share-buttons .delicious a:hover { - background-position: -138px -47px; -} - -.share-buttons .digg a { - background-position: 0 0; -} - -.share-buttons .digg a:hover { - background-position: 0 -47px; -} - -.share-buttons .facebook a { - background-position: -184px 0; -} - -.share-buttons .facebook a:hover { - background-position: -184px -47px; -} - -.share-buttons .google a { - background-position: -276px 0; -} - -.share-buttons .google a:hover { - background-position: -276px -47px; -} - -.share-buttons .myspace a { - background-position: -230px 0; -} - -.share-buttons .myspace a:hover { - background-position: -230px -47px; -} - -.share-buttons .reddit a { - background-position: -414px 0; -} - -.share-buttons .reddit a:hover { - background-position: -414px -47px; -} - -.share-buttons .sharethis a { - background-position: -460px 0; -} - -.share-buttons .sharethis a:hover { - background-position: -460px -47px; -} - -.share-buttons .stumbleupon a { - background-position: -92px 0; -} - -.share-buttons .stumbleupon a:hover { - background-position: -92px -47px; -} - -.share-buttons .technorati a { - background-position: -322px 0; -} - -.share-buttons .technorati a:hover { - background-position: -322px -47px; -} - -.share-buttons .twitter a { - background-position: -46px 0; -} - -.share-buttons .twitter a:hover { - 
background-position: -46px -47px; -} - -.share-buttons .yahoo a { - background-position: -368px 0; -} - -.share-buttons .yahoo a:hover { - background-position: -368px -47px; -} - -/* Unsorted */ - -.columns { - max-width: 940px; - width: 940px; -} - -.column, .column-last { - float: left; - /* - height: 180px; - */ - padding: 0 10px 0 0; - position: relative; - width: 227px; -} - -.column-last { - padding: 0 0 0 0; -} - -.column-text { - font-size: 12px; - line-height: 18px; - margin: 10px 0 20px 15px; -} - -.column-footer { - background-image: url(bottom.gif); - background-repeat: no-repeat; - background-color: transparent; -/* - background-attachment: scroll; - */ -} - -.column-left { - border-right: 0px solid #f00; - float: left; - width: 455px; -} - -.column-left-text { - font-size: 12px; - line-height: 18px; - margin-left: 12px; -} - -.column-right { - float: right; - width: 485px; -} - -.follow { - display: block; - list-style-type: none; - margin: 12px 0px 0px 24px; - padding: 0px; - text-align: center; -} - -.follow li { - float: left; - display: block; - margin-right: 30px; - margin-bottom: 18px; -} - -.follow a { - color: #575757; - display: block; - font-size: 10px; - margin-top: 7px; - text-decoration: underline; - text-decoration: none; - font-weight: bold; -} - -.follow a:hover { - text-decoration: none; -} - -.get-left { - float: left; - width: 230px; - margin-right: 12px; - font-weight: bold; -} - -.get-right { - float: right; - width: 230px; - margin-right: 12px; - font-weight: bold; -} - -.get-item { - border: 1px solid #ccc; - border-radius: 15px; - -webkit-border-radius: 15px; - -moz-border-radius: 15px; - margin-top: 10px; - font-weight: normal; - margin: 12px 0 12px 0; - padding: 10px; -} - -.get-item img { - float: left; - margin-right: 5px; - padding-top: 3px; - vertical-align: middle; -} - -.headline { - margin-bottom: 10px; - text-align: center; - line-height: 36px; -} - -.headline-text { - background-color: #f0f0f0; - font-size: 20px; -} 
- -.headline-text-small { - background-color: #f0f0f0; - font-size: 20px; -} - -.lede { - background-color: #dfdfdf; -} - -.link { - font-size: 16px; -} - -.oneoff { - margin: 12px 12px 12px 0; - font-size: 14px; - line-height: 20px; - border-top: 1px solid #ccc; - border-bottom: 1px solid #ccc; - padding: 12px 0 12px 0; -} - -.optional { - color: #666; - font-size: 9px; - vertical-align: super; -} - -/* other */ - -#callout { - background-color: #fff; - margin-bottom: 10px; -/* - border-bottom-left-radius: 15px; - border-bottom-right-radius: 15px; - -moz-border-radius-bottomleft: 15px; - -moz-border-radius-bottomright: 15px; - -webkit-border-bottom-left-radius: 15px; - -webkit-border-bottom-right-radius: 15px; -*/ - padding: 10px; -} - -#feature { - float: right; -} - -#supporter-form { - font-size: 14px; - line-height: 26px; - margin: 8px 0 0 0; - padding-top: 5px; -} - -#supporter-form td { - padding-top: 10px; - vertical-align: top; -} - -#supporter-form .label { - text-align: right; - width: 205px; - padding-right: 10px; - padding-left: 10px; - font-size: 12px; - font-weight: bold; - white-space: nowrap; -} - -.input-item input, .input-item textarea { - /* thanks soundcloud! 
*/ - background-image: url(http://static.ampify.it/gfx.button-repeat.png); - background-position: 0 -1200px; - background-repeat: repeat-x; - border: 1px solid #ccc; - border-radius: 5px; - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - color: #333; - font-size: 20px; - max-width: 220px; - padding: 3px 0 3px 3px; - width: 220px; -} - -.input-item select { - font-size: 16px; -} - -.male { - border: 2px solid #70b1e6; - background-color: #eaf3fb; -} - -.male .label { - color: #70b1e6; -} - -.female { - border: 2px solid #ed6aa4; - background-color: #fee5f5; -} - -.female .label { - color: #ed6aa4; -} - -#supporter-submit { - text-align: center; - padding: 0 0 15px 0; -} - -#support { - font-size: 20px; - margin-bottom: 15px; - margin-top: 5px; -} - -.male-counter { - color: #70b1e6; - font-size: 12px; - margin-bottom: -50px; - white-space: nowrap; -} - -.female-counter { - color: #ed6aa4; - font-size: 12px; - padding-left: 24px; - margin-bottom: -50px; - white-space: nowrap; -} - - -.tav-thanks { - font-size: 14px; - line-height: 26px; - margin: 12px 12px 12px 0; - color: #575757; - color: #000; - font-style: italic; -} - -.tav-thanks img { - float: right; - margin-left: 12px; -} - -.tav-thanks div { - text-align: right; - font-size: 12px; - line-height: 20px; - margin-top: 12px; -} -/* -.tav-thanks a { - color: #000; - text-decoration: underline; -} - -.tav-thanks a:hover { - text-decoration: none; -} -*/ - -.the-disqus-section { - margin: 12px 12px 12px 0; - min-height: 500px; -} - -.the-disqus-section h3 { - margin-top: 10px; - margin-bottom: 12px; - font-size: 20px; -} - -.action-items { - font-size: 14px; - line-height: 26px; - margin: 10px 24px; 20px 24px; -} - -.action-left { - float: left; - margin-right: 20px; -} - -.action-link { - margin-top: 20px; -} - -.action-text { - margin-top: 20px; -} - -/* plan file related css */ - -a.button { - background: transparent url('http://static.ampify.it/gfx.bg-button-a.gif') no-repeat scroll top right; - 
color: #444; - display: block; - float: left; - font: normal 12px arial, sans-serif; - height: 24px; - margin-right: 6px; - padding-right: 18px; /* sliding doors padding */ - text-decoration: none; -} - -a.button span { - background: transparent url('http://static.ampify.it/gfx.bg-button-span.gif') no-repeat; - display: block; - line-height: 14px; - padding: 5px 0 5px 18px; -} - -a.button:active { - background-position: bottom right; - color: #000; - outline: none; /* hide dotted outline in Firefox */ -} - -a.button:active span { - background-position: bottom left; - padding: 6px 0 4px 18px; /* push text down 1px */ -} - -a.buttondown { - background: transparent url('http://static.ampify.it/gfx.bg-button-a.gif') no-repeat scroll top right; - color: #444; - display: block; - float: left; - font: normal 12px arial, sans-serif; - height: 24px; - margin-right: 6px; - padding-right: 18px; /* sliding doors padding */ - text-decoration: none; - background-position: bottom right; - color: #000; - outline: none; /* hide dotted outline in Firefox */ -} - -a.buttondown span { - background: transparent url('http://static.ampify.it/gfx.bg-button-span.gif') no-repeat; - display: block; - line-height: 14px; - padding: 5px 0 5px 18px; - background-position: bottom left; - padding: 6px 0 4px 18px; /* push text down 1px */ -} - -.tag-segment { - text-align: right; -} - -.tag-segment span { - color: #fff; - padding: 0.5em; - font-size: 0.7em; -} - -.tag-link { - text-decoration: none; - color: #000; -} - -.tag { - background-color: #696969; -} - -.tag-val-done { - background-color: #007f16; - background-color: #00782d; - background-color: #006400; -} - -.tag-val-needsreview { - background-color: #a4ff00; - color: #000 !important; -} - -.tag-val-inreview { - background-color: #3056bf; -} - -.tag-val-todo { - background-color: #a60c00; - background-color: #d0006e; - background-color: #8B0000; -} - -.tag-val-wip { - background-color: #a66e00; - background-color: #ff550f; -} - 
-.tag-type-1 { -} - -.tag-type-2 { /* #hashtags */ - background-color: #2a4580; - background-color: #696969; -} - -.tag-type-dep { - display: none; -} - -.tag-type-milestone { - background-color: #00008B; - background-color: #06276f; - background-color: #a4ff00; /* nice colour! */ - /* color: #000 !important; */ - background-color: #002ca6; - background-color: #3056bf; - background-color: #898989; -} - -.tag-type-priority { - background-color: #481254; -} - -.tag-type-zuser { - background-color: #4573d5; - background-color: #696969; -} - -#plan-tags a, #site-tags a { - margin-bottom: 0.7em; -} - -#plan-container { - margin-top: 1.2em; - margin-bottom: 2.4em; -} - -.plan-help { - font-size: 0.9em; - font-weight: bold; - text-align: right; - margin-bottom: 1.4em; -} - -.container { - padding-left: 20px; -} - -.container blockquote { - padding: 0 0 0 30px; -} - -.container > p:first-child { - font-weight: bold; - font-family: "museo-1", "museo-2", Verdana; - padding-bottom: 0; -} - -.sidepic { - padding-top: 50px; - text-align: center; -} - -a#main_download { - -moz-border-radius: 10px; - -webkit-border-radius: 10px; - border-radius: 5px; - margin-left: 10%; - margin-right: 10%; - padding: 5px; - display: block; - color: white; - background-color: #0b0; -} - -.download-menu { - background-color: #F0F0F0; - font-size: 10pt; - border: 1px solid #000; - margin-left: 20px; - margin-right: 20px; -} diff --git a/pypy.org/source/howtohelp.txt b/pypy.org/source/howtohelp.txt deleted file mode 100644 --- a/pypy.org/source/howtohelp.txt +++ /dev/null @@ -1,25 +0,0 @@ ---- -layout: page -title: How to help? ---- - -How to help PyPy development? 
-===================================== - -Here are some ideas to help PyPy development: - -* use pypy for your projects and provide detailed feedback_ - -* help implement Python2.6, 2.7 or 3.x features - -* write blog posts or tweets about your experiences - -* help porting to new platforms - -* contact_ us and get involved - -* donate_ some money to enable others to help - -.. _contact: contact.html -.. _feedback: contact.html -.. _`donate`: http://morepypy.blogspot.com/2010/11/speeding-up-pypy-by-donations.html diff --git a/pypy.org/image/header-background.png b/pypy.org/image/header-background.png deleted file mode 100644 Binary file pypy.org/image/header-background.png has changed diff --git a/pypy.org/source/contact.txt b/pypy.org/source/contact.txt deleted file mode 100644 --- a/pypy.org/source/contact.txt +++ /dev/null @@ -1,20 +0,0 @@ ---- -layout: page -title: Contact ---- - -Contact -============================================================ - -* irc: **#pypy** on **irc.freenode.net** - -* mailing list: `pypy-dev at codespeak.net`__ - -* the `bug tracker`_ - -* more on our `dev site`_. - - -.. __: http://codespeak.net/mailman/listinfo/pypy-dev -.. _`bug tracker`: https://codespeak.net/issue/pypy-dev/ -.. _`dev site`: http://codespeak.net/pypy/trunk/pypy/doc/ diff --git a/pypy.org/source/_layouts/page.genshi b/pypy.org/source/_layouts/page.genshi deleted file mode 100644 --- a/pypy.org/source/_layouts/page.genshi +++ /dev/null @@ -1,70 +0,0 @@ ---- -layout: site -license: MIT ---- - -
-
-

${Markup(title)}

-${Markup(content)} -
- -
diff --git a/pypy.org/source/_layouts/archive.genshi b/pypy.org/source/_layouts/archive.genshi deleted file mode 100644 --- a/pypy.org/source/_layouts/archive.genshi +++ /dev/null @@ -1,34 +0,0 @@ ---- -layout: site -license: MIT ---- -
- - - - -
- - - -
diff --git a/pypy.org/consortium.txt b/pypy.org/consortium.txt deleted file mode 100644 --- a/pypy.org/consortium.txt +++ /dev/null @@ -1,43 +0,0 @@ -Consortium Partners ------------------------------- - -DFKI http://www.dfki.de - Stephan Busemann [stephan.busemann at dfki de] - (EU coordinator) - Alastair Burt [alastair.burt at dfki de] - Anders Lehmann [anders.lehmann at dfki de] - -Open End AB (formerly AB Strakt) http://www.openend.se - Jacob Hall�n [jacob at strakt com] (project manager) - Samuele Pedroni [pedronis at strakt com] (technical board) - Anders Chrigstr�m [ac at strakt com] - -Change Maker http://www.changemaker.nu - Beatrice D�ring [bea at changemaker nu] (assistant project manager, press contact) - -merlinux GmbH http://merlinux.de - Holger Krekel [krekel at merlinux de] (technical director/board) - -Heinrich Heine Universit�t D�sseldorf http://www.uni-duesseldorf.de/ - Armin Rigo [arigo at tunes org] (technical board) - Michael Hudson [mwh at python net] - -Tismerysoft http://www.stackless.com - Christian Tismer [tismer at stackless com] (technical board) - -Logilab http://www.logilab.fr - Nicholas Chauvat [Nicolas.Chauvat at logilab fr] - Ludovic Aubry [ludal at logilab fr] - Adrien DiMascio [adim at logilab fr] - -impara GmbH www.impara.de - Bert Freudenberg [bert at impara de] - -Individual Partners -------------------------- - -Laura Creighton [lac at strakt com] -Eric van Riet Paap [eric at vanrietpaap nl] -Richard Emslie [rxe at ukshells co uk] -Niklaus Haldimann [nhaldimann at gmx ch] - diff --git a/pypy.org/compat.html b/pypy.org/compat.html deleted file mode 100644 --- a/pypy.org/compat.html +++ /dev/null @@ -1,168 +0,0 @@ - - - - PyPy :: Python compatibility - - - - - - - - - - - - - - - -
- -
-
-
-

Python compatibility

-

PyPy implements the Python language version 2.5. It supports all of the core -language, passing Python test suite (with minor modifications that were -already accepted in the main python in newer versions). It supports most -of the commonly used Python standard library modules; details below.

-

PyPy has alpha-level support for the CPython C API, however, as of 1.4.1 -release this feature is not yet complete. Most libraries will require -a bit of effort to work, but there are known success stories. Check out -PyPy blog for updates.

-

C extensions need to be recompiled for PyPy in order to work. Depending on -your build system, it might work out of the box or will be slightly harder. -In order to instruct pypy to load a CPython extension (compiled with supplied -Python.h), run following line:

-
-import cpyext
-
-

before importing any extensions. You can put this line in your PYTHONSTARTUP -file if you want this to be enabled permanently.

-

Standard library modules supported by PyPy, in alphabetical order:

-
    -
  • __builtin__ __pypy__ _ast _bisect _codecs _lsprof _minimal_curses _random _rawffi _ssl _socket _sre _weakref array bz2 cStringIO cpyext crypt errno exceptions fcntl  gc itertools marshal math md5 mmap operator parser posix pyexpat select sha signal struct symbol sys termios thread time token unicodedata zipimport zlib
  • -
-

Supported, but written in pure-python:

-
    -
  • binascii cPickle cmath collections ctypes datetime functools grp pwd sqlite3 syslog
  • -
-

All modules that are pure python in CPython of course work.

-

Python libraries known to work under PyPy (the list is not exhaustive):

-
    -
  • ctypes
  • -
  • django (without any DB but sqlite)
  • -
  • twisted (without ssl support)
  • -
  • pylons
  • -
  • divmod's nevow
  • -
  • pyglet
  • -
-

Known differences that are not going to be fixed:

-
    -
  • PyPy does not support refcounting semantics. The following code -won't fill the file immediately, but only after a certain period -of time, when the GC does a collection:

    -
    open("filename", "w").write("stuff")
    -

    The proper fix is

    -
    f = open("filename", "w")
    f.write("stuff")
    f.close()
    -

    or using the with keyword

    -
    with open("filename", "w") as f:
    f.write("stuff")
    -
  • -
  • For the same reason, some functions and attributes of the gc module -behave in a slightly different way: for example, gc.enable and -gc.disable are supported, but instead of enabling and disabling the GC, -they just enable and disable the execution of finalizers. Also, -gc.garbage always returns an empty list.

    -
  • -
  • You can't attach a __del__ method to a class after its creation.

    -
  • -
  • You can't store non-string keys in type objects. Example

    -
    class A(object):
    locals()[42] = 3
    -

    won't work.

    -
  • -
-

A more complete list is available at our dev site.

-
- -
-
-
- - \ No newline at end of file diff --git a/pypy.org/news.txt b/pypy.org/news.txt deleted file mode 100644 --- a/pypy.org/news.txt +++ /dev/null @@ -1,323 +0,0 @@ -Review passed with flying colours ---------------------------------- - -On the 31st of May 2007 the PyPy project was reviewed by the EU -Commission in Brussels. Reviewers were Roel Wuyts, Unversit� Libre de -Bruxelles and Aki Lumiaho, Ramboll, Finland. Present was also our -Project Officer, Charles McMillan. After 6 hours of presentations of -the various aspects of the project, it only took the reviewers a few -minutes to decide that the project was accepted, without any further -work being required. Professor Wuyts, who has dynamic programming -languages as his main field of research was very enthusiastic about -the entire project and the results with the Just In Time Compiler -Generator in particular. He offered his help in establishing -collaborations with the communities around Prolog, Smalltalk, Lisp and -other dynamic languages, as well as giving hints on how to get our -results most widely pubicized. - -The preparations for the review left the team rather exhausted so -development progress will be rather slow until the sprint at -Europython in the second week of July. - -PyPy EU funding period over, Review ahead ------------------------------------------------------ - -The 28 month EU project period of PyPy is over and new things are to come! -On 11th May we `submitted last documents`_ to the European Union and are now -heading towards a 31st May Review Meeting in Bruxelles. The `PyPy EU Final -Activity Report`_ summarizes what we did and what we have in mind -on technical, scientific and community levels. It also contains reflections -and recommendations possibly interesting to other projects aiming at -EU funded Open Source research. *(12th May, 2007)* - -.. _`submitted last documents`: http://codespeak.net/pypy/dist/pypy/doc/index-report.html -.. 
_`PyPy EU Final Activity Report`: http://codespeak.net/pypy/extradoc/eu-report/PYPY-EU-Final-Activity-Report.pdf - -PyPy 1.0: JIT compiler generator, optimizations and more ------------------------------------------------------------------- - -We are proud to release PyPy 1.0.0, our sixth public release. See -the `release announcement -`__ -to read about the -many new features in this release, especially the results of our -JIT generation technology. See also our detailed instructions on -how to `get started`_. *(March 27th, 2007)* - -.. _`get started`: http://codespeak.net/pypy/dist/pypy/doc/getting-started.html - -PyPy 0.99.0: optimizations, backends, new object spaces and more -------------------------------------------------------------------- - -We are proud to release PyPy 0.99.0, our fifth public release. See -the `release announcement -`__ -to read about the -many new features in this release. See also our detailed instructions on -how to `get started`_. *(February 17th, 2007)* - -.. _`get started`: http://codespeak.net/pypy/dist/pypy/doc/getting-started.html - - -py lib 0.9.0: py.test, distributed execution, greenlets and more -------------------------------------------------------------------- - -Our development support and testing library was publically released, see the -`0.9 release announcement `__ -and its extensive `online documentation `__. -*(February 15th, 2007)* - - -Leysin Sprint, 8th - 14th January 2007 ---------------------------------------------------------- - -The PyPy Leysin sprint is over. We worked hard on various topics, including -preparing the upcoming py-lib and PyPy releases. For more details, see the -`Leysin sprint report`_, the `Leysin announcement`_ and the -`list of people present`_. - -.. _`Leysin announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/announcement.html -.. _`Leysin sprint report`: http://codespeak.net/pipermail/pypy-dev/2007q1/003481.html -.. 
_`list of people present`: http://codespeak.net/svn/pypy/extradoc/sprintinfo/leysin-winter-2007/people.txt - - - - -Duesseldorf sprint #2, 30th October - 5th November over ----------------------------------------------------------- - -PyPy has sprinted at the Heinrich-Heine-Universit�t D�sseldorf. -It was a very productive sprint with work done -in various areas. Read the `sprint report`_ for a detailed description of what -was achieved and the `full announcement`_ for various details. - -.. _`full announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/announce.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q4/003396.html - - -PyPy at University of Limerick: sprint and workshop 21-28th of August 2006 -------------------------------------------------------------------------------- - -PyPy has sprinted at the University of Limerick in Ireland, hosted by partners in our -sister project Calibre. During the first day of the sprint, the 21st, we -arranged a workshop with tutorials, talks and discussions on both technical (JIT, VM) -and methodological topics (F/OSS/Agile, Distributed). See sprint announcement -http://codespeak.net/pypy/extradoc/sprintinfo/ireland-2006/announce.html - -Summer of PyPy - calls for proposals -------------------------------------- - -PyPy is implementing it�s own model (inspired by Google�s Summer of Code) -for reimbursing travel and accommodation for sprint participation as well as -mentoring on various subjects related to PyPy. The call for proposal is out - -see more on http://codespeak.net/pypy/dist/pypy/doc/summer-of-pypy.html. - -PyPy at Agile 2006 24-28th of July 2006 ----------------------------------------- -PyPy published an experience report at Agile 2006 conference in -Minneapolis, USA titled: "Trouble in Paradise: the Open Source Project PyPy, -EU-funding and Agile Practices". The talk was presented by Beatrice D�ring. 
-Together with Arlo Belshee an Open Space session was arranged on the -topic "What can Agile learn from Open Source?", attended by 11 people. -See http://www.agile2006.org/. - -PyPy at Europython 2006 -------------------------- -PyPy arranged four talks at EuroPython 2006 - this year hosted by -CERN in Geneva, Switzerland as well as arranging "lightning talks" -and demoing features from the 0.9 release. After the conference the team -hosted a sprint with 24 participants (!). Thanks to everyone -participating and contributing during the sprint. -See http://www.europython.org/. - -PyPy releases videodocumentation! ------------------------------------ -At http://codespeak.net/pypy/dist/pypy/doc/video-index.html (bittorrent) -you can download video documentation covering sprints, talks, tutorials, -design discussions and interviews on PyPy. - -PyPy releases 0.9! -------------------- -The fourth public release of the project covers stackless -implementations in PyPy, extension compiler and many other -features - see the release announcement: -http://codespeak.net/pypy/dist/pypy/doc/release-0.9.0.html. - - -PyPy at XP 2006 17-22nd of June 2006 ---------------------------------------- -PyPy published an experience report, -"Sprint Driven Development, Agile Methodologies in a Distributed -Open Source Project (PyPy)" and presented the results at the XP -2006 conference in Oulu, Finland. - -PyPy sprint in Tokyo 23rd-29th of April, 2006 ------------------------------------------------ -The PyPy team has been invited by FSIJ (Free Software Initiative Japan) -to sprint in the facilities of the National Institute of Advanced Technology -and Science in Akihabara, Tokyo. The sprint is aiming at dissemination of -the current state of PyPy within the community of Japanese Hackers. -See more on http://codespeak.net/pypy/dist/pypy/doc/news.html. 
- - -PyPy sprint in Louvain-La-Neuve 6-10th of March, 2006 -------------------------------------------------------- -The PyPy team sprinted at D�partement d'Ing�nierie Informatique -at UCL in Belgium. During the sprint a workshop regarding the -funcionalities of PyPy/Python and the OZ language was discussed -with people from both communities. Many thanks to our hosts at -UCL, among them Gr�goire Dooms for making this sprint possible. - - -PyPy at PyCon 24th-26th February, 2006 ----------------------------------------- -The PyPy team visited Addison/Dallas in Texas and participated -in PyCon. The PyPy team presented three talks about the status, a -architecture and methodology of the project. After the conference -ca 20 people participated in the PyPy sprint that went on for four -days. - -PyPy in Ireland 6-7th February, 2006 ---------------------------------------- -PyPy visited IONA Technologies for an experience workshop, sharing experiences -with agile and distributed software development. PyPy held two talks at -the University College Dublin for students and researchers in computer -science. Many thanks to Niall Donnely at IONA and Dr Joseph Kiniry at UCD -for hosting these events! - -PyPy at the Open Source World Conference, M�laga -- February 15-17, 2006 ------------------------------------------------------------------------- - -PyPy is presenting at the stand of the European Commission and giving -a talk on on Thursday, 14:00-15:00. - -PyPy sprint in Mallorca 23-29th of January, 2006 ------------------------------------------------- -PyPy kickstarted the second year of the project with a sprint in -the GNU/Linux lab at the University of Balearic Isles on Mallorca. -The PyPy team held an architectural talk for students at the -University and met up with the local OSS group Bulma. -Many thanks to Ricardo Galli at UIB for being a gracious host! 
- -PyPy at CCC conference 27-30th of December 2005 -------------------------------------------------- -PyPy presented two different talks at the 22C3 conference in Berlin, -"PyPy - the new Python implementation on the block" and "OSS, EUfunding -and agile business". For more information about this conference: -http://events.ccc.de/congress/2005/overview.en.html. -The talks can be found on http://codespeak.net/pypy. - -PyPy sprint in Gothenburg and seminar in Brussels ---------------------------------------------------- -PyPy sprinted in Gothenburg during the first week of December - work -with the JIT compiler was a primary target. This work will continue during -the spring of 2006. - -In parallell with the PyPy sprint, some PyPy teammembers visited -the Commission in Brussels and participated in an internal EC seminar -regarding OSS, distributed and agile development on the 8th of December. -This was done in cooperation with the Calibre project, by invitation from the -PyPy Project Officer Mr Dirk Van Royy. -The talk can be found on http://codespeak.net/pypy. - -PyPy at PyCon --------------- -PyPy has three talks accepted at PyCon, which will be held in Dallas, USA -24-26th of February. For more information about the conference and the pypy -talks: http://us.pycon.org/TX2006/HomePage. - -PyPy at the CCC conference December 2005 ------------------------------------------ -PyPy will present talks at the CCC conference in Berlin 27th-29th of December. -For more information:www.ccc.de - -PyPy in Gothenburg December 2005 ---------------------------------- -The upcoming sprint in Gothenburg (7th-11th of December) has been announced. - -PyPy in Paris October 2005 ---------------------------- -The team met up in Paris in the office of Logilab. Several newcomers -participated in the sprint, in total 18 people attended.Work was focused -on aspects of phase II of the project: translation, continuation-passing -style (stackless) and JIT (Just in Time compilation). 
- -PyPy at the 2nd International Calibre conference, Limerick, September 2005 ------------------------------------------------------------------------------- - -Beatrice Düring from the PyPy team participated in the second International -Calibre conference at the University of Limerick (9th of September). Calibre is a coordination action -project, focused on OSS - business models, agile and distributed development. -Beatrice presented a talk about the sprint methodology in use in the PyPy project. - -Lots of useful contacts were made and there are rumours of a sprint -in Ireland next year, and maybe also in Japan. For more information about -Calibre: www.calibre.ie - -PyPy in Heidelberg August 2005 ----------------------------------- - -The team met up and sprinted in Heidelberg, in total 14 people. -On the 28th of August the 0.7 version of PyPy - the first -public release of a fully translatable self contained Python -implementation was launched. This work concluded the phase 1 of the project. - -PyPy sprint in Hildesheim July 2005 ----------------------------------------- - -The team met up at Trillke Gut and worked on the upcoming phase 1 -deliverables. The result was PyPy - selfcontained! - - -PyPy at Europython in Gothenburg June/July 2005 ------------------------------------------------------- - -The Pypy team was busy sprinting 4 days before Europython and 6 days -afterwards. There were also several PyPy talks at the conference, a -joint talk by Holger Krekel, Armin Rigo, Carl Friedrich Bolz about -translation aspects of PyPy, Holger Krekel about py.test and -Beatrice Düring about sprint driven development. This sprint had 20 -participants which is a PyPy record. - -PyPy at ACCU April 2005 ------------------------------------------------------- - -Armin Rigo and Jacob Hallén held 2 talks at the ACCU conference, about PyPy and sprint driven -development. 
- -PyPy at PyCon in Washington March 2005 ------------------------------------------------------- - -The Pypy team sprinted at PyCon - 13 developers worked on getting Pypy -compliant with the CPython regression test. Armin Rigo and Holger Krekel -also did talks about type inference and the py lib/py.test. - -PyPy at the Calibre conference in Paris March 2005 ------------------------------------------------------- - -Beatrice Düring from the PyPy team participated in the Calibre workshop -"Libre software - which business model?". - -Sprint in Leysin, Switzerland January 2005 ------------------------------------------------------- - -The PyPy team met up for the first sprint after the official start of -the EU funded project. 13 people participated and worked together for 7 -days. The team succeeded in importing and running CPython tests on PyPy. - -PyPy at Chaos Communication Conference 2004 ------------------------------------------------------- - -Holger Krekel gave a talk about the process and status -of an open-source project receiving EU funding and -the particular situation with PyPy. - - -Contract signed - PyPy is flying December 2004 ------------------------------------------------------- - -The PyPy team received contract confirmation 1 December 2004 from the -Commission. The team kicked off the work with a Consortium meeting in -Saarbruecken. - - diff --git a/pypy.org/source/README b/pypy.org/source/README deleted file mode 100644 --- a/pypy.org/source/README +++ /dev/null @@ -1,18 +0,0 @@ -You can get necessary software by doing: - -git clone https://github.com/tav/ampify.git - -and then recreate the website in this directory by running - -ampify/environ/yatiblog -o .. - -you'll get html output in the parent directory. -Then you can check it in, go to codespeak in /www/pypy.org/htdocs/ -and type "hg pull -u". 
- -Other required dependencies: - * "docutils" from "easy_install docutils" - * "simplejson" from "easy_install simplejson" - * "genshi" from "easy_install genshi" - * "pygments" from "easy_install pygments" - * "yaml" from "apt-get install libyaml-dev", "easy_install pyyaml" diff --git a/pypy.org/index2.txt b/pypy.org/index2.txt deleted file mode 100644 --- a/pypy.org/index2.txt +++ /dev/null @@ -1,50 +0,0 @@ -Welcome! - *This is the European Union project page for PyPy.* - *Please visit the main page at* http://codespeak.net/pypy/. - - -PyPy EU project title (contract number: 004779) ------------------------------------------------- - -Researching a highly flexible and modular language platform and -implementing it by leveraging the Open Source Python Language and -Community - -PyPy EU project description (004779) --------------------------------------- - -The PyPy project has been an ongoing Open Source Python language -implementation since 2003. In December 2004 PyPy received EU-funding -within the Framework Programme 6, second call for proposals ("Open -development platforms and services" IST). - -A consortium of 8 (12) partners in Germany, France and Sweden are working to -achieve the goal of an open run-time environment for the Open Source -Programming Language Python. The scientific aspects of the project are to -investigate novel techniques (based on aspect-oriented programming code -generation and abstract interpretation) for the implementation of -practical dynamic languages. - -A methodological goal of the project is also to showcase a novel -software engineering process, Sprint Driven Development. This is an -Agile methodology, providing a dynamic and adaptive environment, suitable -for co-operative and distributed development. 
- -The project is divided into three major phases, phase 1 has the focus of -developing the actual research tool - the self contained compiler, phase -2 has the focus of optimisations (core, translation and dynamic) and in -phase 3 the actual integration of efforts and dissemination of the -results. The project has an expected deadline in November 2006. - -PyPy is still, though EU-funded, heavily integrated in the Open Source -community of Python. The methodology of choice is the key strategy to -make sure that the community of skilled and enthusiastic developers can -contribute in ways that wouldn't have been possible without EU-funding. - -For questions regarding the PyPy-project, please email our consortium at -[pypy-funding at codespeak net] or Bea During (bea at changemaker nu). - -For more detailed information, documentation and code - please visit the -`PyPy community housed at codespeak`_. - -.. _`PyPy community housed at codespeak`: http://codespeak.net/pypy diff --git a/pypy.org/howtohelp.html b/pypy.org/howtohelp.html deleted file mode 100644 --- a/pypy.org/howtohelp.html +++ /dev/null @@ -1,117 +0,0 @@ - - - - PyPy :: How to help? - - - - - - - - - - - - - - - -
- -
-
-
-

How to help?

-

Here are some ideas to help PyPy development:

-
    -
  • use pypy for your projects and provide detailed feedback
  • -
  • help implement Python2.6, 2.7 or 3.x features
  • -
  • write blog posts or tweets about your experiences
  • -
  • help porting to new platforms
  • -
  • contact us and get involved
  • -
  • donate some money to enable others to help
  • -
-
- -
-
-
- - \ No newline at end of file diff --git a/pypy.org/js/detect.js b/pypy.org/js/detect.js deleted file mode 100644 --- a/pypy.org/js/detect.js +++ /dev/null @@ -1,19 +0,0 @@ - -$(document).ready(function() { - var download_url, download_text; - if (navigator.platform.indexOf('Linux') != -1) { - download_url = 'download/pypy-1.4.1-linux.tar.bz2'; - download_text = 'Download linux x86 bin'; - } else if (navigator.platform.indexOf('Win') != -1) { - download_url = 'download/pypy-1.4.1-win32.zip'; - download_text = 'Download Windows x86 bin'; - } else if (navigator.platform.indexOf('Mac') != 1) { - download_url = 'download/pypy-1.4.1-osx.tar.bz2'; - downloat_text = 'Download Mac OS X 10.6 bin'; - } else { - download_url = "download.html"; - download_text = "Download page"; - } - $("#main_download").attr('href', download_url); - $("#main_download").text(download_text); -}); diff --git a/pypy.org/confrest.py b/pypy.org/confrest.py deleted file mode 100644 --- a/pypy.org/confrest.py +++ /dev/null @@ -1,38 +0,0 @@ -from confrest_oldpy import * - -class PyPyPage(Page): - def fill(self): - super(PyPyPage, self).fill() - self.menubar[:] = html.div( - html.a("home", href="index.html", class_="menu"), " ", - html.a("news", href="news.html", class_="menu"), " ", - html.a("consortium", href="consortium.html", class_="menu"), " ", - html.a("links", href="links.html", class_="menu"), " ", - html.a("community/coding", - href="http://codespeak.net/pypy/dist/pypy/doc/index.html", - class_="menu"), " ", - " ", id="menubar") - - def get_doclink(self, target): - return relpath(self.targetpath.strpath, - self.project.get_docpath().join(target).strpath) - -class Project(Project): - mydir = py.magic.autopath().dirpath() - title = "PyPy EU Project" - stylesheet = 'http://codespeak.net/pypy/dist/pypy/doc/style.css' - encoding = 'latin1' - prefix_title = "EU/PyPy" - logo = html.div( - html.a(html.img(alt="PyPy", id="pyimg", - src="http://codespeak.net/pypy/img/py-web1.png", - height=110, 
width=149)), - html.img(alt="EU Logo", id="extraimg", - src="ist.png", - height=105, width=213), - ) - Page = PyPyPage - def get_docpath(self): - return self.mydir - - diff --git a/pypy.org/ist.png b/pypy.org/ist.png deleted file mode 100644 Binary file pypy.org/ist.png has changed diff --git a/pypy.org/archive.html b/pypy.org/archive.html deleted file mode 100644 --- a/pypy.org/archive.html +++ /dev/null @@ -1,59 +0,0 @@ - - - - PyPy - - - - - - - - - - - - - - - -
- -
-
- -
    -
-
- -
    -
-
-
-
- - \ No newline at end of file diff --git a/pypy.org/features.html b/pypy.org/features.html deleted file mode 100644 --- a/pypy.org/features.html +++ /dev/null @@ -1,175 +0,0 @@ - - - - PyPy :: Features - - - - - - - - - - - - - - - -
- -
-
-
-

Features

-

PyPy 1.4.1 implements Python 2.5. and runs on Intel -x86 (IA-32) and x86_64 platforms, with ARM being underway. -It supports all of the core language, passing the Python test suite -(with minor modifications that were already accepted in the main python -in newer versions). It supports most of the commonly used Python -standard library modules. For known differences with CPython, see our -compatibility page.

-

If you are interested in helping to move forward, see our howtohelp page.

-
-

Speed

-

Our main executable comes with a Just-in-Time compiler. It is -really fast in running most benchmarks. Try it out!

-
-
-

Sandboxing

-

PyPy's sandboxing is a working prototype for the idea of running untrusted -user programs. Unlike other sandboxing approaches for Python, PyPy's does not -try to limit language features considered “unsafe”. Instead we replace all -calls to external libraries (C or platform) with a stub that communicates -with an external process handling the policy.

-

To run the sandboxed process, you need pypy-sandbox. You also need to -get the full sources (step 1 only). Run:

-
-cd pypy-trunk/pypy/translator/sandbox
-pypy_interact.py path/to/pypy-sandbox
-
-

You get a fully sandboxed interpreter, in its own filesystem hierarchy -(try os.listdir('/')). For example, you would run an untrusted -script as follows:

-
-mkdir virtualtmp
-cp untrusted.py virtualtmp/
-pypy_interact.py --tmp=virtualtmp pypy-sandbox /tmp/untrusted.py
-
-

Note that the path /tmp/untrusted.py is a path inside the sandboxed -filesystem. You don't have to put untrusted.py in the real /tmp -directory at all.

-

To read more about its features, try pypy_interact.py --help or go to -our dev site.

-
-
-

Stackless

-

PyPy is also available in a separate Stackless version that includes -support for micro-threads for massive concurrency. Read more about -it at the Stackless main site (we provide the same interface as the -standard Stackless Python), and at the greenlets page.

-
-
-

Other features

-

PyPy has many secondary features and semi-independent -projects. We will mention here:

-
    -
  • the .NET backend: you get a version of pypy-net that runs -natively in the .NET/CLI VM. Of particular interest is the cli-jit -branch, in which you can make a version of pypy-net which also -contains a high-level JIT compiler (it compiles your Python programs -Just in Time into CLR bytecodes, which are in turn compiled natively -by the VM).
  • -
  • the Java backend: PyPy can run on the Java VM, but more care is -needed to finish this project. (Requires the cli-jit branch for -now.) Writing a backend for our high-level JIT compiler would be -excellent. Contact us!
  • -
  • Other languages: available in a separate part of the repository, -we implemented other languages too: Prolog (almost complete), as -well as Smalltalk, JavaScript, Io, Scheme and Gameboy.
  • -
-
-
- -
-
-
- - \ No newline at end of file diff --git a/pypy.org/source/index.txt b/pypy.org/source/index.txt deleted file mode 100644 --- a/pypy.org/source/index.txt +++ /dev/null @@ -1,45 +0,0 @@ ---- -layout: page -title: PyPy ---- - -PyPy is a `very compliant`_ implementation of the `Python`_ language. -PyPy has several advantages and distinctive features: - - * **Speed:** thanks to its Just-in-Time compiler, Python programs - often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ - - * **Memory usage:** large, memory-hungry Python programs might end up - taking `less space`_ than they do in CPython. - - * **Sandboxing:** PyPy provides the ability to `run untrusted code`_ in a - fully secure way. - - * **Stackless:** PyPy can be configured to run in `stackless`_ mode, - providing micro-threads for massive concurrency. - - * As well as other `features`_. - -.. class:: download - -`Download and try out the PyPy release 1.4.1!`__ - -.. __: download.html - -To read more about Python, look into `Python docs`_ and check our -Compatibility_ page. PyPy can run such python libraries as `twisted`_ -and `django`_ and supports `ctypes`_. - -.. _`stackless`: http://www.stackless.com/ -.. _`Python`: http://python.org/ -.. _`faster`: http://speed.pypy.org/comparison/?exe=2%2B35,1%2B172&ben=1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20&env=1&hor=false&bas=2%2B35&chart=normal+bars -.. _`(What is a JIT compiler?)`: http://en.wikipedia.org/wiki/Just-in-time_compilation -.. _`run untrusted code`: features.html#sandboxing -.. _`very compliant`: compat.html -.. _`Python docs`: http://www.python.org/doc/2.5.4/ -.. _`twisted`: http://twistedmatrix.com/ -.. _`django`: http://www.djangoproject.com/ -.. _`ctypes`: http://www.python.org/doc/2.5.4/lib/module-ctypes.html -.. _`features`: features.html -.. _`less space`: http://morepypy.blogspot.com/2009/10/gc-improvements.html -.. 
_Compatibility: compat.html diff --git a/pypy.org/source/compat.txt b/pypy.org/source/compat.txt deleted file mode 100644 --- a/pypy.org/source/compat.txt +++ /dev/null @@ -1,96 +0,0 @@ ---- -layout: page -title: Python compatibility ---- - -PyPy implements the Python language version 2.5. It supports all of the core -language, passing Python test suite (with minor modifications that were -already accepted in the main python in newer versions). It supports most -of the commonly used Python `standard library modules`_; details below. - -PyPy has **alpha-level** support for the `CPython C API`_, however, as of 1.4.1 -release this feature is not yet complete. Most libraries will require -a bit of effort to work, but there are known success stories. Check out -PyPy blog for updates. - -C extensions need to be recompiled for PyPy in order to work. Depending on -your build system, it might work out of the box or will be slightly harder. -In order to instruct pypy to load a CPython extension (compiled with supplied -Python.h), run following line:: - - import cpyext - -before importing any extensions. You can put this line in your PYTHONSTARTUP -file if you want this to be enabled permanently. - -Standard library modules supported by PyPy, in alphabetical order: - -* ``__builtin__ __pypy__ _ast _bisect _codecs _lsprof _minimal_curses _random _rawffi _ssl _socket _sre _weakref array bz2 cStringIO cpyext crypt errno exceptions fcntl gc itertools marshal math md5 mmap operator parser posix pyexpat select sha signal struct symbol sys termios thread time token unicodedata zipimport zlib`` - -Supported, but written in pure-python: - -* ``binascii cPickle cmath collections ctypes datetime functools grp pwd sqlite3 syslog`` - -All modules that are pure python in CPython of course work. 
- -Python libraries known to work under PyPy (the list is not exhaustive): - -* ctypes - -* django (without any DB but sqlite) - -* twisted (without ssl support) - -* pylons - -* divmod's nevow - -* pyglet - -Known differences that are not going to be fixed: - -* PyPy does not support refcounting semantics. The following code - won't fill the file immediately, but only after a certain period - of time, when the GC does a collection: - - .. syntax:: python - - open("filename", "w").write("stuff") - - The proper fix is - - .. syntax:: python - - f = open("filename", "w") - f.write("stuff") - f.close() - - or using the ``with`` keyword - - .. syntax:: python - - with open("filename", "w") as f: - f.write("stuff") - -* For the same reason, some functions and attributes of the ``gc`` module - behave in a slightly different way: for example, ``gc.enable`` and - ``gc.disable`` are supported, but instead of enabling and disabling the GC, - they just enable and disable the execution of finalizers. Also, - ``gc.garbage`` always returns an empty list. - -* You can't attach a ``__del__`` method to a class after its creation. - -* You can't store non-string keys in type objects. Example - - .. syntax:: python - - class A(object): - locals()[42] = 3 - - won't work. - -A more complete list is available at `our dev site`_. - -.. _`CPython C API`: http://docs.python.org/c-api/ -.. _`standard library modules`: http://docs.python.org/library/ -.. _`our dev site`: http://codespeak.net/pypy/dist/pypy/doc/cpython_differences.html diff --git a/pypy.org/source/features.txt b/pypy.org/source/features.txt deleted file mode 100644 --- a/pypy.org/source/features.txt +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: page -title: Features ---- - -PyPy features -=========================================================== - -**PyPy 1.4.1** implements **Python 2.5.** and runs on Intel -`x86 (IA-32)`_ and `x86_64`_ platforms, with ARM being underway. 
-It supports all of the core language, passing the Python test suite -(with minor modifications that were already accepted in the main python -in newer versions). It supports most of the commonly used Python -standard library modules. For known differences with CPython, see our -`compatibility`_ page. - -If you are interested in helping to move forward, see our `howtohelp`_ page. - -.. _`compatibility`: compat.html -.. _`x86 (IA-32)`: http://en.wikipedia.org/wiki/IA-32 -.. _`x86_64`: http://en.wikipedia.org/wiki/X86_64 -.. _`howtohelp`: howtohelp.html - - -Speed ------ - -Our `main executable`_ comes with a Just-in-Time compiler. It is -`really fast`_ in running most benchmarks. `Try it out!`_ - -.. _`main executable`: download.html#with-a-jit-compiler -.. _`Try it out!`: download.html#with-a-jit-compiler -.. _`really fast`: http://bit.ly/fhSlfk - -Sandboxing --------------------- - -PyPy's *sandboxing* is a working prototype for the idea of running untrusted -user programs. Unlike other sandboxing approaches for Python, PyPy's does not -try to limit language features considered "unsafe". Instead we replace all -calls to external libraries (C or platform) with a stub that communicates -with an external process handling the policy. - -To run the sandboxed process, you need `pypy-sandbox`_. You also need to -get the `full sources`_ (step 1 only). Run:: - - cd pypy-trunk/pypy/translator/sandbox - pypy_interact.py path/to/pypy-sandbox - -You get a fully sandboxed interpreter, in its own filesystem hierarchy -(try ``os.listdir('/')``). For example, you would run an untrusted -script as follows:: - - mkdir virtualtmp - cp untrusted.py virtualtmp/ - pypy_interact.py --tmp=virtualtmp pypy-sandbox /tmp/untrusted.py - -Note that the path ``/tmp/untrusted.py`` is a path inside the sandboxed -filesystem. You don't have to put ``untrusted.py`` in the real ``/tmp`` -directory at all. - -To read more about its features, try ``pypy_interact.py --help`` or go to -`our dev site`_. 
- -.. _`pypy-sandbox`: download.html#sandboxed-version -.. _`full sources`: download.html#translate -.. _`our dev site`: http://codespeak.net/pypy/dist/pypy/doc/sandbox.html - - -Stackless --------------------------- - -PyPy is also available in a separate `Stackless version`_ that includes -support for micro-threads for massive concurrency. Read more about -it at the Stackless_ main site (we provide the same interface as the -standard Stackless Python), and at the greenlets_ page. - -.. _`Stackless version`: download.html#stackless-version -.. _`stackless`: http://www.stackless.com/ -.. _`greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt - - -Other features ---------------------------------------- - -PyPy has many secondary features and semi-independent -projects. We will mention here: - -* **the .NET backend:** you get a version of ``pypy-net`` that runs - natively in the .NET/CLI VM. Of particular interest is `the cli-jit - branch`_, in which you can make a version of ``pypy-net`` which also - contains a high-level JIT compiler (it compiles your Python programs - Just in Time into CLR bytecodes, which are in turn compiled natively - by the VM). - -* **the Java backend:** PyPy can run on the Java VM, but more care is - needed to finish this project. (Requires `the cli-jit branch`_ for - now.) Writing a backend for our high-level JIT compiler would be - excellent. `Contact us`_! - -* **Other languages:** available in a `separate part of the repository`_, - we implemented other languages too: Prolog_ (almost complete), as - well as Smalltalk, JavaScript, Io, Scheme and Gameboy. - - -.. _`the cli-jit branch`: http://codespeak.net/svn/pypy/branch/cli-jit/ -.. _`contact us`: contact.html -.. _`separate part of the repository`: http://codespeak.net/svn/pypy/lang/ -.. 
_Prolog: http://codespeak.net/svn/pypy/lang/prolog diff --git a/pypy.org/contact.html b/pypy.org/contact.html deleted file mode 100644 --- a/pypy.org/contact.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - PyPy :: Contact - - - - - - - - - - - - - - - -
- -
-
-
-

Contact

- -
- -
-
-
- - \ No newline at end of file diff --git a/pypy.org/download.html b/pypy.org/download.html deleted file mode 100644 --- a/pypy.org/download.html +++ /dev/null @@ -1,246 +0,0 @@ - - - - PyPy :: Download and install - - - - - - - - - - - - - - - -
- -
-
-
-

Download and install

-

There are nightly binary builds available. Those builds are not always -as stable as the release, but they contain Python 2.7 compatibility, -numerous bugfixes and performance improvements.

-

Here are the various binaries of PyPy 1.4.1 that we provide for x86 Linux, -Mac OS/X or Windows. This is mostly a bugfix release, although the performance -over the previous release 1.4 has improved in some cases.

- -
-

“JIT Compiler” version

-

These binaries include a Just-in-Time compiler. They only work on -x86 CPUs that have the SSE2 instruction set (most of -them do, nowadays), or on x86-64 CPUs. -(This is the official release 1.4.1; -for the most up-to-date version see below.)

- -

If your CPU is really old, it may not have SSE2. In this case, you need -to translate yourself with the option --jit-backend=x86-without-sse2.

-
-
-

Other versions

-

The other versions of PyPy are:

-
    -
  • The most up-to-date nightly build with a JIT, if the official -release is too old for what you want to do.
  • -
  • No JIT: A version without the JIT. Consumes a bit less memory -and may be faster on short-running scripts.
  • -
  • Stackless: Provides Stackless extensions, as well as greenlets. -It is not possible right now to combine Stackless features with the JIT.
  • -
  • Sandboxing: A special safe version. Read the docs about sandboxing. -(It is also possible to translate a version that includes both -sandboxing and the JIT compiler, although as the JIT is relatively -complicated, this reduces a bit the level of confidence we can put in -the result.)
  • -
-

These versions are not officially part of the release 1.4.1, which focuses -on the JIT. You can find prebuilt binaries for them on our -nightly build or in the release binaries, or translate them -yourself.

-
-
-

Installing

-

All versions are packaged in a tar.bz2 or zip file. When -uncompressed, they run in-place. For now you can uncompress them -either somewhere in your home directory or, say, in /opt, and -if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy-1.4.1/bin/pypy. Do -not move or copy the executable pypy outside the tree – put -a symlink to it, otherwise it will not find its libraries.

-
-
-

Building from source

-
    -
  1. Get the source code. The following packages contain the source at -the same revision as the above binaries (these are svn exports):

    -
      -
    • pypy-1.4.1-src.tar.bz2 (sources, Unix line endings)
    • -
    • pypy-1.4.1-src.zip (sources, Windows line endings) not available
    • -
    -

    Or you can checkout the current trunk using Mercurial (the trunk -usually works and is of course more up-to-date):

    -
    -hg clone http://bitbucket.org/pypy/pypy
    -
    -
  2. -
  3. Make sure you installed the dependencies. See the list here.

    -
  4. -
  5. Enter the goal directory:

    -
    -cd pypy-trunk/pypy/translator/goal
    -
    -
  6. -
  7. Run the translate.py script. Here are the common combinations -of options (works also with python instead of pypy):

    -
    -pypy translate.py -Ojit                # get the JIT version
    -pypy translate.py -O2                  # get the no-jit version
    -pypy translate.py --sandbox            # get the sandbox version
    -pypy translate.py --stackless          # get the stackless version
    -pypy translate.py -Ojit --backend=cli  # only for branch/cli-jit
    -
    -
  8. -
  9. Enjoy Mandelbrot :-) It takes on the order of half an hour to -finish the translation, and 1.7 GB of RAM on a 32-bit system -and 3.0 GB on 64-bit systems. (Do not start a translation on a -machine with insufficient RAM! It will just swap forever.)

    -
  10. -
-

Notes:

-
    -
  • It is recommended to use PyPy to do translations, instead of using CPython, -because it is twice as fast. (Using CPython would lower the memory -requirement down to 1.2 GB on 32-bit, 2.4 GB on 64-bit.) -You should just start by downloading an official release of PyPy (with the -JIT).
  • -
  • Because of asmgcroot, compiling the generated C files containing the -JIT is delicate. It requires using either MSVC or gcc with no particularly -fancy options. It does not work e.g. with clang, or if you pass uncommon -options with the CFLAGS environment variable. The generated C files -are left around in /tmp/usession-$USER/testing_1/ or -$TMPDIR/usession-$USER/testing_1, so if compilation -fails (i.e. you get a CompilationError) or if you want to build a -debug build, you can go there and retry running make. For example: -CFLAGS= make, or make lldebug. It is recommended to move this -directory away if you want to keep it; otherwise, future runs of -translate.py will remove it.
  • -
-
-
-

Checksums

-

Here are the checksums for each of the downloads (md5 and sha1):

-
-3dccf24c23e30b4a04cf122f704b4064  pypy-1.4.1-linux.tar.bz2
-1fb62a813978c2581e9e09debad6b116  pypy-1.4.1-linux64.tar.bz2
-8584c4e8c042f5b661fcfffa0d9b8a25  pypy-1.4.1-osx.tar.bz2
-769b3fb134944ee8c22ad0834970de3b  pypy-1.4.1-osx64.tar.bz2
-ebbbb156b1eb842e9e65d909ed5f9f6d  pypy-1.4.1-src.tar.bz2
-6e2366377ad2f0c583074d3ba6f60d064549bef2  pypy-1.4.1-linux.tar.bz2
-1cfd53343e19264905a00d2ffcf83e03e39dcbb3  pypy-1.4.1-linux64.tar.bz2
-961470e7510c47b8f56e6cc6da180605ba058cb6  pypy-1.4.1-osx.tar.bz2
-8e2830bef80b93f4d3c016b972fbdf7bcd403abc  pypy-1.4.1-osx64.tar.bz2
-922a8815377fe2e0c015338fa8b28ae16bf8c840  pypy-1.4.1-src.tar.bz2
-
-
-
- -
-
-
- - \ No newline at end of file diff --git a/pypy.org/eu-logo-small.jpg b/pypy.org/eu-logo-small.jpg deleted file mode 100644 Binary file pypy.org/eu-logo-small.jpg has changed diff --git a/pypy.org/source/download.txt b/pypy.org/source/download.txt deleted file mode 100644 --- a/pypy.org/source/download.txt +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: page -title: Download and install ---- - -Download -============================================================ - -.. class:: download_menu - - There are `nightly binary builds`_ available. Those builds are not always - as stable as the release, but they contain Python 2.7 compatibility, - numerous bugfixes and performance improvements. - -Here are the various binaries of **PyPy 1.4.1** that we provide for x86 Linux, -Mac OS/X or Windows. This is mostly a bugfix release, although the performance -over the previous release 1.4 has improved in some cases. - -.. class:: download_menu - - * Download - - * `Default (with a JIT Compiler)`_ - * `Other versions`_ - - * `Installing`_ (optional) - * `Building from source`_ - * `Checksums`_ - -.. _`Default (with a JIT Compiler)`: - -"JIT Compiler" version -------------------------------- - -These binaries include a Just-in-Time compiler. They only work on -x86 CPUs that have the SSE2_ instruction set (most of -them do, nowadays), or on x86-64 CPUs. -(This is the official release 1.4.1; -for the most up-to-date version see below.) - -* `Linux binary (32bit)`__ -* `Linux binary (64bit)`__ -* `Mac OS/X binary (32bit)`__ -* `Mac OS/X binary (64bit)`__ -* `Windows binary (32bit)`__ (you may need to install the `VS 2010 runtime libraries`_) - -.. __: http://pypy.org/download/pypy-1.4.1-linux.tar.bz2 -.. __: http://pypy.org/download/pypy-1.4.1-linux64.tar.bz2 -.. __: http://pypy.org/download/pypy-1.4.1-osx.tar.bz2 -.. __: http://pypy.org/download/pypy-1.4.1-osx64.tar.bz2 -.. __: http://pypy.org/download/pypy-1.4.1-win32.zip -.. 
_`VS 2010 runtime libraries`: http://www.microsoft.com/downloads/en/details.aspx?familyid=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84 - -If your CPU is really old, it may not have SSE2. In this case, you need -to translate_ yourself with the option ``--jit-backend=x86-without-sse2``. - -.. _`Other versions (without a JIT)`: - -Other versions -------------------------------- - -The other versions of PyPy are: - -* The most up-to-date `nightly build`_ with a JIT, if the official - release is too old for what you want to do. - -* No JIT: A version without the JIT. Consumes a bit less memory - and may be faster on short-running scripts. - -* Stackless: Provides Stackless_ extensions, as well as greenlets_. - It is not possible right now to combine Stackless features with the JIT. - -* Sandboxing: A special safe version. Read the docs about sandboxing_. - (It is also possible to translate_ a version that includes both - sandboxing and the JIT compiler, although as the JIT is relatively - complicated, this reduces a bit the level of confidence we can put in - the result.) - -These versions are not officially part of the release 1.4.1, which focuses -on the JIT. You can find prebuilt binaries for them on our -`nightly build`_ or in the `release`_ binaries, or translate_ them -yourself. - -.. _`nightly build`: http://buildbot.pypy.org/nightly/trunk/ -.. _`release`: http://pypy.org/download/ - - -Installing -------------------------------- - -All versions are packaged in a ``tar.bz2`` or ``zip`` file. When -uncompressed, they run in-place. For now you can uncompress them -either somewhere in your home directory or, say, in ``/opt``, and -if you want, put a symlink from somewhere like -``/usr/local/bin/pypy`` to ``/path/to/pypy-1.4.1/bin/pypy``. Do -not move or copy the executable ``pypy`` outside the tree --- put -a symlink to it, otherwise it will not find its libraries. - - -.. _translate: - -Building from source -------------------------------- - -1. Get the source code. 
The following packages contain the source at - the same revision as the above binaries (these are svn exports): - - * `pypy-1.4.1-src.tar.bz2`__ (sources, Unix line endings) - * pypy-1.4.1-src.zip (sources, Windows line endings) not available - - .. __: http://pypy.org/download/pypy-1.4.1-src.tar.bz2 - - Or you can checkout the current trunk using Mercurial_ (the trunk - usually works and is of course more up-to-date):: - - hg clone http://bitbucket.org/pypy/pypy - -2. Make sure you installed the dependencies. See the list here__. - - .. __: http://codespeak.net/pypy/dist/pypy/doc/getting-started-python.html#translating-the-pypy-python-interpreter - -3. Enter the ``goal`` directory:: - - cd pypy-trunk/pypy/translator/goal - -4. Run the ``translate.py`` script. Here are the common combinations - of options (works also with ``python`` instead of ``pypy``):: - - pypy translate.py -Ojit # get the JIT version - pypy translate.py -O2 # get the no-jit version - pypy translate.py --sandbox # get the sandbox version - pypy translate.py --stackless # get the stackless version - pypy translate.py -Ojit --backend=cli # only for branch/cli-jit - -5. Enjoy Mandelbrot ``:-)`` It takes on the order of half an hour to - finish the translation, and 1.7 GB of RAM on a 32-bit system - and 3.0 GB on 64-bit systems. (Do not start a translation on a - machine with insufficient RAM! It will just swap forever.) - -Notes: - -* It is recommended to use PyPy to do translations, instead of using CPython, - because it is twice as fast. (Using CPython would lower the memory - requirement down to 1.2 GB on 32-bit, 2.4 GB on 64-bit.) - You should just start by downloading an official release of PyPy (with the - JIT). - -* Because of ``asmgcroot``, compiling the generated C files containing the - JIT is delicate. It requires using either MSVC or gcc with no particularly - fancy options. It does not work e.g. with clang, or if you pass uncommon - options with the ``CFLAGS`` environment variable. 
The generated C files - are left around in ``/tmp/usession-$USER/testing_1/`` or - ``$TMPDIR/usession-$USER/testing_1``, so if compilation - fails (i.e. you get a ``CompilationError``) or if you want to build a - debug build, you can go there and retry running ``make``. For example: - ``CFLAGS= make``, or ``make lldebug``. It is recommended to move this - directory away if you want to keep it; otherwise, future runs of - ``translate.py`` will remove it. - -.. _`x86 (IA-32)`: http://en.wikipedia.org/wiki/IA-32 -.. _`x86-64`: http://en.wikipedia.org/wiki/X86-64 -.. _SSE2: http://en.wikipedia.org/wiki/SSE2 -.. _`ctypes`: http://www.python.org/doc/2.5.4/lib/module-ctypes.html -.. _`contact us`: contact.html -.. _`sandboxing`: features.html#sandboxing -.. _`stackless`: http://www.stackless.com/ -.. _`greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt -.. _Mercurial: http://mercurial.selenic.com/ -.. _`nightly binary builds`: http://buildbot.pypy.org/nightly/trunk/ - -Checksums ---------- - -Here are the checksums for each of the downloads (md5 and sha1):: - - 3dccf24c23e30b4a04cf122f704b4064 pypy-1.4.1-linux.tar.bz2 - 1fb62a813978c2581e9e09debad6b116 pypy-1.4.1-linux64.tar.bz2 - 8584c4e8c042f5b661fcfffa0d9b8a25 pypy-1.4.1-osx.tar.bz2 - 769b3fb134944ee8c22ad0834970de3b pypy-1.4.1-osx64.tar.bz2 - ebbbb156b1eb842e9e65d909ed5f9f6d pypy-1.4.1-src.tar.bz2 - - 6e2366377ad2f0c583074d3ba6f60d064549bef2 pypy-1.4.1-linux.tar.bz2 - 1cfd53343e19264905a00d2ffcf83e03e39dcbb3 pypy-1.4.1-linux64.tar.bz2 - 961470e7510c47b8f56e6cc6da180605ba058cb6 pypy-1.4.1-osx.tar.bz2 - 8e2830bef80b93f4d3c016b972fbdf7bcd403abc pypy-1.4.1-osx64.tar.bz2 - 922a8815377fe2e0c015338fa8b28ae16bf8c840 pypy-1.4.1-src.tar.bz2 diff --git a/pypy.org/source/yatiblog.conf b/pypy.org/source/yatiblog.conf deleted file mode 100644 --- a/pypy.org/source/yatiblog.conf +++ /dev/null @@ -1,12 +0,0 @@ -site_author: PyPy Team -site_description: PyPy -site_license: MIT -site_title: PyPy -site_url: 
http://pypy.org - -analytics_id: UA-7778406-3 -section_id: code -typekit: disabled - -index_pages: -- archive.html: archive.genshi diff --git a/pypy.org/source/speed.txt b/pypy.org/source/speed.txt deleted file mode 100644 --- a/pypy.org/source/speed.txt +++ /dev/null @@ -1,1 +0,0 @@ - diff --git a/pypy.org/index.html b/pypy.org/index.html deleted file mode 100644 --- a/pypy.org/index.html +++ /dev/null @@ -1,127 +0,0 @@ - - - - PyPy :: PyPy - - - - - - - - - - - - - - - -
- -
-
-
-

PyPy

-

PyPy is a very compliant implementation of the Python language. -PyPy has several advantages and distinctive features:

-
-
    -
  • Speed: thanks to its Just-in-Time compiler, Python programs -often run faster on PyPy. (What is a JIT compiler?)
  • -
  • Memory usage: large, memory-hungry Python programs might end up -taking less space than they do in CPython.
  • -
  • Sandboxing: PyPy provides the ability to run untrusted code in a -fully secure way.
  • -
  • Stackless: PyPy can be configured to run in stackless mode, -providing micro-threads for massive concurrency.
  • -
  • As well as other features.
  • -
-
-

Download and try out the PyPy release 1.4.1!

-

To read more about Python, look into Python docs and check our -Compatibility page. PyPy can run such Python libraries as twisted -and django and supports ctypes.

-
- -
-
-
- - \ No newline at end of file From commits-noreply at bitbucket.org Fri Apr 29 14:10:54 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 14:10:54 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: more rpython toolchain language changes Message-ID: <20110429121054.8469C36C055@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43767:4824fdcde4da Date: 2011-04-29 14:10 +0200 http://bitbucket.org/pypy/pypy/changeset/4824fdcde4da/ Log: more rpython toolchain language changes diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -5,14 +5,14 @@ .. contents:: -This document describes the tool chain that we have developed to analyze +This document describes the toolchain that we have developed to analyze and "compile" RPython_ programs (like PyPy itself) to various target platforms. .. _RPython: coding-guide.html#restricted-python It consists of three broad sections: a slightly simplified overview, a -brief introduction to each of the major components of our tool chain and +brief introduction to each of the major components of our toolchain and then a more comprehensive section describing how the pieces fit together. If you are reading this document for the first time, the Overview_ is likely to be most useful, if you are trying to refresh your PyPy memory @@ -21,7 +21,7 @@ Overview ======== -The job of translation tool chain is to translate RPython_ programs into an +The job of the translation toolchain is to translate RPython_ programs into an efficient version of that program for one of various target platforms, generally one that is considerably lower-level than Python. It divides this task into several steps, and the purpose of this document is to @@ -40,7 +40,7 @@ .. 
_`initialization time`: -The translation tool chain never sees Python source code or syntax +The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The `bytecode evaluator`_ and the `Flow Object Space`_ work through these @@ -706,17 +706,17 @@ External Function Calls ======================= -External function call approach is described in `rffi`_ documentation. +The external function call approach is described in `rffi`_ documentation. .. _`rffi`: rffi.html How It Fits Together ==================== -As should be clear by now, the translation tool chain of PyPy is a flexible +As should be clear by now, the translation toolchain of PyPy is a flexible and complicated beast, formed from many separate components. -The following image summarizes the various parts of the tool chain as of the +The following image summarizes the various parts of the toolchain as of the 0.9 release, with the default translation to C highlighted: .. image:: image/pypy-translation-0.9.png From commits-noreply at bitbucket.org Fri Apr 29 14:13:57 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 14:13:57 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: more Rpython toolchain Message-ID: <20110429121357.BD782282B52@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43768:f9b275fca640 Date: 2011-04-29 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f9b275fca640/ Log: more Rpython toolchain diff --git a/pypy/doc/video-index.rst b/pypy/doc/video-index.rst --- a/pypy/doc/video-index.rst +++ b/pypy/doc/video-index.rst @@ -162,7 +162,7 @@ PAL, 48 min, divx AVI Holger Krekel and Armin Rigo talk about the basic implementation, -implementation level aspects and the translation toolchain. This +implementation level aspects and the RPython translation toolchain. 
This talk also gives an insight into how a developer works with these tools on a daily basis, and pays special attention to flow graphs. @@ -184,7 +184,7 @@ PAL, 44 min, divx AVI -Michael Hudson gives an in-depth, very technical introduction to a PyPy sprint. The film provides a detailed and hands-on overview about the architecture of PyPy, especially the translation toolchain. +Michael Hudson gives an in-depth, very technical introduction to a PyPy sprint. The film provides a detailed and hands-on overview about the architecture of PyPy, especially the RPython translation toolchain. Scripting .NET with IronPython by Jim Hugunin @@ -292,5 +292,5 @@ PAL 72 min, DivX AVI -Core developers Armin Rigo, Samuele Pedroni and Carl Friedrich Bolz are giving an overview of the PyPy architecture, the standard interpreter, the translation toolchain and the just-in-time compiler. +Core developers Armin Rigo, Samuele Pedroni and Carl Friedrich Bolz are giving an overview of the PyPy architecture, the standard interpreter, the RPython translation toolchain and the just-in-time compiler. 
From commits-noreply at bitbucket.org Fri Apr 29 15:14:31 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 15:14:31 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Rpython translation toolchain again Message-ID: <20110429131431.9267C282B52@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43769:864df3d29f9f Date: 2011-04-29 15:14 +0200 http://bitbucket.org/pypy/pypy/changeset/864df3d29f9f/ Log: Rpython translation toolchain again diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -13,7 +13,7 @@ Translating PyPy with Visual Studio ----------------------------------- -We routinely test the translation toolchain using Visual Studio .NET +We routinely test the `RPython translation toolchain`_ using Visual Studio .NET 2005, Professional Edition, and Visual Studio .NET 2008, Express Edition. Other configurations may work as well. @@ -122,3 +122,4 @@ cp .libs/libffi-5.dll .. _`libffi source files`: http://sourceware.org/libffi/ +.. _`RPython translation toolchain`: translation.rst From commits-noreply at bitbucket.org Fri Apr 29 15:36:03 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Fri, 29 Apr 2011 15:36:03 +0200 (CEST) Subject: [pypy-svn] pypy default: Fix a silly type from 2 years ago. Maintain string identity in {set, get, del}attr when calling __{set, get, del}attr__. Message-ID: <20110429133603.1245B282B52@codespeak.net> Author: Alex Gaynor Branch: Changeset: r43770:8d941baaa75f Date: 2011-04-29 09:35 -0400 http://bitbucket.org/pypy/pypy/changeset/8d941baaa75f/ Log: Fix a silly type from 2 years ago. Maintain string identity in {set,get,del}attr when calling __{set,get,del}attr__. 
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -138,7 +138,7 @@ w_d = space.newdict(from_strdict_shared=d) assert self.space.eq_w(space.getitem(w_d, w("a")), w(1)) assert self.space.eq_w(space.getitem(w_d, w("b")), w(2)) - + def test_initialize_from_strdict_really_shared(self): space = self.space w = space.wrap @@ -156,8 +156,8 @@ cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names) def test_equality(self): - d = {1:2} - f = {1:2} + d = {1:2} + f = {1:2} assert d == f assert d != {1:3} @@ -165,13 +165,13 @@ d = {1:2, 3:4} d.clear() assert len(d) == 0 - + def test_copy(self): d = {1:2, 3:4} dd = d.copy() assert d == dd assert not d is dd - + def test_get(self): d = {1:2, 3:4} assert d.get(1) == 2 @@ -193,18 +193,18 @@ assert result == 44 assert len(dd) == 1 raises(KeyError, dd.pop, 33) - + def test_has_key(self): d = {1:2, 3:4} assert d.has_key(1) assert not d.has_key(33) - + def test_items(self): d = {1:2, 3:4} its = d.items() its.sort() assert its == [(1,2),(3,4)] - + def test_iteritems(self): d = {1:2, 3:4} dd = d.copy() @@ -212,27 +212,27 @@ assert v == dd[k] del dd[k] assert not dd - + def test_iterkeys(self): d = {1:2, 3:4} dd = d.copy() for k in d.iterkeys(): del dd[k] assert not dd - + def test_itervalues(self): d = {1:2, 3:4} values = [] for k in d.itervalues(): values.append(k) assert values == d.values() - + def test_keys(self): d = {1:2, 3:4} kys = d.keys() kys.sort() assert kys == [1,3] - + def test_popitem(self): d = {1:2, 3:4} it = d.popitem() @@ -393,7 +393,7 @@ d = dict(a=33, b=44) assert d == {'a':33, 'b':44} d = dict({'a':33, 'b':44}) - assert d == {'a':33, 'b':44} + assert d == {'a':33, 'b':44} try: d = dict(23) except (TypeError, ValueError): pass else: self.fail("dict(23) should raise!") @@ -445,7 +445,7 @@ assert d3['x'] == 42 assert d3['y'] == 42 - def 
test_overridden_setitem_customkey(self): + def test_overridden_setitem_customkey(self): class D(dict): def __setitem__(self, key, value): dict.__setitem__(self, key, 42) @@ -495,7 +495,7 @@ assert v1 == v2 else: assert False, 'Expected KeyError' - + def test_del_keyerror_unpacking(self): d = {} for v1 in ['Q', (1,)]: @@ -564,9 +564,21 @@ setattr(a, s, 42) key = a.__dict__.keys()[0] assert key == s + assert key is not s assert type(key) is str assert getattr(a, s) == 42 + def test_setattr_string_identify(self): + attrs = [] + class A(object): + def __setattr__(self, attr, value): + attrs.append(attr) + + a = A() + s = "abc" + setattr(a, s, 123) + assert attrs[0] is s + class AppTestDictViews: def test_dictview(self): d = {1: 2, 3: 4} diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py --- a/pypy/module/__builtin__/operation.py +++ b/pypy/module/__builtin__/operation.py @@ -49,7 +49,7 @@ # space.{get,set,del}attr()... # Note that if w_name is already a string (or a subclass of str), # it must be returned unmodified (and not e.g. unwrapped-rewrapped). - if not space.is_true(space.is_(w_name, space.w_str)): + if not space.is_w(space.type(w_name), space.w_str): name = space.str_w(w_name) # typecheck w_name = space.wrap(name) # rewrap as a real string return w_name @@ -185,7 +185,7 @@ the sentinal. 
""" if w_sentinel is None: - return space.iter(w_collection_or_callable) + return space.iter(w_collection_or_callable) else: return iter_sentinel(space, w_collection_or_callable, w_sentinel) From commits-noreply at bitbucket.org Fri Apr 29 15:46:02 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 15:46:02 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: suggest distribute and pip instead of setuptools and easy_install Message-ID: <20110429134602.1AD1536C206@codespeak.net> Author: Antonio Cuni Branch: documentation-cleanup Changeset: r43771:f07036d18a8e Date: 2011-04-29 15:45 +0200 http://bitbucket.org/pypy/pypy/changeset/f07036d18a8e/ Log: suggest distribute and pip instead of setuptools and easy_install diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -56,14 +56,17 @@ library. If you want to install 3rd party libraries, the most convenient way is to -install setuptools_, which will bring ``easy_install`` to you:: +install distribute_ and pip_: - $ wget http://peak.telecommunity.com/dist/ez_setup.py + $ curl -O http://python-distribute.org/distribute_setup.py - $ ./pypy-1.5-linux/bin/pypy ez_setup.py + $ curl -O https://github.com/pypa/pip/raw/master/contrib/get-pip.py - $ ls ./pypy-1.5-linux/bin/ - easy_install easy_install-2.7 pypy + $ ./pypy-1.5-linux/bin/pypy distribute_setup.py + + $ ./pypy-1.5-linux/bin/pypy get-pip.py + + $ ./pypy-1.5-linux/bin/pip install pygments # for example 3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and the scripts in ``pypy-1.5-linux/bin``. 
From commits-noreply at bitbucket.org Fri Apr 29 15:47:44 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 15:47:44 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: more RPython toolchain Message-ID: <20110429134744.676D336C206@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43772:1250ec028b7c Date: 2011-04-29 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/1250ec028b7c/ Log: more RPython toolchain diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -8,9 +8,20 @@ What is PyPy ? ============== +In common parlance, PyPy has been used to mean two things. The first is the +`RPython translation toolchain`_, which is a framework for generating +dynamic programming language implementations. And the second is one +particular implementation that is so generated -- +an implementation of the Python_ programming language written in +Python itself. It is designed to be flexible and easy to experiment with. -PyPy is an implementation of the Python_ programming language written in -Python itself, flexible and easy to experiment with. +This double usage has proven to be confusing, and we are trying to move +away from using the word PyPy to mean both things. From now on we will +try to use PyPy to only mean the Python implementation, and say the +`RPython translation toolchain`_ when we mean the framework. Some older +documents, presentations, papers and videos will still have the old +usage. You are hereby warned. + We target a large variety of platforms, small and large, by providing a compiler toolsuite that can produce custom Python versions. Platform, memory and threading models, as well as the JIT compiler itself, are aspects of the @@ -18,6 +29,7 @@ language implementation itself. `more...`_ .. _Python: http://docs.python.org/reference/ +.. _`RPython translation toolchain`: translation.html .. 
_`more...`: architecture.html Just the facts diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -122,4 +122,4 @@ cp .libs/libffi-5.dll .. _`libffi source files`: http://sourceware.org/libffi/ -.. _`RPython translation toolchain`: translation.rst +.. _`RPython translation toolchain`: translation.html From commits-noreply at bitbucket.org Fri Apr 29 15:47:45 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 15:47:45 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: merge heads Message-ID: <20110429134745.8375D36C20B@codespeak.net> Author: Laura Creighton Branch: documentation-cleanup Changeset: r43773:621831c843a7 Date: 2011-04-29 15:47 +0200 http://bitbucket.org/pypy/pypy/changeset/621831c843a7/ Log: merge heads diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -68,14 +68,17 @@ library. If you want to install 3rd party libraries, the most convenient way is to -install setuptools_, which will bring ``easy_install`` to you:: +install distribute_ and pip_: - $ wget http://peak.telecommunity.com/dist/ez_setup.py + $ curl -O http://python-distribute.org/distribute_setup.py - $ ./pypy-1.5-linux/bin/pypy ez_setup.py + $ curl -O https://github.com/pypa/pip/raw/master/contrib/get-pip.py - $ ls ./pypy-1.5-linux/bin/ - easy_install easy_install-2.7 pypy + $ ./pypy-1.5-linux/bin/pypy distribute_setup.py + + $ ./pypy-1.5-linux/bin/pypy get-pip.py + + $ ./pypy-1.5-linux/bin/pip install pygments # for example 3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and the scripts in ``pypy-1.5-linux/bin``. 
From commits-noreply at bitbucket.org Fri Apr 29 16:10:30 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 29 Apr 2011 16:10:30 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: add a minimal release announcement Message-ID: <20110429141030.B7114282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43776:908d80bad51e Date: 2011-04-29 16:09 +0200 http://bitbucket.org/pypy/pypy/changeset/908d80bad51e/ Log: add a minimal release announcement diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-1.5.0.rst @@ -0,0 +1,58 @@ +====================== +PyPy 1.5: Catching Up +====================== + +We're pleased to announce the 1.5 release of PyPy. This release is updating +PyPy to the features of CPython 2.7.1, including the standard library. Thus the +features of `CPython 2.6`_ and `CPython 2.7`_ are now supported. It also +contains additional performance improvements. You can download it here: + + http://pypy.org/download.html + +What is PyPy +============ + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6`_ performance comparison) +due to its integrated tracing JIT compiler. + +Among its new features, this release includes the features of CPython 2.6 and +2.7. It also includes a large number of small improvements to the tracing JIT +compiler. + +Numerous speed achievements are described on `our blog`_. Normalized speed +charts comparing `pypy 1.5 and pypy 1.4`_ as well as `pypy 1.4 and cpython +2.6.2`_ are available on our benchmark website. The speed improvement over 1.4 +seems to be around 25% on average. + +More highlights +=============== + +- The largest change in PyPy's tracing JIT is adding support for `loop invariant + code motion`_, which was mostly done by Håkan Ardö. 
This feature improves the + performance of tight loops doing numerical calculations. + +- The CPython extension module API has been improved and now supports many more + extensions. For information on which one are supported, please refer to our + `compatibility wiki`_. + +- These changes make it possible to support `Tkinter and IDLE`_. + + + +Cheers, + +Carl Friedrich Bolz, Antonio Cuni, Maciej Fijalkowski, +Amaury Forgeot d'Arc, Armin Rigo and the PyPy team + + +.. _`CPython 2.6`: http://docs.python.org/dev/whatsnew/2.6.html +.. _`CPython 2.7`: http://docs.python.org/dev/whatsnew/2.7.html + +.. _`our blog`: http://morepypy.blogspot.com +.. _`pypy 1.5 and pypy 1.4`: http://bit.ly/joPhHo +.. _`pypy 1.5 and cpython 2.6.2`: http://bit.ly/mbVWwJ + +.. _`loop invariant code motion`: http://morepypy.blogspot.com/2011/01/loop-invariant-code-motion.html +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home +.. _`Tkinter and IDLE`: http://morepypy.blogspot.com/2011/04/using-tkinter-and-idle-with-pypy.html From commits-noreply at bitbucket.org Fri Apr 29 16:19:53 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 29 Apr 2011 16:19:53 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: make the logo square and export two different versions Message-ID: <20110429141953.B428F282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r3545:b42725a3cd99 Date: 2011-04-29 16:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/b42725a3cd99/ Log: make the logo square and export two different versions diff --git a/logo/pypy_small.png b/logo/pypy_small.png deleted file mode 100644 Binary file logo/pypy_small.png has changed diff --git a/logo/pypy_small64.png b/logo/pypy_small64.png new file mode 100644 index 0000000000000000000000000000000000000000..45a8f80bbd11a4fcaa1a02002d862aaea36e490c GIT binary patch [cut] diff --git a/logo/pypy_small128.png b/logo/pypy_small128.png new file mode 100644 index 
0000000000000000000000000000000000000000..1a90eae9aabc7a7dcf5b6327657ba2d057bedc02 GIT binary patch [cut] diff --git a/logo/pypy_small.svg b/logo/pypy_small.svg --- a/logo/pypy_small.svg +++ b/logo/pypy_small.svg @@ -14,16 +14,16 @@ id="Layer_1" x="0px" y="0px" - width="210.448" - height="165.661" - viewBox="-93 6.5 210.448 165.661" + width="210" + height="210" + viewBox="-93 6.5 210 210" enable-background="new -93 6.5 800 600" xml:space="preserve" inkscape:version="0.48.1 r9760" - sodipodi:docname="pypy_fin.svg" - inkscape:export-filename="/home/cfbolz/projects/extradoc/logo/pypy_fin.jpg.png" - inkscape:export-xdpi="59.689667" - inkscape:export-ydpi="59.689667">image/svg+xml + transform="translate(-42.203001,-16.756995)"> </g> <polygon - points="96.208,150.154 116.592,151.498 121.744,167.178 108.528,175.242 104.272,164.938 89.488,155.53 " + points="108.528,175.242 104.272,164.938 89.488,155.53 96.208,150.154 116.592,151.498 121.744,167.178 " id="polygon81" style="opacity:0.5" /> <polygon - points="111.888,139.626 126.448,139.402 134.399,154.634 143.919,139.066 133.335,124.562 119.392,127.922 " + points="143.919,139.066 133.335,124.562 119.392,127.922 111.888,139.626 126.448,139.402 134.399,154.634 " id="polygon83" style="opacity:0.35" /> <polygon - points="120.873,119.701 128.728,107.44 145.818,109.225 138.099,92.69 120.163,93.501 115.261,106.98 " + points="138.099,92.69 120.163,93.501 115.261,106.98 120.873,119.701 128.728,107.44 145.818,109.225 " id="polygon85" style="opacity:0.25" /> <defs @@ -308,7 +308,7 @@ <g id="g264" - transform="translate(-42.203006,-39.096)"> + transform="translate(-42.203001,-16.756995)"> <path d="m 574.956,574.669 c 0,-0.743 -0.024,-1.343 -0.049,-1.895 h 0.948 l 0.048,0.995 h 0.023 c 0.432,-0.707 1.115,-1.127 2.063,-1.127 1.403,0 2.458,1.188 2.458,2.95 0,2.087 -1.271,3.118 -2.639,3.118 -0.768,0 -1.438,-0.336 -1.786,-0.911 h -0.024 v 3.154 h -1.043 v -6.284 z m 1.043,1.548 c 0,0.155 0.024,0.3 0.048,0.432 0.192,0.731 0.828,1.235 
1.583,1.235 1.115,0 1.764,-0.912 1.764,-2.243 0,-1.163 -0.612,-2.159 -1.728,-2.159 -0.72,0 -1.392,0.517 -1.595,1.308 -0.036,0.132 -0.072,0.288 -0.072,0.432 v 0.995 z" id="path266" @@ -487,7 +487,7 @@ </g> <path style="fill:url(#linearGradient4158);fill-opacity:1;stroke:none" - d="m -39.411632,106.22003 c 3.648072,9.15888 32.9454951,17.49942 34.7403051,15.93868 0.0732,-3.93095 0.43679,-8.47093 0.43679,-8.47093 0,0 16.8971519,13.88296 22.5804819,19.05885 -5.14622,2.54814 -24.4402519,13.25297 -24.4402519,13.25297 l 0.41181,-7.58934 c 0,0 -36.2179301,-6.72378 -46.4783691,-11.38754 z" + d="m -39.411627,128.55903 c 3.648072,9.15888 32.9454945,17.49942 34.7403045,15.93868 0.0732,-3.93095 0.43679,-8.47093 0.43679,-8.47093 0,0 16.8971525,13.88296 22.5804825,19.05885 -5.14622,2.54814 -24.4402525,13.25297 -24.4402525,13.25297 l 0.41181,-7.58934 c 0,0 -36.2179295,-6.72378 -46.4783685,-11.38754 z" id="path3382" inkscape:connector-curvature="0" sodipodi:nodetypes="cccccccc" /></svg> \ No newline at end of file From commits-noreply at bitbucket.org Fri Apr 29 16:24:48 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Fri, 29 Apr 2011 16:24:48 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (arigo, cfbolz): mention cProfile Message-ID: <20110429142448.777DB282B52@codespeak.net> Author: Carl Friedrich Bolz <cfbolz at gmx.de> Branch: documentation-cleanup Changeset: r43777:568224719cb2 Date: 2011-04-29 16:24 +0200 http://bitbucket.org/pypy/pypy/changeset/568224719cb2/ Log: (arigo, cfbolz): mention cProfile diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst --- a/pypy/doc/release-1.5.0.rst +++ b/pypy/doc/release-1.5.0.rst @@ -9,8 +9,8 @@ http://pypy.org/download.html -What is PyPy -============ +What is PyPy? +============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for CPython 2.7.1. 
It's fast (`pypy 1.5 and cpython 2.6`_ performance comparison) @@ -38,7 +38,9 @@ - These changes make it possible to support `Tkinter and IDLE`_. - +- The `cProfile`_ profiler is now working together with the JIT. However, it + skews the relative performance in not yet studied ways, so that it is not yet + a perfect tool to find subtle performance problems. Cheers, @@ -56,3 +58,4 @@ .. _`loop invariant code motion`: http://morepypy.blogspot.com/2011/01/loop-invariant-code-motion.html .. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _`Tkinter and IDLE`: http://morepypy.blogspot.com/2011/04/using-tkinter-and-idle-with-pypy.html +.. _`cProfile`: http://docs.python.org/library/profile.html From commits-noreply at bitbucket.org Fri Apr 29 17:27:14 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 17:27:14 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: begin cleanup Message-ID: <20110429152714.5D675282B52@codespeak.net> Author: Laura Creighton <lac at openend.se> Branch: documentation-cleanup Changeset: r43778:07cd5747f575 Date: 2011-04-29 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/07cd5747f575/ Log: begin cleanup diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -18,11 +18,10 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing you typically do is -translate it to get a reasonably performing interpreter. This is described in -the next section. If you just want to play around a bit, you can also try -untranslated `py.py interpreter`_ (which is extremely slow, but still fast -enough for tiny examples). +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. + +.. 
`download a pre-built PyPy`: http://pypy.org/download.html Translating the PyPy Python interpreter --------------------------------------- @@ -35,7 +34,13 @@ You can translate the whole of PyPy's Python interpreter to low level C code, `CLI code`_, or `JVM code`_. -1. Install build-time dependencies. On a Debian box these are:: +1. First `download a pre-built PyPy`_ for your architecture which you will +use to translate your Python interpreter. It is, of course, possible to +translate with a CPython 2.6 or later, but this is not the preferred way, +because it will take a lot longer to run -- depending on your architecture, +between two and three times as long. + +2. Install build-time dependencies. On a Debian box these are:: [user at debian-box ~]$ sudo apt-get install \ gcc make python-dev libffi-dev pkg-config \ @@ -61,26 +66,29 @@ * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) -2. Translation is somewhat time-consuming (30 min to - over one hour) and RAM-hungry. If you have less than 1.5 GB of - RAM (or a slow machine) you might want to pick the +3. Translation is time-consuming -- 45 minutes on a very fast machine -- + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources + are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of - `2` or `3` or `jit` gives much better results, though. + `2` or `3` or `jit` gives much better results, though. But if all + you want to do is to test that some new feature that you just wrote + translates, level 1 is enough. - Let me stress this another time: at ``--opt=1`` you get the Boehm + Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. 
- You really do not want to pick it. The resulting ``pypy-c`` is - slow. + You really do not want to pick it for a program you intend to use. + The resulting ``pypy-c`` is slow. -3. Run:: +4. Run:: cd pypy/translator/goal python translate.py --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel 32-bit environment needs **at - least** 2GB, and 64-bit needs 4GB. + of your choice like ``--opt=2`` if you do not want to include the JIT + compiler. .. _`optimization level`: config/opt.html From commits-noreply at bitbucket.org Fri Apr 29 17:38:27 2011 From: commits-noreply at bitbucket.org (lac) Date: Fri, 29 Apr 2011 17:38:27 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: fix missing list Message-ID: <20110429153827.EFD82282B52@codespeak.net> Author: Laura Creighton <lac at openend.se> Branch: documentation-cleanup Changeset: r43779:ac095c250da3 Date: 2011-04-29 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/ac095c250da3/ Log: fix missing list diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -99,6 +99,9 @@ Note that bin/python is now a symlink to bin/pypy. +.. _`distribute`: http://www.python-distribute.org/ +.. 
_`pip`: http://pypi.python.org/pypi/pip + Clone the repository -------------------- From commits-noreply at bitbucket.org Fri Apr 29 17:46:13 2011 From: commits-noreply at bitbucket.org (amauryfa) Date: Fri, 29 Apr 2011 17:46:13 +0200 (CEST) Subject: [pypy-svn] pypy default: Update win32 build script Message-ID: <20110429154613.75A1D282B52@codespeak.net> Author: Amaury Forgeot d'Arc <amauryfa at gmail.com> Branch: Changeset: r43780:b590cf6de419 Date: 2011-04-29 17:42 +0200 http://bitbucket.org/pypy/pypy/changeset/b590cf6de419/ Log: Update win32 build script diff --git a/pypy/tool/release/win32build.py b/pypy/tool/release/win32build.py --- a/pypy/tool/release/win32build.py +++ b/pypy/tool/release/win32build.py @@ -7,7 +7,7 @@ pypydir = py.path.local(autopath.pypydir) builddir = pypydir.join('translator', 'goal') -VERSION = "1.4.1" +VERSION = "1.5.0a0" def make_pypy(tag, options): pypy = 'pypy%s' % (tag,) From commits-noreply at bitbucket.org Fri Apr 29 17:59:23 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 29 Apr 2011 17:59:23 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: start importing new jump tests from x86 backend Message-ID: <20110429155923.C2C56282B52@codespeak.net> Author: David Schneider <david.schneider at picle.org> Branch: arm-backed-float Changeset: r43781:03e7d62472f9 Date: 2011-04-19 17:22 +0200 http://bitbucket.org/pypy/pypy/changeset/03e7d62472f9/ Log: start importing new jump tests from x86 backend diff --git a/pypy/jit/backend/arm/jump.py b/pypy/jit/backend/arm/jump.py --- a/pypy/jit/backend/arm/jump.py +++ b/pypy/jit/backend/arm/jump.py @@ -89,7 +89,7 @@ if loc.is_stack(): key = loc.as_key() if (key in dst_keys or (loc.width > WORD and - (key + WORD) in dst_keys)): + (key + 1) in dst_keys)): assembler.regalloc_push(loc) extrapushes.append(dstloc) continue diff --git a/pypy/jit/backend/arm/test/test_jump.py b/pypy/jit/backend/arm/test/test_jump.py --- a/pypy/jit/backend/arm/test/test_jump.py +++ 
b/pypy/jit/backend/arm/test/test_jump.py @@ -1,10 +1,9 @@ import py - from pypy.jit.backend.x86.test.test_jump import MockAssembler from pypy.jit.backend.arm.registers import * from pypy.jit.backend.arm.locations import * from pypy.jit.backend.arm.regalloc import ARMFrameManager -from pypy.jit.backend.arm.jump import remap_frame_layout +from pypy.jit.backend.arm.jump import remap_frame_layout, remap_frame_layout_mixed from pypy.jit.metainterp.history import INT frame_pos = ARMFrameManager.frame_pos @@ -114,3 +113,61 @@ ('push', s12), ('mov', r5, s12), ('pop', r5)] + def test_mixed(self): + s23 = frame_pos(2, FLOAT) # non-conflicting locations + s4 = frame_pos(4, INT) + remap_frame_layout_mixed(self.assembler, [r1], [s4], 'tmp', + [s23], [d5], 'xmmtmp') + assert self.assembler.ops == [('mov', r1, s4), + ('mov', s23, d5)] + def test_mixed2(self): + s23 = frame_pos(2, FLOAT) # gets stored in pos 2 and 3, with value==3 + s3 = frame_pos(3, INT) + remap_frame_layout_mixed(self.assembler, [r1], [s3], 'tmp', + [s23], [d5], 'xmmtmp') + assert self.assembler.ops == [('push', s23), + ('mov', r1, s3), + ('pop', d5)] + def test_mixed3(self): + s23 = frame_pos(2, FLOAT) + s2 = frame_pos(2, INT) + remap_frame_layout_mixed(self.assembler, [r1], [s2], 'tmp', + [s23], [d5], 'xmmtmp') + assert self.assembler.ops == [ + ('push', s23), + ('mov', r1, s2), + ('pop', d5)] + def test_mixed4(self): + s23 = frame_pos(2, FLOAT) + s4 = frame_pos(4, INT) + s45 = frame_pos(4, FLOAT) + s1 = frame_pos(1, INT) + remap_frame_layout_mixed(self.assembler, [s4], [s1], r3, + [s23], [s45], d3) + assert self.assembler.ops == [('mov', s4, r3), + ('mov', r3, s1), + ('mov', s23, d3), + ('mov', d3, s45)] + def test_mixed5(self): + s2 = frame_pos(2, INT) + s23 = frame_pos(2, FLOAT) + s4 = frame_pos(4, INT) + s45 = frame_pos(4, FLOAT) + remap_frame_layout_mixed(self.assembler, [s4], [s2], r3, + [s23], [s45], d3) + assert self.assembler.ops == [('push', s23), + ('mov', s4, r3), + ('mov', r3, s2), + ('pop', s45)] 
+ def test_mixed6(self): + s3 = frame_pos(3, INT) + s23 = frame_pos(2, FLOAT) + s4 = frame_pos(4, INT) + s45 = frame_pos(4, FLOAT) + remap_frame_layout_mixed(self.assembler, [s4], [s3], r3, + [s23], [s45], d3) + assert self.assembler.ops == [('push', s23), + ('mov', s4, r3), + ('mov', r3, s3), + ('pop', s45)] + From commits-noreply at bitbucket.org Fri Apr 29 17:59:25 2011 From: commits-noreply at bitbucket.org (bivab) Date: Fri, 29 Apr 2011 17:59:25 +0200 (CEST) Subject: [pypy-svn] pypy arm-backed-float: finish porting test_jump for arm backend Message-ID: <20110429155925.A6076282B56@codespeak.net> Author: David Schneider <david.schneider at picle.org> Branch: arm-backed-float Changeset: r43782:78b6e41c424b Date: 2011-04-29 17:57 +0200 http://bitbucket.org/pypy/pypy/changeset/78b6e41c424b/ Log: finish porting test_jump for arm backend diff --git a/pypy/jit/backend/arm/test/test_jump.py b/pypy/jit/backend/arm/test/test_jump.py --- a/pypy/jit/backend/arm/test/test_jump.py +++ b/pypy/jit/backend/arm/test/test_jump.py @@ -1,3 +1,4 @@ +import random import py from pypy.jit.backend.x86.test.test_jump import MockAssembler from pypy.jit.backend.arm.registers import * @@ -117,14 +118,14 @@ s23 = frame_pos(2, FLOAT) # non-conflicting locations s4 = frame_pos(4, INT) remap_frame_layout_mixed(self.assembler, [r1], [s4], 'tmp', - [s23], [d5], 'xmmtmp') + [s23], [d5], 'vfptmp') assert self.assembler.ops == [('mov', r1, s4), ('mov', s23, d5)] def test_mixed2(self): s23 = frame_pos(2, FLOAT) # gets stored in pos 2 and 3, with value==3 s3 = frame_pos(3, INT) remap_frame_layout_mixed(self.assembler, [r1], [s3], 'tmp', - [s23], [d5], 'xmmtmp') + [s23], [d5], 'vfptmp') assert self.assembler.ops == [('push', s23), ('mov', r1, s3), ('pop', d5)] @@ -132,7 +133,7 @@ s23 = frame_pos(2, FLOAT) s2 = frame_pos(2, INT) remap_frame_layout_mixed(self.assembler, [r1], [s2], 'tmp', - [s23], [d5], 'xmmtmp') + [s23], [d5], 'vfptmp') assert self.assembler.ops == [ ('push', s23), ('mov', r1, s2), @@ 
-171,3 +172,140 @@ ('mov', r3, s3), ('pop', s45)] +def test_random_mixed(): + assembler = MockAssembler() + registers1 = [r0, r1, r2] + registers2 = [d0, d1, d2] + VFPWORDS = 2 + # + def pick1(): + n = random.randrange(-3, 10) + if n < 0: + return registers1[n] + else: + return frame_pos(n, INT) + def pick2(): + n = random.randrange(-3 , 10 // VFPWORDS) + if n < 0: + return registers2[n] + else: + return frame_pos(n*VFPWORDS, FLOAT) + # + def pick1c(): + n = random.randrange(-2000, 500) + if n >= 0: + return imm(n) + else: + return pick1() + # + def pick_dst(fn, count, seen): + result = [] + while len(result) < count: + x = fn() + keys = [x.as_key()] + if x.is_stack() and x.width > WORD: + keys.append(keys[0] + 1) + for key in keys: + if key in seen: + break + else: + for key in keys: + seen[key] = True + result.append(x) + return result + # + def get_state(locations): + regs1 = {} + regs2 = {} + stack = {} + for i, loc in enumerate(locations): + if loc.is_vfp_reg(): + if loc.width > WORD: + newvalue = ('value-vfp-%d' % i, + 'value-vfp-hiword-%d' % i) + else: + newvalue = 'value-vfp-%d' % i + regs2[loc.value] = newvalue + elif loc.is_reg(): + regs1[loc.value] = 'value-int-%d' % i + elif loc.is_stack(): + stack[loc.position] = 'value-width%d-%d' % (loc.width, i) + if loc.width > WORD: + stack[loc.position-1] = 'value-hiword-%d' % i + else: + assert loc.is_imm() or loc.is_imm_float() + return regs1, regs2, stack + # + for i in range(1):#range(500): + seen = {} + src_locations2 = [pick2() for i in range(4)] + dst_locations2 = pick_dst(pick2, 4, seen) + src_locations1 = [pick1c() for i in range(5)] + dst_locations1 = pick_dst(pick1, 5, seen) + #import pdb; pdb.set_trace() + assembler = MockAssembler() + remap_frame_layout_mixed(assembler, + src_locations1, dst_locations1, ip, + src_locations2, dst_locations2, vfp_ip) + # + regs1, regs2, stack = get_state(src_locations1 + + src_locations2) + # + def read(loc, expected_width=None): + if expected_width is not None: + 
assert loc.width == expected_width*WORD + if loc.is_vfp_reg(): + return regs2[loc.value] + elif loc.is_reg(): + return regs1[loc.value] + elif loc.is_stack(): + got = stack[loc.position] + if loc.width > WORD: + got = (got, stack[loc.position-1]) + return got + if loc.is_imm() or loc.is_imm_float(): + return 'const-%d' % loc.value + assert 0, loc + # + def write(loc, newvalue): + if loc.is_vfp_reg(): + regs2[loc.value] = newvalue + elif loc.is_reg(): + regs1[loc.value] = newvalue + elif loc.is_stack(): + if loc.width > WORD: + newval1, newval2 = newvalue + stack[loc.position] = newval1 + stack[loc.position-1] = newval2 + else: + stack[loc.position] = newvalue + else: + assert 0, loc + # + src_values1 = [read(loc, 1) for loc in src_locations1] + src_values2 = [read(loc, 2) for loc in src_locations2] + # + extrapushes = [] + for op in assembler.ops: + if op[0] == 'mov': + src, dst = op[1:] + assert src.is_reg() or src.is_vfp_reg() or src.is_stack() or src.is_imm_float() or src.is_imm() + assert dst.is_reg() or dst.is_vfp_reg() or dst.is_stack() + assert not (src.is_stack() and dst.is_stack()) + write(dst, read(src)) + elif op[0] == 'push': + src, = op[1:] + assert src.is_reg() or src.is_vfp_reg() or src.is_stack() + extrapushes.append(read(src)) + elif op[0] == 'pop': + dst, = op[1:] + assert dst.is_reg() or dst.is_vfp_reg() or dst.is_stack() + write(dst, extrapushes.pop()) + else: + assert 0, "unknown op: %r" % (op,) + assert not extrapushes + # + for i, loc in enumerate(dst_locations1): + assert read(loc, 1) == src_values1[i] + for i, loc in enumerate(dst_locations2): + assert read(loc, 2) == src_values2[i] diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py --- a/pypy/jit/backend/arm/locations.py +++ b/pypy/jit/backend/arm/locations.py @@ -24,6 +24,7 @@ class RegisterLocation(AssemblerLocation): _immutable_ = True + width = WORD def __init__(self, value): self.value = value @@ -40,6 +41,7 @@ class 
VFPRegisterLocation(RegisterLocation): _immutable_ = True type = FLOAT + width = 2*WORD def get_single_precision_regs(self): return [VFPRegisterLocation(i) for i in [self.value*2, self.value*2+1]] @@ -58,6 +60,8 @@ class ImmLocation(AssemblerLocation): _immutable_ = True + width = WORD + def __init__(self, value): self.value = value @@ -78,6 +82,7 @@ """This class represents an imm float value which is stored in memory at the address stored in the field value""" _immutable_ = True + width = 2*WORD type = FLOAT def __init__(self, value): @@ -104,7 +109,7 @@ self.type = type def __repr__(self): - return 'FP+%d' % (self.position,) + return 'FP(%s)+%d' % (self.type, self.position,) def location_code(self): return 'b' From commits-noreply at bitbucket.org Fri Apr 29 20:02:58 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 20:02:58 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Update the documentation of this option. Message-ID: <20110429180258.0BA2C282B52@codespeak.net> Author: Armin Rigo <arigo at tunes.org> Branch: documentation-cleanup Changeset: r43783:010a7ad8d1a1 Date: 2011-04-29 20:02 +0200 http://bitbucket.org/pypy/pypy/changeset/010a7ad8d1a1/ Log: Update the documentation of this option. diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -1,15 +1,16 @@ -Choose method how to find roots in the GC. Boehm and refcounting have their own -methods, this is mostly only interesting for framework GCs. For those you have -a choice of various alternatives: +Choose the method used to find the roots in the GC. This only +applies to our framework GCs. You have a choice of two +alternatives: - - use a shadow stack (XXX link to paper), e.g. 
explicitly maintaining a stack - of roots +- ``--gcrootfinder=shadowstack``: use a so-called "shadow + stack", which is an explicitly maintained custom stack of + root pointers. This is the most portable solution. - - use stackless to find roots by unwinding the stack. Requires - :config:`translation.stackless`. Note that this turned out to - be slower than just using a shadow stack. +- ``--gcrootfinder=asmgcc``: use assembler hackery to find the + roots directly from the normal stack. This is a bit faster, + but platform specific. It works so far with GCC or MSVC, + on i386 and x86-64. - - use GCC and i386 specific assembler hackery to find the roots on the stack. - This is fastest but platform specific. - - - Use LLVM's GC facilities to find the roots. +You may have to force the use of the shadowstack root finder if +you are running into troubles or if you insist on translating +PyPy with other compilers like clang. From commits-noreply at bitbucket.org Fri Apr 29 20:12:27 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 20:12:27 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: update the list of supported modules Message-ID: <20110429181227.8B09F282B52@codespeak.net> Author: Antonio Cuni <anto.cuni at gmail.com> Branch: extradoc Changeset: r175:b68035292a16 Date: 2011-04-29 20:12 +0200 http://bitbucket.org/pypy/pypy.org/changeset/b68035292a16/ Log: update the list of supported modules diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -59,11 +59,11 @@ your build system, it might work out of the box or will be slightly harder.</p> <p>Standard library modules supported by PyPy, in alphabetical order:</p> <ul class="simple"> -<li><tt class="docutils literal">__builtin__ __pypy__ _ast _bisect _codecs _lsprof _minimal_curses _random _rawffi _ssl _socket _sre _weakref array bz2 cStringIO cpyext crypt errno exceptions fcntl  gc itertools marshal math md5 mmap operator parser posix pyexpat select sha signal 
struct symbol sys termios thread time token unicodedata zipimport zlib</tt></li> +<li><tt class="docutils literal">__builtin__, __pypy__, _ast, _bisect, _codecs, _collections, _ffi, _file, _hashlib, _io, _locale, _lsprof, _md5, _minimal_curses, _multiprocessing, _pickle_support, _random, _rawffi, _sha, _socket, _sre, _ssl, _stackless, _warnings, _weakref, _winreg, array, binascii, bz2, cStringIO, clr, cmath, cpyext, crypt, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, micronumpy, mmap, operator, oracle, parser, posix, pyexpat, pypyjit, rbench, rctime, select, signal, struct, symbol, sys, termios, thread, time, token, unicodedata, zipimport, zlib</tt></li> </ul> <p>Supported, but written in pure-python:</p> <ul class="simple"> -<li><tt class="docutils literal">binascii cPickle cmath collections ctypes datetime functools grp pwd sqlite3 syslog</tt></li> +<li><tt class="docutils literal">cPickle, _csv, ctypes, datetime, dbm, _functools, grp, pwd, readline, resource, sqlite3, syslog, tputil</tt></li> </ul> <p>All modules that are pure python in CPython of course work.</p> <p>Python libraries known to work under PyPy (the list is not exhaustive):</p> diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -18,11 +18,11 @@ Standard library modules supported by PyPy, in alphabetical order: -* ``__builtin__ __pypy__ _ast _bisect _codecs _lsprof _minimal_curses _random _rawffi _ssl _socket _sre _weakref array bz2 cStringIO cpyext crypt errno exceptions fcntl gc itertools marshal math md5 mmap operator parser posix pyexpat select sha signal struct symbol sys termios thread time token unicodedata zipimport zlib`` +* ``__builtin__, __pypy__, _ast, _bisect, _codecs, _collections, _ffi, _file, _hashlib, _io, _locale, _lsprof, _md5, _minimal_curses, _multiprocessing, _pickle_support, _random, _rawffi, _sha, _socket, _sre, _ssl, _stackless, _warnings, _weakref, _winreg, array, binascii, bz2, cStringIO, clr, cmath, 
cpyext, crypt, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, micronumpy, mmap, operator, oracle, parser, posix, pyexpat, pypyjit, rbench, rctime, select, signal, struct, symbol, sys, termios, thread, time, token, unicodedata, zipimport, zlib`` Supported, but written in pure-python: -* ``binascii cPickle cmath collections ctypes datetime functools grp pwd sqlite3 syslog`` +* ``cPickle, _csv, ctypes, datetime, dbm, _functools, grp, pwd, readline, resource, sqlite3, syslog, tputil`` All modules that are pure python in CPython of course work. From commits-noreply at bitbucket.org Fri Apr 29 20:16:40 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 20:16:40 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: update the list of modules Message-ID: <20110429181640.B3518282B52@codespeak.net> Author: Antonio Cuni <anto.cuni at gmail.com> Branch: documentation-cleanup Changeset: r43784:0661bb939d95 Date: 2011-04-29 20:06 +0200 http://bitbucket.org/pypy/pypy/changeset/0661bb939d95/ Log: update the list of modules diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -23,34 +23,54 @@ _ast _bisect _codecs + _collections + _ffi + _file + _hashlib + _io + _locale _lsprof + _md5 `_minimal_curses`_ + _multiprocessing + _pickle_support _random `_rawffi`_ - _ssl + _sha _socket _sre + _ssl + _stackless + _warnings _weakref + _winreg array + binascii bz2 cStringIO + clr + cmath `cpyext`_ crypt errno exceptions fcntl gc + imp itertools marshal math - md5 + micronumpy mmap operator + oracle parser posix pyexpat + pypyjit + rbench + rctime select - sha signal struct symbol @@ -78,7 +98,7 @@ * Supported by being rewritten in pure Python (possibly using ``ctypes``): see the `lib_pypy/`_ directory. 
Examples of modules that we support this way: ``ctypes``, ``cPickle``, - ``cStringIO``, ``cmath``, ``dbm`` (?), ``datetime``, ``binascii``... + ``cStringIO``, ``cmath``, ``dbm`` (?), ``datetime``... Note that some modules are both in there and in the list above; by default, the built-in module is used (but can be disabled at translation time). From commits-noreply at bitbucket.org Fri Apr 29 20:16:42 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 20:16:42 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: cStringIO is implemented in rpython nowadays Message-ID: <20110429181642.0469A282B52@codespeak.net> Author: Antonio Cuni <anto.cuni at gmail.com> Branch: documentation-cleanup Changeset: r43785:bc5e92451943 Date: 2011-04-29 20:13 +0200 http://bitbucket.org/pypy/pypy/changeset/bc5e92451943/ Log: cStringIO is implemented in rpython nowadays diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -97,8 +97,7 @@ * Supported by being rewritten in pure Python (possibly using ``ctypes``): see the `lib_pypy/`_ directory. Examples of modules that we - support this way: ``ctypes``, ``cPickle``, - ``cStringIO``, ``cmath``, ``dbm`` (?), ``datetime``... + support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; by default, the built-in module is used (but can be disabled at translation time). 
From commits-noreply at bitbucket.org Fri Apr 29 20:25:13 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 20:25:13 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: mention the postgresql fork Message-ID: <20110429182513.CC2EC282B52@codespeak.net> Author: Antonio Cuni <anto.cuni at gmail.com> Branch: documentation-cleanup Changeset: r43786:9ec12c539ca0 Date: 2011-04-29 20:25 +0200 http://bitbucket.org/pypy/pypy/changeset/9ec12c539ca0/ Log: mention the postgresql fork diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst --- a/pypy/doc/release-1.5.0.rst +++ b/pypy/doc/release-1.5.0.rst @@ -42,6 +42,9 @@ skews the relative performance in not yet studied ways, so that it is not yet a perfect tool to find subtle performance problems. +- There is an `external fork`_ which includes an RPython version of the + ``postgresql``. However, there are no prebuilt binaries for this. + Cheers, Carl Friedrich Bolz, Antonio Cuni, Maciej Fijalkowski, @@ -59,3 +62,4 @@ .. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _`Tkinter and IDLE`: http://morepypy.blogspot.com/2011/04/using-tkinter-and-idle-with-pypy.html .. _`cProfile`: http://docs.python.org/library/profile.html +.. _`external fork`: https://bitbucket.org/alex_gaynor/pypy-postgresql From commits-noreply at bitbucket.org Fri Apr 29 20:49:29 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 20:49:29 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Remove the modules listed too eagerly. Message-ID: <20110429184929.98736282B52@codespeak.net> Author: Armin Rigo <arigo at tunes.org> Branch: documentation-cleanup Changeset: r43787:419a56e02906 Date: 2011-04-29 20:49 +0200 http://bitbucket.org/pypy/pypy/changeset/419a56e02906/ Log: Remove the modules listed too eagerly. 
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -24,8 +24,7 @@ _bisect _codecs _collections - _ffi - _file + `_ffi`_ _hashlib _io _locale @@ -33,14 +32,12 @@ _md5 `_minimal_curses`_ _multiprocessing - _pickle_support _random `_rawffi`_ _sha _socket _sre _ssl - _stackless _warnings _weakref _winreg @@ -60,16 +57,12 @@ itertools marshal math - micronumpy mmap operator oracle parser posix pyexpat - pypyjit - rbench - rctime select signal struct @@ -108,6 +101,7 @@ .. the nonstandard modules are listed below... .. _`__pypy__`: __pypy__-module.html +.. _`_ffi`: ctypes-implementation.html .. _`_rawffi`: ctypes-implementation.html .. _`_minimal_curses`: config/objspace.usemodules._minimal_curses.html .. _`cpyext`: http://morepypy.blogspot.com/2010/04/using-cpython-extension-modules-with.html From commits-noreply at bitbucket.org Fri Apr 29 20:52:15 2011 From: commits-noreply at bitbucket.org (arigo) Date: Fri, 29 Apr 2011 20:52:15 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: Remove the modules listed too eagerly. (compat.html not regenerated) Message-ID: <20110429185215.3F389282B52@codespeak.net> Author: Armin Rigo <arigo at tunes.org> Branch: extradoc Changeset: r176:fbe333452c31 Date: 2011-04-29 20:52 +0200 http://bitbucket.org/pypy/pypy.org/changeset/fbe333452c31/ Log: Remove the modules listed too eagerly. 
(compat.html not regenerated) diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -18,7 +18,7 @@ Standard library modules supported by PyPy, in alphabetical order: -* ``__builtin__, __pypy__, _ast, _bisect, _codecs, _collections, _ffi, _file, _hashlib, _io, _locale, _lsprof, _md5, _minimal_curses, _multiprocessing, _pickle_support, _random, _rawffi, _sha, _socket, _sre, _ssl, _stackless, _warnings, _weakref, _winreg, array, binascii, bz2, cStringIO, clr, cmath, cpyext, crypt, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, micronumpy, mmap, operator, oracle, parser, posix, pyexpat, pypyjit, rbench, rctime, select, signal, struct, symbol, sys, termios, thread, time, token, unicodedata, zipimport, zlib`` +* ``__builtin__, __pypy__, _ast, _bisect, _codecs, _collections, _ffi, _hashlib, _io, _locale, _lsprof, _md5, _minimal_curses, _multiprocessing, _random, _rawffi, _sha, _socket, _sre, _ssl, _warnings, _weakref, _winreg, array, binascii, bz2, cStringIO, clr, cmath, cpyext, crypt, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, mmap, operator, oracle, parser, posix, pyexpat, select, signal, struct, symbol, sys, termios, thread, time, token, unicodedata, zipimport, zlib`` Supported, but written in pure-python: From commits-noreply at bitbucket.org Fri Apr 29 21:08:47 2011 From: commits-noreply at bitbucket.org (antocuni) Date: Fri, 29 Apr 2011 21:08:47 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: (antocuni, cfbolz) the blogger template Message-ID: <20110429190847.DF94736C055@codespeak.net> Author: Antonio Cuni <anto.cuni at gmail.com> Branch: extradoc Changeset: r3546:2c848fb97cda Date: 2011-04-29 21:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/2c848fb97cda/ Log: (antocuni, cfbolz) the blogger template diff --git a/blog/template.xml b/blog/template.xml new file mode 100644 --- /dev/null +++ b/blog/template.xml @@ -0,0 +1,1768 @@ +<?xml version="1.0" encoding="UTF-8" ?> +<!-- 
this template contains two modifications: it adds flattr buttons, and +adds an image of the logo just before the title --> + +<!DOCTYPE html> +<html b:version='2' class='v2' expr:dir='data:blog.languageDirection' xmlns='http://www.w3.org/1999/xhtml' xmlns:b='http://www.google.com/2005/gml/b' xmlns:data='http://www.google.com/2005/gml/data' xmlns:expr='http://www.google.com/2005/gml/expr'> + <head> + + <script type="text/javascript"> + /* <![CDATA[ */ + (function() { + var s = document.createElement('script'), t = document.getElementsByTagName('script')[0]; + + s.type = 'text/javascript'; + s.async = true; + s.src = 'https://api.flattr.com/js/0.6/load.js?mode=manual&uid=cfbolz&category=text&language=en_GB'; + t.parentNode.insertBefore(s, t); + })(); + function displayFlattrButton(postid, url, tags) { + var flattr_tle = document.getElementById("flattr_title_" + postid).innerHTML; + if (flattr_tle.length > 80) { + flattr_tle = flattr_tle.replace(/(.{0,71}) .*/, "$1 …"); + flattr_tle = flattr_tle.slice(0,79); + } + var flattr_dsc = document.getElementById("flattr_summary_" + postid).innerHTML; + flattr_dsc = flattr_dsc.replace(/<.*?>/g, ""); + if (flattr_dsc.length > 980) { + flattr_dsc = flattr_dsc.replace(/\n/g, " "); + flattr_dsc = flattr_dsc.replace(/(.{0,971}) .*/, "$1 …"); + flattr_dsc = flattr_dsc.slice(0,979); + } + if (tags.length > 230) { + tags = tags.replace(/(.{0,230}),/, "$1"); + tags = tags.slice(0,229); + } + tags = tags + ';'; + + document.write('<a class="FlattrButton" style="display:none;" title="' + +flattr_tle + '" href="' + url + '" rev="flattr;button:compact;tags:' + tags + '">' + flattr_dsc + '</a>'); + } + /* ]]> */ + </script> + + + <meta content='IE=EmulateIE7' http-equiv='X-UA-Compatible'/> + <b:if cond='data:blog.isMobile'> + <meta content='width=device-width,initial-scale=1.0,minimum-scale=1.0,maximum-scale=1.0' name='viewport'/> + <b:else/> + <meta content='width=1100' name='viewport'/> + </b:if> + <b:include data='blog' 
name='all-head-content'/> + <title><data:blog.pageTitle/> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +*/ + +/* Content +----------------------------------------------- */ +body { + font: $(body.font); + color: $(body.text.color); + background: $(body.background); + padding: 0 $(content.shadow.spread) $(content.shadow.spread) $(content.shadow.spread); + $(body.background.override) +} + +html body $(page.width.selector) { + min-width: 0; + max-width: 100%; + width: $(page.width); +} + +a:link { + text-decoration:none; + color: $(link.color); +} + +a:visited { + text-decoration:none; + color: $(link.visited.color); +} + +a:hover { + text-decoration:underline; + color: $(link.hover.color); +} + +.body-fauxcolumn-outer .fauxcolumn-inner { + background: transparent $(body.background.gradient.tile) repeat scroll top left; + _background-image: none; +} + +.body-fauxcolumn-outer .cap-top { + position: absolute; + z-index: 1; + height: 400px; + width: 100%; + background: $(body.background); + $(body.background.override) +} + +.body-fauxcolumn-outer .cap-top .cap-left { + width: 100%; + background: transparent $(body.background.gradient.cap) repeat-x scroll top left; + _background-image: none; +} + +.content-outer { + -moz-box-shadow: 0 0 $(content.shadow.spread) rgba(0, 0, 0, .15); + -webkit-box-shadow: 0 0 $(content.shadow.spread.webkit) rgba(0, 0, 0, .15); + -goog-ms-box-shadow: 0 0 $(content.shadow.spread.ie) #333333; + box-shadow: 0 0 $(content.shadow.spread) rgba(0, 0, 0, .15); + + margin-bottom: 1px; +} + +.content-inner { + padding: $(content.padding) $(content.padding.horizontal); +} + +$(content.background.color.selector) { + background-color: $(content.background.color); +} + +/* Header +----------------------------------------------- */ +.header-outer { + 
background: $(header.background.color) $(header.background.gradient) repeat-x scroll 0 -400px; + _background-image: none; +} + +.Header h1 { + font: $(header.font); + color: $(header.text.color); + text-shadow: $(header.shadow.offset.left) $(header.shadow.offset.top) $(header.shadow.spread) rgba(0, 0, 0, .2); +} + +.Header h1 a { + color: $(header.text.color); +} + +.Header .description { + font-size: $(description.text.size); + color: $(description.text.color); +} + +.header-inner .Header .titlewrapper { + padding: 22px $(header.padding); +} + +.header-inner .Header .descriptionwrapper { + padding: 0 $(header.padding); +} + +/* Tabs +----------------------------------------------- */ +.tabs-inner .section:first-child { + border-top: $(header.bottom.border.size) solid $(tabs.border.color); +} + +.tabs-inner .section:first-child ul { + margin-top: -$(header.border.size); + border-top: $(header.border.size) solid $(tabs.border.color); + border-left: $(header.border.horizontalsize) solid $(tabs.border.color); + border-right: $(header.border.horizontalsize) solid $(tabs.border.color); +} + +.tabs-inner .widget ul { + background: $(tabs.background.color) $(tabs.background.gradient) repeat-x scroll 0 -800px; + _background-image: none; + border-bottom: $(tabs.border.width) solid $(tabs.border.color); + + margin-top: $(tabs.margin.top); + margin-left: -$(tabs.margin.side); + margin-right: -$(tabs.margin.side); +} + +.tabs-inner .widget li a { + display: inline-block; + + padding: .6em 1em; + + font: $(tabs.font); + color: $(tabs.text.color); + + border-$startSide: $(tabs.border.width) solid $(content.background.color); + border-$endSide: $(tabs.bevel.border.width) solid $(tabs.border.color); +} + +.tabs-inner .widget li:first-child a { + border-$startSide: none; +} + +.tabs-inner .widget li.selected a, .tabs-inner .widget li a:hover { + color: $(tabs.selected.text.color); + background-color: $(tabs.selected.background.color); + text-decoration: none; +} + +/* Columns 
+----------------------------------------------- */ +.main-outer { + border-top: $(main.border.width) solid $(body.rule.color); +} + +.fauxcolumn-left-outer .fauxcolumn-inner { + border-right: 1px solid $(body.rule.color); +} + +.fauxcolumn-right-outer .fauxcolumn-inner { + border-left: 1px solid $(body.rule.color); +} + +/* Headings +----------------------------------------------- */ +h2 { + margin: 0 0 1em 0; + + font: $(widget.title.font); + color: $(widget.title.text.color); + text-transform: uppercase; +} + +/* Widgets +----------------------------------------------- */ +.widget .zippy { + color: $(widget.alternate.text.color); + text-shadow: 2px 2px 1px rgba(0, 0, 0, .1); +} + +.widget .popular-posts ul { + list-style: none; +} + +/* Posts +----------------------------------------------- */ +.date-header span { + background-color: $(date.header.background.color); + color: $(date.header.color); + padding: $(date.header.padding); + letter-spacing: $(date.header.letterspacing); + margin: $(date.header.margin); +} + +.main-inner { + padding-top: $(main.padding.top); + padding-bottom: $(main.padding.bottom); +} + +.main-inner .column-center-inner { + padding: 0 $(main.padding); +} + +.main-inner .column-center-inner .section { + margin: 0 $(main.section.margin); +} + +.post { + margin: 0 0 $(post.margin.bottom) 0; +} + +h3.post-title, .comments h4 { + font: $(post.title.font); + margin: .75em 0 0; +} + +.post-body { + font-size: 110%; + line-height: 1.4; + position: relative; +} + +.post-body img, .post-body .tr-caption-container, .Profile img, .Image img, +.BlogList .item-thumbnail img { + padding: $(image.border.small.size); + + background: $(image.background.color); + border: 1px solid $(image.border.color); + + -moz-box-shadow: 1px 1px 5px rgba(0, 0, 0, .1); + -webkit-box-shadow: 1px 1px 5px rgba(0, 0, 0, .1); + box-shadow: 1px 1px 5px rgba(0, 0, 0, .1); +} + +.post-body img, .post-body .tr-caption-container { + padding: $(image.border.large.size); +} + 
+.post-body .tr-caption-container { + color: $(image.text.color); +} + +.post-body .tr-caption-container img { + padding: 0; + + background: transparent; + border: none; + + -moz-box-shadow: 0 0 0 rgba(0, 0, 0, .1); + -webkit-box-shadow: 0 0 0 rgba(0, 0, 0, .1); + box-shadow: 0 0 0 rgba(0, 0, 0, .1); +} + +.post-header { + margin: 0 0 1.5em; + + line-height: 1.6; + font-size: 90%; +} + +.post-footer { + margin: 20px -2px 0; + padding: 5px 10px; + + color: $(post.footer.text.color); + background-color: $(post.footer.background.color); + border-bottom: 1px solid $(post.footer.border.color); + + line-height: 1.6; + font-size: 90%; +} + +#comments .comment-author { + padding-top: 1.5em; + + border-top: 1px solid $(body.rule.color); + background-position: 0 1.5em; +} + +#comments .comment-author:first-child { + padding-top: 0; + + border-top: none; +} + +.avatar-image-container { + margin: .2em 0 0; +} + +#comments .avatar-image-container img { + border: 1px solid $(image.border.color); +} + +/* Accents +---------------------------------------------- */ +.section-columns td.columns-cell { + border-$startSide: 1px solid $(body.rule.color); +} + +.blog-pager { + background: $(paging.background); +} + +.blog-pager-older-link, .home-link, +.blog-pager-newer-link { + background-color: $(content.background.color); + padding: 5px; +} + +.footer-outer { + border-top: $(footer.bevel) dashed #bbbbbb; +} + +/* Mobile +----------------------------------------------- */ +.mobile .content-outer { + -webkit-box-shadow: 0 0 3px rgba(0, 0, 0, .15); + box-shadow: 0 0 3px rgba(0, 0, 0, .15); + padding: 0 $(content.shadow.spread); +} + +body.mobile .AdSense { + margin-left: -$(content.shadow.spread); +} + +.mobile .tabs-inner .widget ul { + margin-left: 0; + margin-right: 0; +} + +.mobile .post { + margin: 0; +} + +.mobile .main-inner .column-center-inner .section { + margin: 0; +} + +.mobile .date-header span { + padding: 0.4em 10px; + margin: 0 -10px; +} + +.mobile h3.post-title { + 
margin: 0; +} + +.mobile .blog-pager { + background: transparent; +} + +.mobile .footer-outer { + border-top: none; +} + +.mobile .main-inner, .mobile .footer-inner { + background-color: $(content.background.color); +} + +.mobile-index-contents { + color: $(body.text.color); +} + +.mobile-link-button { + background-color: $(link.color); +} + +.mobile-link-button a:link, .mobile-link-button a:visited { + color: $(content.background.color); +} +]]> + + + + + + + + + + + + + +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+ +
+
+
+
+ +
+
+
+
+
+
+ + + + + + + + +
+
+

+ +

+
+ +
+ +
+
+

+ +

+
+ +
+
+ + +
+ + + + + + + +
+
+ + +
+
+

+ +

+
+ +
+
+
+ + + + + + + + +
+

+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+ + +
+
+
+
+
+
+
+ +
+
+
+
+
+ +
+
+
+ +
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ + +
+ +
+
+ + + +
+ + + + + + + + + + + + + + + + + + +
+
+ + + + +
+
+
+ +

+ +
+ + + +
+ + +
+ +
+ + +

+ + + + + + + + + + + + + +

+ + +
+
+
+ +
+ +
+
+ + + + + + +
+ + + +
+
+ +
+
+
+
+
+
+
+
+ + + +
+ + +

+

+

+ + + +
+ + + +
+
+ + +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+
+
+
+
+
+
+ +
+
+ + +
+
+
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + +
+
+
+ + + + + +

+
+ + +
+ + + + + + +

+
+
+ +
+ + +
+
+ + + +

+
+
+
+
+ + + + + + + + + +
+
+ +
+
+ +
    + +
  • + () +
  • +
    +
+
+ + + + + +
    +
  • + + + () + + + + + + +
  • +
+
+
+ + + + + ▼  + + + + + + ◄  + + ►  + + + + + + + +
    + +
  • +
    +
+
+
+ + + +

+
+
+ +
    + +
  • +
    +
+ + + + + + + +
+
+ +
+ + +
+
+ +
+
+ +
+ + +
+
+
+ + + + +

+
+
+ +
+ + +
+
+ + + + +

+
+
+ +
+ + +
+
+ From commits-noreply at bitbucket.org Sat Apr 30 11:53:14 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 11:53:14 +0200 (CEST) Subject: [pypy-svn] pypy default: A hack to force users to specify -O2 or -Ojit in Message-ID: <20110430095314.28704282B52@codespeak.net> Author: Armin Rigo Branch: Changeset: r43788:a429ba536944 Date: 2011-04-30 11:52 +0200 http://bitbucket.org/pypy/pypy/changeset/a429ba536944/ Log: A hack to force users to specify -O2 or -Ojit in "translate.py targetpypystandalone". diff --git a/pypy/translator/goal/targetpypystandalone.py b/pypy/translator/goal/targetpypystandalone.py --- a/pypy/translator/goal/targetpypystandalone.py +++ b/pypy/translator/goal/targetpypystandalone.py @@ -105,6 +105,9 @@ return parser def handle_config(self, config, translateconfig): + if translateconfig._cfgimpl_value_owners['opt'] == 'default': + raise Exception("You have to specify the --opt level.\n" + "Try --opt=2 or --opt=jit, or equivalently -O2 or -Ojit .") self.translateconfig = translateconfig # set up the objspace optimizations based on the --opt argument from pypy.config.pypyoption import set_pypy_opt_level From commits-noreply at bitbucket.org Sat Apr 30 12:07:57 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 12:07:57 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): a link to the BF tutorial Message-ID: <20110430100757.799F5282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43789:0b3486517281 Date: 2011-04-30 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/0b3486517281/ Log: (lac, cfbolz): a link to the BF tutorial diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -231,16 +231,17 @@ * `The translation document`_: a detailed description of our translation process. 
- * All our `Technical reports`_, including `Compiling dynamic language - implementations`_. - * `JIT Generation in PyPy`_, describing how we produce a Just-in-time Compiler from an interpreter. -.. _`documentation index`: docindex.html + * A tutorial of how to use the `RPython toolchain`_ to `implement your own + interpreter`_. + +.. _`documentation index`: index.html#project-documentation .. _`getting-started`: getting-started.html .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`the translation document`: translation.html +.. _`RPython toolchain`: translation.html .. _`Compiling dynamic language implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf .. _`Technical reports`: index-report.html @@ -257,6 +258,7 @@ .. _stackless: stackless.html .. _`generate Just-In-Time Compilers`: jit/index.html .. _`JIT Generation in PyPy`: jit/index.html +.. _`implement your own interpreter`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. include:: _ref.txt From commits-noreply at bitbucket.org Sat Apr 30 12:07:59 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 12:07:59 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): kill references to docindex Message-ID: <20110430100759.23F16282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43790:07a2bb93bdf3 Date: 2011-04-30 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/07a2bb93bdf3/ Log: (lac, cfbolz): kill references to docindex diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -399,9 +399,9 @@ .. _Pygame: http://www.pygame.org/ .. _Standard object space: objspace.html#the-standard-object-space .. 
_mailing lists: index.html -.. _documentation: docindex.html +.. _documentation: index.html#project-documentation .. _unit tests: coding-guide.html#test-design -.. _`directory reference`: docindex.html#directory-reference +.. _`directory reference`: index.html#pypy-directory-reference .. include:: _ref.txt diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -80,7 +80,6 @@ .. _`Mercurial commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn .. _`development mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html -.. _`Documentation`: docindex.html .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html From commits-noreply at bitbucket.org Sat Apr 30 12:08:01 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 12:08:01 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): update release announcement, put it into an index Message-ID: <20110430100801.D4B6B282B57@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43791:4faa86a2de87 Date: 2011-04-30 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4faa86a2de87/ Log: (lac, cfbolz): update release announcement, put it into an index diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst --- a/pypy/doc/release-1.5.0.rst +++ b/pypy/doc/release-1.5.0.rst @@ -2,10 +2,10 @@ PyPy 1.5: Catching Up ====================== -We're pleased to announce the 1.5 release of PyPy. This release is updating -PyPy to the features of CPython 2.7.1, including the standard library. Thus the -features of `CPython 2.6`_ and `CPython 2.7`_ are now supported. It also -contains additional performance improvements. You can download it here: +We're pleased to announce the 1.5 release of PyPy. This release updates +PyPy with the features of CPython 2.7.1, including the standard library. 
Thus +all the features of `CPython 2.6`_ and `CPython 2.7`_ are now supported. It +also contains additional performance improvements. You can download it here: http://pypy.org/download.html @@ -13,15 +13,16 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6`_ performance comparison) +CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6.2`_ performance comparison) due to its integrated tracing JIT compiler. -Among its new features, this release includes the features of CPython 2.6 and -2.7. It also includes a large number of small improvements to the tracing JIT -compiler. +This release includes the features of CPython 2.6 and 2.7. It also includes a +large number of small improvements to the tracing JIT compiler. It supports +Intel machines running Linux 32/64, Mac OS X, Windows. Windows 64 is not +yet supported. Numerous speed achievements are described on `our blog`_. Normalized speed -charts comparing `pypy 1.5 and pypy 1.4`_ as well as `pypy 1.4 and cpython +charts comparing `pypy 1.5 and pypy 1.4`_ as well as `pypy 1.5 and cpython 2.6.2`_ are available on our benchmark website. The speed improvement over 1.4 seems to be around 25% on average. @@ -38,17 +39,23 @@ - These changes make it possible to support `Tkinter and IDLE`_. -- The `cProfile`_ profiler is now working together with the JIT. However, it - skews the relative performance in not yet studied ways, so that it is not yet - a perfect tool to find subtle performance problems. +- The `cProfile`_ profiler is now working with the JIT. However, it skews the + performance in unstudied ways. Therefore it is not yet usable to analyze + subtle performance problems (the same is true for CPython of course). - There is an `external fork`_ which includes an RPython version of the ``postgresql``. However, there are no prebuilt binaries for this. +- Our developer documentation was moved to Sphinx and cleaned up. 
It now lives + on http://pypy.readthedocs.org + +- and many small things :-) + + Cheers, -Carl Friedrich Bolz, Antonio Cuni, Maciej Fijalkowski, -Amaury Forgeot d'Arc, Armin Rigo and the PyPy team +Carl Friedrich Bolz, Laura Creighton, Antonio Cuni, Maciej Fijalkowski, +Amaury Forgeot d'Arc, Alex Gaynor, Armin Rigo and the PyPy team .. _`CPython 2.6`: http://docs.python.org/dev/whatsnew/2.6.html diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -15,3 +15,4 @@ release-1.4.0.rst release-1.4.0beta.rst release-1.4.1.rst + release-1.5.0.rst From commits-noreply at bitbucket.org Sat Apr 30 12:08:03 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 12:08:03 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac): Add some links to getting-started Message-ID: <20110430100803.CB835282B89@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43792:5d1a7bd78ddb Date: 2011-04-30 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/5d1a7bd78ddb/ Log: (cfbolz, lac): Add some links to getting-started diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -8,6 +8,7 @@ What is PyPy ? ============== + In common parlance, PyPy has been used to mean two things. The first is the `RPython translation toolchain`_, which is a framework for generating dynamic programming language implementations. And the second is one @@ -139,15 +140,19 @@ .. 
_`our nightly tests:`: http://buildbot.pypy.org/summary?branch= Where to go from here ----------------------- +====================== After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ - - `Learning more about the translation toolchain and how to develop (with) PyPy`_ + - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ + - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ + - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html -.. _`Learning more about the translation toolchain and how to develop (with) PyPy`: getting-started-dev.html +.. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html +.. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +.. _`Look at our benchmark results`: http://speed.pypy.org .. _setuptools: http://pypi.python.org/pypi/setuptools @@ -159,7 +164,7 @@ interesting information. Additionally, in true hacker spirit, you may just `start reading sources`_ . -.. _`documentation section`: docindex.html +.. _`documentation section`: index.html#project-documentation .. _`start reading sources`: getting-started-dev.html#start-reading-sources Filing bugs or feature requests From commits-noreply at bitbucket.org Sat Apr 30 12:49:59 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 12:49:59 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: Say that the Windows release is beta. Message-ID: <20110430104959.E091E282B52@codespeak.net> Author: Armin Rigo Branch: documentation-cleanup Changeset: r43793:e8ee3d2d179d Date: 2011-04-30 12:49 +0200 http://bitbucket.org/pypy/pypy/changeset/e8ee3d2d179d/ Log: Say that the Windows release is beta. 
diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst --- a/pypy/doc/release-1.5.0.rst +++ b/pypy/doc/release-1.5.0.rst @@ -18,8 +18,9 @@ This release includes the features of CPython 2.6 and 2.7. It also includes a large number of small improvements to the tracing JIT compiler. It supports -Intel machines running Linux 32/64, Mac OS X, Windows. Windows 64 is not -yet supported. +Intel machines running Linux 32/64 or Mac OS X. Windows is beta (it roughly +works but a lot of small issues have not been fixed so far). Windows 64 is +not yet supported. Numerous speed achievements are described on `our blog`_. Normalized speed charts comparing `pypy 1.5 and pypy 1.4`_ as well as `pypy 1.5 and cpython From commits-noreply at bitbucket.org Sat Apr 30 12:57:13 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 12:57:13 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz, lac): remove outdated references to 2.2, 2.3, 2.4 and 2.5. some fixes in getting-started Message-ID: <20110430105713.E9649282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43794:36db01d474e6 Date: 2011-04-30 12:51 +0200 http://bitbucket.org/pypy/pypy/changeset/36db01d474e6/ Log: (cfbolz, lac): remove outdated references to 2.2, 2.3, 2.4 and 2.5. some fixes in getting-started diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -159,8 +159,8 @@ interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, - and input data files that allow it to parse both Python 2.3 and 2.4 - syntax. Once the input data has been processed, the parser can be + and input data files that allow it to parse the syntax of various Python + versions. Once the input data has been processed, the parser can be translated by the above machinery into efficient code. 
* `pypy/interpreter/astcompiler`_ contains the compiler. This @@ -347,18 +347,6 @@ pygame: http://www.pygame.org/download.shtml -CTypes on Python 2.4 -++++++++++++++++++++++++++++ - -`ctypes`_ is included in CPython 2.5 and higher. CPython 2.4 users needs to -install it if they want to run low-level tests. See -the `download page of ctypes`_. - -.. _`download page of ctypes`: https://sourceforge.net/projects/ctypes/files/ -.. _`ctypes`: http://starship.python.net/crew/theller/ctypes/ - -.. _`py.test`: - py.test and the py lib +++++++++++++++++++++++ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -11,7 +11,7 @@ Getting into PyPy ... ============================================= -* `Release 1.4`_: the latest official release +* `Release 1.5`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -83,7 +83,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 1.4`: http://pypy.org/download.html +.. _`Release 1.5`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -21,7 +21,7 @@ To actually use PyPy's Python interpreter, the first thing to do is to `download a pre-built PyPy`_ for your architecture. -.. `download a pre-built PyPy`: http://pypy.org/download.html +.. _`download a pre-built PyPy`: http://pypy.org/download.html Translating the PyPy Python interpreter --------------------------------------- @@ -32,13 +32,13 @@ .. _`windows document`: windows.html You can translate the whole of PyPy's Python interpreter to low level C code, -`CLI code`_, or `JVM code`_. +or `CLI code`_. 1. 
First `download a pre-built PyPy`_ for your architecture which you will -use to translate your Python interpreter. It is, of course, possible to -translate with a CPython 2.6 or later, but this is not the preferred way, -because it will take a lot longer to run -- depending on your architecture, -between two and three times as long. + use to translate your Python interpreter. It is, of course, possible to + translate with a CPython 2.6 or later, but this is not the preferred way, + because it will take a lot longer to run -- depending on your architecture, + between two and three times as long. 2. Install build-time dependencies. On a Debian box these are:: @@ -66,6 +66,7 @@ * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) + 3. Translation is time-consuming -- 45 minutes on a very fast machine -- and RAM-hungry. As of March 2011, you will need **at least** 2 GB of memory on a @@ -73,8 +74,8 @@ are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. But if all - you want to do is to test that some new feature that you just wrote - translates, level 1 is enough. + you want to do is to test that some new feature that you just wrote + translates, level 1 is enough. Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. @@ -88,7 +89,7 @@ possibly replacing ``--opt=jit`` with another `optimization level`_ of your choice like ``--opt=2`` if you do not want to include the JIT - compiler. + compiler, which makes the Python interpreter much slower. .. _`optimization level`: config/opt.html @@ -100,22 +101,20 @@ executable. 
The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.5.2 (64177, Apr 16 2009, 16:33:13) - [PyPy 1.1.0] on linux2 + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``this sentence is false'' >>>> 46 - 4 42 >>>> from test import pystone >>>> pystone.main() - Pystone(1.1) time for 50000 passes = 2.57 - This machine benchmarks at 19455.3 pystones/second + Pystone(1.1) time for 50000 passes = 0.280017 + This machine benchmarks at 178561 pystones/second >>>> This executable can be moved around or copied on other machines; see -Installation_ below. For now a JIT-enabled ``pypy-c`` always produces -debugging output to stderr when it exits, unless translated with -``--jit-debug=off``. +Installation_ below. The ``translate.py`` script takes a very large number of options controlling what to translate and how. See ``translate.py -h``. Some of the more @@ -142,7 +141,7 @@ ++++++++++++++++++++++++++++++++++++++++ It is possible to have non-standard features enabled for translation, -but they are not really tested any more. Look for example at the +but they are not really tested any more. Look, for example, at the `objspace proxies`_ document. .. _`objspace proxies`: objspace-proxies.html @@ -162,8 +161,8 @@ the convenience ./pypy-cli script:: $ ./pypy-cli - Python 2.5.2 (64219, Apr 17 2009, 13:54:38) - [PyPy 1.1.0] on linux2 + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``distopian and utopian chairs'' >>>> @@ -183,29 +182,31 @@ To try out the experimental .NET integration, check the documentation of the clr_ module. -.. _`JVM code`: +.. not working now: -Translating using the JVM backend -+++++++++++++++++++++++++++++++++ + .. 
_`JVM code`: -To create a standalone JVM executable:: + Translating using the JVM backend + +++++++++++++++++++++++++++++++++ - ./translate.py --backend=jvm targetpypystandalone.py + To create a standalone JVM executable:: -This will create a jar file ``pypy-jvm.jar`` as well as a convenience -script ``pypy-jvm`` for executing it. To try it out, simply run -``./pypy-jvm``:: + ./translate.py --backend=jvm targetpypystandalone.py - $ ./pypy-jvm - Python 2.5.2 (64214, Apr 17 2009, 08:11:23) - [PyPy 1.1.0] on darwin - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> + This will create a jar file ``pypy-jvm.jar`` as well as a convenience + script ``pypy-jvm`` for executing it. To try it out, simply run + ``./pypy-jvm``:: -Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment -the executable does not provide any interesting features, like integration with -Java. + $ ./pypy-jvm + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + And now for something completely different: ``# assert did not crash'' + >>>> + + Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment + the executable does not provide any interesting features, like integration with + Java. Installation ++++++++++++ @@ -218,11 +219,11 @@ For installation purposes, note that the executable needs to be able to find its version of the Python standard library in the following three -directories: ``lib-python/2.5.2``, ``lib-python/modified-2.5.2`` and +directories: ``lib-python/2.7``, ``lib-python/modified-2.7`` and ``lib_pypy``. They are located by "looking around" starting from the directory in which the executable resides. 
The current logic is to try to find a ``PREFIX`` from which the directories -``PREFIX/lib-python/2.5.2`` and ``PREFIX/lib-python/modified.2.5.2`` and +``PREFIX/lib-python/2.7`` and ``PREFIX/lib-python/modified.2.7`` and ``PREFIX/lib_pypy`` can all be found. The prefixes that are tried are:: . @@ -249,8 +250,8 @@ ... $ bin/easy_install WebOb $ bin/pypy-c - Python 2.5.2 (64714, Apr 27 2009, 08:16:13) - [PyPy 1.1.0] on linux2 + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy doesn't have copolyvariadic dependently-monomorphed hyperfluxads'' >>>> import webob @@ -265,7 +266,7 @@ +++++++++++++++++++++ To start interpreting Python with PyPy, install a C compiler that is -supported by distutils and use Python 2.4 or greater to run PyPy:: +supported by distutils and use Python 2.5 or greater to run PyPy:: cd pypy python bin/py.py @@ -305,7 +306,7 @@ Alternatively, as with regular Python, you can simply give a script name on the command line:: - python py.py ../../lib-python/2.5.2/test/pystone.py 10 + python py.py ../../lib-python/2.7/test/pystone.py 10 See our `configuration sections`_ for details about what all the commandline options do. diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -340,9 +340,8 @@ ------------------------- While implementing the integer type, we stumbled over the problem that -integers are quite in flux in CPython right now. Starting on Python 2.2, -integers mutate into longs on overflow. However, shifting to the left -truncates up to 2.3 but extends to longs as well in 2.4. By contrast, we need +integers are quite in flux in CPython right now. Starting with Python 2.4, +integers mutate into longs on overflow. 
In contrast, we need a way to perform wrap-around machine-sized arithmetic by default, while still being able to check for overflow when we need it explicitly. Moreover, we need a consistent behavior before and after translation. @@ -363,15 +362,6 @@ ovfcheck() as a hint: they replace the whole ``ovfcheck(x+y)`` expression with a single overflow-checking addition in C. -**ovfcheck_lshift()** - - ovfcheck_lshift(x, y) is a workaround for ovfcheck(x< - + The installation should be complete now. To run Python for .NET, diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -79,7 +79,7 @@ $ bin/py.py --objspace-std-withrope faking - PyPy 0.99.0 in StdObjSpace on top of Python 2.4.4c1 (startuptime: 17.24 secs) + PyPy 1.5.0-alpha0 in StdObjSpace on top of Python 2.7.1+ (startuptime: 11.38 secs) >>>> import sys >>>> sys.maxint 2147483647 diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -22,7 +22,8 @@ Code objects are a nicely preprocessed, structured representation of source code, and their main content is *bytecode*. We use the same -compact bytecode format as CPython 2.4. Our bytecode compiler is +compact bytecode format as CPython 2.7, with minor differences in the bytecode +set. Our bytecode compiler is implemented as a chain of flexible passes (tokenizer, lexer, parser, abstract syntax tree builder, bytecode generator). 
The latter passes are based on the ``compiler`` package from the standard library of From commits-noreply at bitbucket.org Sat Apr 30 12:57:20 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 12:57:20 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: kill part about easy_install Message-ID: <20110430105720.2552D282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43795:f3477bfd239b Date: 2011-04-30 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/f3477bfd239b/ Log: kill part about easy_install diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -227,11 +227,11 @@ ``PREFIX/lib_pypy`` can all be found. The prefixes that are tried are:: . - ./lib/pypy1.2 + ./lib/pypy1.5 .. - ../lib/pypy1.2 + ../lib/pypy1.5 ../.. - ../../lib/pypy-1.2 + ../../lib/pypy-1.5 ../../.. etc. @@ -241,22 +241,6 @@ most code will be fine. However, the ``sys.prefix`` will be unset and some existing libraries assume that this is never the case. -In order to use ``distutils`` or ``setuptools`` a directory ``PREFIX/site-packages`` needs to be created. Here's an example session setting up and using ``easy_install``:: - - $ cd PREFIX - $ mkdir site-packages - $ curl -sO http://peak.telecommunity.com/dist/ez_setup.py - $ bin/pypy-c ez_setup.py - ... - $ bin/easy_install WebOb - $ bin/pypy-c - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``PyPy doesn't have copolyvariadic dependently-monomorphed hyperfluxads'' - >>>> import webob - >>>> - .. 
_`py.py interpreter`: Running the Python Interpreter Without Translation From commits-noreply at bitbucket.org Sat Apr 30 13:19:28 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 13:19:28 +0200 (CEST) Subject: [pypy-svn] pypy default: (lac, cfbolz): make --view imply --pdb, otherwise it has no effect Message-ID: <20110430111928.ED21F282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43796:7f2db120f9f2 Date: 2011-04-30 13:10 +0200 http://bitbucket.org/pypy/pypy/changeset/7f2db120f9f2/ Log: (lac, cfbolz): make --view imply --pdb, otherwise it has no effect diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py --- a/pypy/translator/goal/translate.py +++ b/pypy/translator/goal/translate.py @@ -89,7 +89,6 @@ 'translation.debug': False, } -# we want 2.4 expand_default functionality import optparse from pypy.tool.ansi_print import ansi_log log = py.log.Producer("translation") @@ -210,6 +209,10 @@ from pypy.translator import translator from pypy.translator import driver from pypy.translator.tool.pdbplus import PdbPlusShow + + if translateconfig.view: + translateconfig.pdb = True + if translateconfig.profile: from cProfile import Profile prof = Profile() From commits-noreply at bitbucket.org Sat Apr 30 13:36:36 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 13:36:36 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: Updates. Message-ID: <20110430113636.9C9B9282B52@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r177:33ecaf6f033f Date: 2011-04-30 13:36 +0200 http://bitbucket.org/pypy/pypy.org/changeset/33ecaf6f033f/ Log: Updates. diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -59,7 +59,7 @@ your build system, it might work out of the box or will be slightly harder.

Standard library modules supported by PyPy, in alphabetical order:

    -
  • __builtin__, __pypy__, _ast, _bisect, _codecs, _collections, _ffi, _file, _hashlib, _io, _locale, _lsprof, _md5, _minimal_curses, _multiprocessing, _pickle_support, _random, _rawffi, _sha, _socket, _sre, _ssl, _stackless, _warnings, _weakref, _winreg, array, binascii, bz2, cStringIO, clr, cmath, cpyext, crypt, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, micronumpy, mmap, operator, oracle, parser, posix, pyexpat, pypyjit, rbench, rctime, select, signal, struct, symbol, sys, termios, thread, time, token, unicodedata, zipimport, zlib
  • +
  • __builtin__, __pypy__, _ast, _bisect, _codecs, _collections, _ffi, _hashlib, _io, _locale, _lsprof, _md5, _minimal_curses, _multiprocessing, _random, _rawffi, _sha, _socket, _sre, _ssl, _warnings, _weakref, _winreg, array, binascii, bz2, cStringIO, clr, cmath, cpyext, crypt, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, mmap, operator, oracle, parser, posix, pyexpat, select, signal, struct, symbol, sys, termios, thread, time, token, unicodedata, zipimport, zlib

Supported, but written in pure-python:

    diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -97,8 +97,7 @@

These versions are not officially part of the release 1.5, which focuses on the JIT. You can find prebuilt binaries for them on our -nightly build or in the release binaries, or translate them -yourself.

+nightly build, or translate them yourself.

Installing

@@ -137,14 +136,14 @@
 pypy translate.py -Ojit                # get the JIT version
 pypy translate.py -O2                  # get the no-jit version
-pypy translate.py --sandbox            # get the sandbox version
-pypy translate.py --stackless          # get the stackless version
+pypy translate.py -O2 --sandbox        # get the sandbox version
+pypy translate.py -O2 --stackless      # get the stackless version
 pypy translate.py -Ojit --backend=cli  # only for branch/cli-jit
 
  • Enjoy Mandelbrot :-) It takes on the order of half an hour to -finish the translation, and 1.7 GB of RAM on a 32-bit system -and 3.0 GB on 64-bit systems. (Do not start a translation on a +finish the translation, and 2 GB of RAM on a 32-bit system +and 4 GB on 64-bit systems. (Do not start a translation on a machine with insufficient RAM! It will just swap forever.)

  • @@ -170,11 +169,13 @@ 1fb62a813978c2581e9e09debad6b116 pypy-1.4.1-linux64.tar.bz2 8584c4e8c042f5b661fcfffa0d9b8a25 pypy-1.4.1-osx.tar.bz2 769b3fb134944ee8c22ad0834970de3b pypy-1.4.1-osx64.tar.bz2 +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-win32.zip ebbbb156b1eb842e9e65d909ed5f9f6d pypy-1.4.1-src.tar.bz2 6e2366377ad2f0c583074d3ba6f60d064549bef2 pypy-1.4.1-linux.tar.bz2 1cfd53343e19264905a00d2ffcf83e03e39dcbb3 pypy-1.4.1-linux64.tar.bz2 961470e7510c47b8f56e6cc6da180605ba058cb6 pypy-1.4.1-osx.tar.bz2 8e2830bef80b93f4d3c016b972fbdf7bcd403abc pypy-1.4.1-osx64.tar.bz2 +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-win32.zip 922a8815377fe2e0c015338fa8b28ae16bf8c840 pypy-1.4.1-src.tar.bz2
    diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -50,7 +50,7 @@

    Here are some ideas to help PyPy development:

    • use pypy for your projects and provide detailed feedback
    • -
    • help implement Python2.6, 2.7 or 3.x features
    • +
    • talk to us about how to support Python 3.x
    • write blog posts or tweets about your experiences
    • help porting to new platforms
    • contact us and get involved
    • diff --git a/js/detect.js b/js/detect.js --- a/js/detect.js +++ b/js/detect.js @@ -2,14 +2,24 @@ $(document).ready(function() { var download_url, download_text; if (navigator.platform.indexOf('Linux') != -1) { - download_url = 'download/pypy-1.4.1-linux.tar.bz2'; - download_text = 'Download linux x86 bin'; + if (navigator.platform.indexOf('64') != -1) { + download_url = 'download/pypy-1.5-linux64.tar.bz2'; + download_text = 'Download linux x86-64 bin'; + } else { + download_url = 'download/pypy-1.5-linux.tar.bz2'; + download_text = 'Download linux x86 bin (32 bit)'; + } } else if (navigator.platform.indexOf('Win') != -1) { - download_url = 'download/pypy-1.4.1-win32.zip'; + download_url = 'download/pypy-1.5-win32.zip'; download_text = 'Download Windows x86 bin'; } else if (navigator.platform.indexOf('Mac') != 1) { - download_url = 'download/pypy-1.4.1-osx.tar.bz2'; - downloat_text = 'Download Mac OS X 10.6 bin'; + if (navigator.platform.indexOf('64') != -1) { + download_url = 'download/pypy-1.5-osx64.tar.bz2'; + downloat_text = 'Download Mac OS X 10.6 bin (64 bit)'; + } else { + download_url = 'download/pypy-1.5-osx.tar.bz2'; + downloat_text = 'Download Mac OS X 10.6 bin (32 bit)'; + } } else { download_url = "download.html"; download_text = "Download page"; diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -77,11 +77,9 @@ These versions are not officially part of the release 1.5, which focuses on the JIT. You can find prebuilt binaries for them on our -`nightly build`_ or in the `release`_ binaries, or translate_ them -yourself. +`nightly build`_, or translate_ them yourself. .. _`nightly build`: http://buildbot.pypy.org/nightly/trunk/ -.. 
_`release`: http://pypy.org/download/ Installing @@ -127,13 +125,13 @@ pypy translate.py -Ojit # get the JIT version pypy translate.py -O2 # get the no-jit version - pypy translate.py --sandbox # get the sandbox version - pypy translate.py --stackless # get the stackless version + pypy translate.py -O2 --sandbox # get the sandbox version + pypy translate.py -O2 --stackless # get the stackless version pypy translate.py -Ojit --backend=cli # only for branch/cli-jit 5. Enjoy Mandelbrot ``:-)`` It takes on the order of half an hour to - finish the translation, and 1.7 GB of RAM on a 32-bit system - and 3.0 GB on 64-bit systems. (Do not start a translation on a + finish the translation, and 2 GB of RAM on a 32-bit system + and 4 GB on 64-bit systems. (Do not start a translation on a machine with insufficient RAM! It will just swap forever.) Notes: @@ -170,10 +168,12 @@ 1fb62a813978c2581e9e09debad6b116 pypy-1.4.1-linux64.tar.bz2 8584c4e8c042f5b661fcfffa0d9b8a25 pypy-1.4.1-osx.tar.bz2 769b3fb134944ee8c22ad0834970de3b pypy-1.4.1-osx64.tar.bz2 + xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-win32.zip ebbbb156b1eb842e9e65d909ed5f9f6d pypy-1.4.1-src.tar.bz2 6e2366377ad2f0c583074d3ba6f60d064549bef2 pypy-1.4.1-linux.tar.bz2 1cfd53343e19264905a00d2ffcf83e03e39dcbb3 pypy-1.4.1-linux64.tar.bz2 961470e7510c47b8f56e6cc6da180605ba058cb6 pypy-1.4.1-osx.tar.bz2 8e2830bef80b93f4d3c016b972fbdf7bcd403abc pypy-1.4.1-osx64.tar.bz2 + xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-win32.zip 922a8815377fe2e0c015338fa8b28ae16bf8c840 pypy-1.4.1-src.tar.bz2 diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -30,7 +30,7 @@ .. _`main executable`: download.html#with-a-jit-compiler .. _`Try it out!`: download.html#with-a-jit-compiler -.. _`really fast`: http://bit.ly/fhSlfk +.. 
_`really fast`: http://speed.pypy.org/ Sandboxing -------------------- diff --git a/source/howtohelp.txt b/source/howtohelp.txt --- a/source/howtohelp.txt +++ b/source/howtohelp.txt @@ -10,7 +10,7 @@ * use pypy for your projects and provide detailed feedback_ -* help implement Python2.6, 2.7 or 3.x features +* talk to us about how to support Python 3.x * write blog posts or tweets about your experiences diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -58,7 +58,7 @@

      Speed

      Our main executable comes with a Just-in-Time compiler. It is -really fast in running most benchmarks. Try it out!

      +really fast in running most benchmarks. Try it out!

      Sandboxing

      From commits-noreply at bitbucket.org Sat Apr 30 13:36:53 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 13:36:53 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: (lac, cfbolz): go through this and fix stuff Message-ID: <20110430113653.14BD7282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: documentation-cleanup Changeset: r43797:a11a8be9082e Date: 2011-04-30 13:36 +0200 http://bitbucket.org/pypy/pypy/changeset/a11a8be9082e/ Log: (lac, cfbolz): go through this and fix stuff diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -10,10 +10,9 @@ ------------------------- The translator is a tool based on the PyPy interpreter which can translate -sufficiently static Python programs into low-level code (in particular it can -be used to translate the `full Python interpreter`_). To be able to use it -you need to (if you want to look at the flowgraphs, which you obviously -should): +sufficiently static RPython programs into low-level code (in particular it can +be used to translate the `full Python interpreter`_). To be able to experiment with it +you need to: * Download and install Pygame_. @@ -146,7 +145,7 @@ Where to start reading the sources ---------------------------------- -PyPy is made from parts that are relatively independent from each other. +PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: @@ -159,15 +158,13 @@ interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, - and input data files that allow it to parse the syntax of various Python - versions. 
Once the input data has been processed, the parser can be + and grammar files that allow it to parse the syntax of various Python + versions. Once the grammar has been processed, the parser can be translated by the above machinery into efficient code. * `pypy/interpreter/astcompiler`_ contains the compiler. This contains a modified version of the compiler package from CPython - that fixes some bugs and is translatable. That the compiler and - parser are translatable is new in 0.8.0 and it makes using the - resulting binary interactively much more pleasant. + that fixes some bugs and is translatable. * `pypy/objspace/std`_ contains the `Standard object space`_. The main file is `pypy/objspace/std/objspace.py`_. For each type, the files ``xxxtype.py`` and @@ -190,24 +187,25 @@ * `pypy/rpython`_ contains the code of the RPython typer. The typer transforms annotated flow graphs in a way that makes them very similar to C code so that they can be easy translated. The graph transformations are controlled - by the stuff in `pypy/rpython/rtyper.py`_. The object model that is used can + by the code in `pypy/rpython/rtyper.py`_. The object model that is used can be found in `pypy/rpython/lltypesystem/lltype.py`_. For each RPython type there is a file rxxxx.py that contains the low level functions needed for this type. -* `pypy/rlib`_ contains the RPython standard library, things that you can +* `pypy/rlib`_ contains the `RPython standard library`_, things that you can use from rpython. +.. _`RPython standard library`: rlib.html + .. _optionaltool: Running PyPy's unit tests ------------------------- -PyPy development always was and is still thorougly test-driven. +PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use indepedently -from PyPy for other projects. +`_ and use for other projects. 
The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -355,7 +353,7 @@ We use the `py library`_ for filesystem path manipulations, terminal writing, logging and some other support functionality. -You don't neccessarily need to install these two libraries because +You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. Getting involved From commits-noreply at bitbucket.org Sat Apr 30 13:42:07 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 13:42:07 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: Kill references to the OSX 32 binary, which we don't provide any more. Message-ID: <20110430114207.5CE4336C202@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r178:008391fa9e59 Date: 2011-04-30 13:41 +0200 http://bitbucket.org/pypy/pypy.org/changeset/008391fa9e59/ Log: Kill references to the OSX 32 binary, which we don't provide any more. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -72,7 +72,6 @@ @@ -167,13 +166,11 @@
       3dccf24c23e30b4a04cf122f704b4064  pypy-1.4.1-linux.tar.bz2
       1fb62a813978c2581e9e09debad6b116  pypy-1.4.1-linux64.tar.bz2
      -8584c4e8c042f5b661fcfffa0d9b8a25  pypy-1.4.1-osx.tar.bz2
       769b3fb134944ee8c22ad0834970de3b  pypy-1.4.1-osx64.tar.bz2
       xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
       ebbbb156b1eb842e9e65d909ed5f9f6d  pypy-1.4.1-src.tar.bz2
       6e2366377ad2f0c583074d3ba6f60d064549bef2  pypy-1.4.1-linux.tar.bz2
       1cfd53343e19264905a00d2ffcf83e03e39dcbb3  pypy-1.4.1-linux64.tar.bz2
      -961470e7510c47b8f56e6cc6da180605ba058cb6  pypy-1.4.1-osx.tar.bz2
       8e2830bef80b93f4d3c016b972fbdf7bcd403abc  pypy-1.4.1-osx64.tar.bz2
       xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
       922a8815377fe2e0c015338fa8b28ae16bf8c840  pypy-1.4.1-src.tar.bz2
      
      diff --git a/source/download.txt b/source/download.txt
      --- a/source/download.txt
      +++ b/source/download.txt
      @@ -39,13 +39,11 @@
       
       * `Linux binary (32bit)`__
       * `Linux binary (64bit)`__
      -* `Mac OS/X binary (32bit)`__
       * `Mac OS/X binary (64bit)`__
       * `Windows binary (32bit)`__ (you may need to install the `VS 2010 runtime libraries`_)
       
       .. __: http://pypy.org/download/pypy-1.5-linux.tar.bz2
       .. __: http://pypy.org/download/pypy-1.5-linux64.tar.bz2
      -.. __: http://pypy.org/download/pypy-1.5-osx.tar.bz2
       .. __: http://pypy.org/download/pypy-1.5-osx64.tar.bz2
       .. __: http://pypy.org/download/pypy-1.5-win32.zip
       .. _`VS 2010 runtime libraries`: http://www.microsoft.com/downloads/en/details.aspx?familyid=A7B7A05E-6DE6-4D3A-A423-37BF0912DB84
      @@ -166,14 +164,12 @@
       
         3dccf24c23e30b4a04cf122f704b4064  pypy-1.4.1-linux.tar.bz2
         1fb62a813978c2581e9e09debad6b116  pypy-1.4.1-linux64.tar.bz2
      -  8584c4e8c042f5b661fcfffa0d9b8a25  pypy-1.4.1-osx.tar.bz2
         769b3fb134944ee8c22ad0834970de3b  pypy-1.4.1-osx64.tar.bz2
         xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
         ebbbb156b1eb842e9e65d909ed5f9f6d  pypy-1.4.1-src.tar.bz2
       
         6e2366377ad2f0c583074d3ba6f60d064549bef2  pypy-1.4.1-linux.tar.bz2
         1cfd53343e19264905a00d2ffcf83e03e39dcbb3  pypy-1.4.1-linux64.tar.bz2
      -  961470e7510c47b8f56e6cc6da180605ba058cb6  pypy-1.4.1-osx.tar.bz2
         8e2830bef80b93f4d3c016b972fbdf7bcd403abc  pypy-1.4.1-osx64.tar.bz2
         xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
         922a8815377fe2e0c015338fa8b28ae16bf8c840  pypy-1.4.1-src.tar.bz2
      
      diff --git a/js/detect.js b/js/detect.js
      --- a/js/detect.js
      +++ b/js/detect.js
      @@ -13,13 +13,8 @@
               download_url = 'download/pypy-1.5-win32.zip';
               download_text = 'Download Windows x86 bin';
           } else if (navigator.platform.indexOf('Mac') != 1) {
      -        if (navigator.platform.indexOf('64') != -1) {
      -            download_url = 'download/pypy-1.5-osx64.tar.bz2';
      -            downloat_text = 'Download Mac OS X 10.6 bin (64 bit)';
      -        } else {
      -            download_url = 'download/pypy-1.5-osx.tar.bz2';
      -            downloat_text = 'Download Mac OS X 10.6 bin (32 bit)';
      -        }
      +        download_url = 'download/pypy-1.5-osx64.tar.bz2';
       +        download_text = 'Download Mac OS X 10.6 bin (64 bit)';
           } else {
               download_url = "download.html";
               download_text = "Download page";
      
      From commits-noreply at bitbucket.org  Sat Apr 30 13:54:18 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 13:54:18 +0200 (CEST)
      Subject: [pypy-svn] extradoc extradoc: use ascii for this file,
      	to be on the safe side
      Message-ID: <20110430115418.4B481282B52@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: extradoc
      Changeset: r3547:ee3059291497
      Date: 2011-04-30 13:54 +0200
      http://bitbucket.org/pypy/extradoc/changeset/ee3059291497/
      
      Log:	use ascii for this file, to be on the safe side
      
      diff --git a/talk/bibtex.bib b/talk/bibtex.bib
      --- a/talk/bibtex.bib
      +++ b/talk/bibtex.bib
      @@ -85,7 +85,7 @@
           type = {Master Thesis},
           title = {Automatic {JIT} Compiler Generation with Runtime Partial Evaluation
       },
      -    school = {{Heinrich-Heine-Universität} Düsseldorf},
      +    school = {{Heinrich-Heine-Universit\"at} D\"usseldorf},
           author = {Carl Friedrich Bolz},
           year = {2008}
       },
      @@ -116,7 +116,7 @@
       	abstract = {We attempt to apply the technique of Tracing {JIT} Compilers in the context of the {PyPy} project, i.e., to programs that are interpreters for some dynamic languages, including Python. Tracing {JIT} compilers can greatly speed up programs that spend most of their time in loops in which they take similar code paths. However, applying an unmodified tracing {JIT} to a program that is itself a bytecode interpreter results in very limited or no speedup. In this paper we show how to guide tracing {JIT} compilers to greatly improve the speed of bytecode interpreters. One crucial point is to unroll the bytecode dispatch loop, based on two kinds of hints provided by the implementer of the bytecode interpreter. We evaluate our technique by applying it to two {PyPy} interpreters: one is a small example, and the other one is the full Python interpreter.},
       	booktitle = {Proceedings of the 4th workshop on the Implementation, Compilation, Optimization of {Object-Oriented} Languages and Programming Systems},
       	publisher = {{ACM}},
      -	author = {Carl Friedrich Bolz and Antonio Cuni and Maciej Fijałkowski and Armin Rigo},
      +	author = {Carl Friedrich Bolz and Antonio Cuni and Maciej Fija\lkowski and Armin Rigo},
       	year = {2009},
       	pages = {18--25}
       },
      @@ -166,7 +166,7 @@
       
       
       @inproceedings{Bolz:2011:ARP:1929501.1929508,
      - author = {Bolz, Carl Friedrich and Cuni, Antonio and Fijałkowski, Maciej and Leuschel, Michael and Pedroni, Samuele and Rigo, Armin},
      + author = {Bolz, Carl Friedrich and Cuni, Antonio and Fija\lkowski, Maciej and Leuschel, Michael and Pedroni, Samuele and Rigo, Armin},
        title = {Allocation removal by partial evaluation in a tracing JIT},
        booktitle = {Proceedings of the 20th ACM SIGPLAN workshop on Partial evaluation and program manipulation},
        series = {PEPM '11},
      
      From commits-noreply at bitbucket.org  Sat Apr 30 13:58:32 2011
      From: commits-noreply at bitbucket.org (arigo)
      Date: Sat, 30 Apr 2011 13:58:32 +0200 (CEST)
      Subject: [pypy-svn] pypy post-release-1.5: Add a failing test that shows a
      	difference	with CPython.
      Message-ID: <20110430115832.A06AF36C205@codespeak.net>
      
      Author: Armin Rigo 
      Branch: post-release-1.5
      Changeset: r43798:a1d28b2cc5c6
      Date: 2011-04-30 13:58 +0200
      http://bitbucket.org/pypy/pypy/changeset/a1d28b2cc5c6/
      
      Log:	Add a failing test that shows a difference with CPython.
      
      diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py
      --- a/pypy/objspace/std/test/test_setobject.py
      +++ b/pypy/objspace/std/test/test_setobject.py
      @@ -284,6 +284,18 @@
               # All empty frozenset subclass instances should have different ids
               assert len(set(map(id, efs))) == len(efs)
       
      +    def test_subclass_union(self):
      +        for base in [set, frozenset]:
      +            class subset(base):
      +                def __init__(self, *args):
      +                    self.x = args
      +            s = subset([2])
      +            assert s.x == ([2],)
      +            t = s | base([5])
      +            # obscure CPython behavior:
      +            assert type(t) is subset
      +            assert not hasattr(t, 'x')
      +
           def test_isdisjoint(self):
               assert set([1,2,3]).isdisjoint(set([4,5,6]))
               assert set([1,2,3]).isdisjoint(frozenset([4,5,6]))
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:00:27 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 14:00:27 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: (lac,
      	cfbolz): this is no longer true
      Message-ID: <20110430120027.7B73C36C205@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: documentation-cleanup
      Changeset: r43799:7faafdb9eb5f
      Date: 2011-04-30 13:44 +0200
      http://bitbucket.org/pypy/pypy/changeset/7faafdb9eb5f/
      
      Log:	(lac, cfbolz): this is no longer true
      
      diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst
      --- a/pypy/doc/interpreter.rst
      +++ b/pypy/doc/interpreter.rst
      @@ -146,21 +146,15 @@
         file location can be constructed for tracebacks 
       
       Moreover the Frame class itself has a number of methods which implement
      -the actual bytecodes found in a code object.  In fact, PyPy already constructs 
      -four specialized Frame class variants depending on the code object: 
      +the actual bytecodes found in a code object.  The methods of the ``PyFrame``
      +class are added in various files:
       
      -- PyInterpFrame (in `pypy/interpreter/pyopcode.py`_)  for
      -  basic simple code objects (not involving generators or nested scopes) 
      +- the class ``PyFrame`` is defined in `pypy/interpreter/pyframe.py`_.
       
      -- PyNestedScopeFrame (in `pypy/interpreter/nestedscope.py`_) 
      -  for code objects that reference nested scopes, inherits from PyInterpFrame
       +- the file `pypy/interpreter/pyopcode.py`_ adds support for all Python opcodes.
       
      -- PyGeneratorFrame (in `pypy/interpreter/generator.py`_) 
      -  for code objects that yield values to the caller, inherits from PyInterpFrame
      -
      -- PyNestedScopeGeneratorFrame for code objects that reference
      -  nested scopes and yield values to the caller, inherits from both PyNestedScopeFrame
      -  and PyGeneratorFrame 
      +- nested scope support is added to the ``PyFrame`` class in
      +  `pypy/interpreter/nestedscope.py`_.
       
       .. _Code: 
       
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:00:29 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 14:00:29 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: remove the taint space
      Message-ID: <20110430120029.E57EF282B55@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: documentation-cleanup
      Changeset: r43800:44b362d27cea
      Date: 2011-04-30 14:00 +0200
      http://bitbucket.org/pypy/pypy/changeset/44b362d27cea/
      
      Log:	remove the taint space
      
      diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst
      --- a/pypy/doc/objspace-proxies.rst
      +++ b/pypy/doc/objspace-proxies.rst
      @@ -18,10 +18,6 @@
         only if and when needed; and a way to globally replace an object with
         another.
       
      -* *Taint Object Space*: a soft security system; your application cannot
      -  accidentally compute results based on tainted objects unless it
      -  explicitly untaints them first.
      -
       * *Dump Object Space*: dumps all operations performed on all the objects
         into a large log file.  For debugging your applications.
       
      @@ -133,293 +129,295 @@
          function behaves lazily: all calls to it return a thunk object.
       
       
      -.. _taint:
      +.. broken right now:
       
      -The Taint Object Space
      -======================
      +    .. _taint:
       
      -Motivation
      -----------
      +    The Taint Object Space
      +    ======================
       
      -The Taint Object Space provides a form of security: "tainted objects",
      -inspired by various sources, see [D12.1]_ for a more detailed discussion. 
      +    Motivation
      +    ----------
       
      -The basic idea of this kind of security is not to protect against
      -malicious code but to help with handling and boxing sensitive data. 
      -It covers two kinds of sensitive data: secret data which should not leak, 
      -and untrusted data coming from an external source and that must be 
      -validated before it is used.
      +    The Taint Object Space provides a form of security: "tainted objects",
      +    inspired by various sources, see [D12.1]_ for a more detailed discussion. 
       
      -The idea is that, considering a large application that handles these
      -kinds of sensitive data, there are typically only a small number of
      -places that need to explicitly manipulate that sensitive data; all the
      -other places merely pass it around, or do entirely unrelated things.
      +    The basic idea of this kind of security is not to protect against
      +    malicious code but to help with handling and boxing sensitive data. 
      +    It covers two kinds of sensitive data: secret data which should not leak, 
      +    and untrusted data coming from an external source and that must be 
      +    validated before it is used.
       
      -Nevertheless, if a large application needs to be reviewed for security,
      -it must be entirely carefully checked, because it is possible that a
      -bug at some apparently unrelated place could lead to a leak of sensitive
      -information in a way that an external attacker could exploit.  For
      -example, if any part of the application provides web services, an
      -attacker might be able to issue unexpected requests with a regular web
      -browser and deduce secret information from the details of the answers he
      -gets.  Another example is the common CGI attack where an attacker sends
      -malformed inputs and causes the CGI script to do unintended things.
      +    The idea is that, considering a large application that handles these
      +    kinds of sensitive data, there are typically only a small number of
      +    places that need to explicitly manipulate that sensitive data; all the
      +    other places merely pass it around, or do entirely unrelated things.
       
      -An approach like that of the Taint Object Space allows the small parts
      -of the program that manipulate sensitive data to be explicitly marked.
      -The effect of this is that although these small parts still need a
      -careful security review, the rest of the application no longer does,
      -because even a bug would be unable to leak the information.
      +    Nevertheless, if a large application needs to be reviewed for security,
      +    it must be entirely carefully checked, because it is possible that a
      +    bug at some apparently unrelated place could lead to a leak of sensitive
      +    information in a way that an external attacker could exploit.  For
      +    example, if any part of the application provides web services, an
      +    attacker might be able to issue unexpected requests with a regular web
      +    browser and deduce secret information from the details of the answers he
      +    gets.  Another example is the common CGI attack where an attacker sends
      +    malformed inputs and causes the CGI script to do unintended things.
       
      -We have implemented a simple two-level model: objects are either
      -regular (untainted), or sensitive (tainted).  Objects are marked as
      -sensitive if they are secret or untrusted, and only declassified at
      -carefully-checked positions (e.g. where the secret data is needed, or
      -after the untrusted data has been fully validated).
      +    An approach like that of the Taint Object Space allows the small parts
      +    of the program that manipulate sensitive data to be explicitly marked.
      +    The effect of this is that although these small parts still need a
      +    careful security review, the rest of the application no longer does,
      +    because even a bug would be unable to leak the information.
       
      -It would be simple to extend the code for more fine-grained scales of
      -secrecy.  For example it is typical in the literature to consider
      -user-specified lattices of secrecy levels, corresponding to multiple
      -"owners" that cannot access data belonging to another "owner" unless
      -explicitly authorized to do so.
      +    We have implemented a simple two-level model: objects are either
      +    regular (untainted), or sensitive (tainted).  Objects are marked as
      +    sensitive if they are secret or untrusted, and only declassified at
      +    carefully-checked positions (e.g. where the secret data is needed, or
      +    after the untrusted data has been fully validated).
       
      -Tainting and untainting
      ------------------------
      +    It would be simple to extend the code for more fine-grained scales of
      +    secrecy.  For example it is typical in the literature to consider
      +    user-specified lattices of secrecy levels, corresponding to multiple
      +    "owners" that cannot access data belonging to another "owner" unless
      +    explicitly authorized to do so.
       
      -Start a py.py with the Taint Object Space and try the following example::
      +    Tainting and untainting
      +    -----------------------
       
      -    $ py.py -o taint
      -    >>>> from __pypy__ import taint
      -    >>>> x = taint(6)
      +    Start a py.py with the Taint Object Space and try the following example::
       
      -    # x is hidden from now on.  We can pass it around and
      -    # even operate on it, but not inspect it.  Taintness
      -    # is propagated to operation results.
      +        $ py.py -o taint
      +        >>>> from __pypy__ import taint
      +        >>>> x = taint(6)
       
      -    >>>> x
      -    TaintError
      +        # x is hidden from now on.  We can pass it around and
      +        # even operate on it, but not inspect it.  Taintness
      +        # is propagated to operation results.
       
      -    >>>> if x > 5: y = 2   # see below
      -    TaintError
      +        >>>> x
      +        TaintError
       
      -    >>>> y = x + 5         # ok
      -    >>>> lst = [x, y]
      -    >>>> z = lst.pop()
      -    >>>> t = type(z)       # type() works too, tainted answer
      -    >>>> t
      -    TaintError
      -    >>>> u = t is int      # even 'is' works
      -    >>>> u
      -    TaintError
      +        >>>> if x > 5: y = 2   # see below
      +        TaintError
       
      -Notice that using a tainted boolean like ``x > 5`` in an ``if``
      -statement is forbidden.  This is because knowing which path is followed
      -would give away a hint about ``x``; in the example above, if the
      -statement ``if x > 5: y = 2`` was allowed to run, we would know
      -something about the value of ``x`` by looking at the (untainted) value
      -in the variable ``y``.
      +        >>>> y = x + 5         # ok
      +        >>>> lst = [x, y]
      +        >>>> z = lst.pop()
      +        >>>> t = type(z)       # type() works too, tainted answer
      +        >>>> t
      +        TaintError
      +        >>>> u = t is int      # even 'is' works
      +        >>>> u
      +        TaintError
       
      -Of course, there is a way to inspect tainted objects.  The basic way is
      -to explicitly "declassify" it with the ``untaint()`` function.  In an
      -application, the places that use ``untaint()`` are the places that need
      -careful security review.  To avoid unexpected objects showing up, the
      -``untaint()`` function must be called with the exact type of the object
      -to declassify.  It will raise ``TaintError`` if the type doesn't match::
      +    Notice that using a tainted boolean like ``x > 5`` in an ``if``
      +    statement is forbidden.  This is because knowing which path is followed
      +    would give away a hint about ``x``; in the example above, if the
      +    statement ``if x > 5: y = 2`` was allowed to run, we would know
      +    something about the value of ``x`` by looking at the (untainted) value
      +    in the variable ``y``.
       
      -    >>>> from __pypy__ import taint
      -    >>>> untaint(int, x)
      -    6
      -    >>>> untaint(int, z)
      -    11
      -    >>>> untaint(bool, x > 5)
      -    True
      -    >>>> untaint(int, x > 5)
      -    TaintError
      +    Of course, there is a way to inspect tainted objects.  The basic way is
      +    to explicitly "declassify" it with the ``untaint()`` function.  In an
      +    application, the places that use ``untaint()`` are the places that need
      +    careful security review.  To avoid unexpected objects showing up, the
      +    ``untaint()`` function must be called with the exact type of the object
      +    to declassify.  It will raise ``TaintError`` if the type doesn't match::
       
      +        >>>> from __pypy__ import taint
      +        >>>> untaint(int, x)
      +        6
      +        >>>> untaint(int, z)
      +        11
      +        >>>> untaint(bool, x > 5)
      +        True
      +        >>>> untaint(int, x > 5)
      +        TaintError
       
      -Taint Bombs
      ------------
       
      -In this area, a common problem is what to do about failing operations.
      -If an operation raises an exception when manipulating a tainted object,
      -then the very presence of the exception can leak information about the
      -tainted object itself.  Consider::
      +    Taint Bombs
      +    -----------
       
      -    >>>> 5 / (x-6)
      +    In this area, a common problem is what to do about failing operations.
      +    If an operation raises an exception when manipulating a tainted object,
      +    then the very presence of the exception can leak information about the
      +    tainted object itself.  Consider::
       
      -By checking if this raises ``ZeroDivisionError`` or not, we would know
      -if ``x`` was equal to 6 or not.  The solution to this problem in the
      -Taint Object Space is to introduce *Taint Bombs*.  They are a kind of
      -tainted object that doesn't contain a real object, but a pending
      -exception.  Taint Bombs are indistinguishable from normal tainted
      -objects to unprivileged code. See::
      +        >>>> 5 / (x-6)
       
      -    >>>> x = taint(6)
      -    >>>> i = 5 / (x-6)     # no exception here
      -    >>>> j = i + 1         # nor here
      -    >>>> k = j + 5         # nor here
      -    >>>> untaint(int, k)
      -    TaintError
      +    By checking if this raises ``ZeroDivisionError`` or not, we would know
      +    if ``x`` was equal to 6 or not.  The solution to this problem in the
      +    Taint Object Space is to introduce *Taint Bombs*.  They are a kind of
      +    tainted object that doesn't contain a real object, but a pending
      +    exception.  Taint Bombs are indistinguishable from normal tainted
      +    objects to unprivileged code. See::
       
      -In the above example, all of ``i``, ``j`` and ``k`` contain a Taint
      -Bomb.  Trying to untaint it raises an exception - a generic
      -``TaintError``.  What we win is that the exception gives little away,
      -and most importantly it occurs at the point where ``untaint()`` is
      -called, not where the operation failed.  This means that all calls to
      -``untaint()`` - but not the rest of the code - must be carefully
      -reviewed for what occurs if they receive a Taint Bomb; they might catch
      -the ``TaintError`` and give the user a generic message that something
      -went wrong, if we are reasonably careful that the message or even its
      -presence doesn't give information away.  This might be a
      -problem by itself, but there is no satisfying general solution here:
      -it must be considered on a case-by-case basis.  Again, what the
      -Taint Object Space approach achieves is not solving these problems, but
      -localizing them to well-defined small parts of the application - namely,
      -around calls to ``untaint()``.
      +        >>>> x = taint(6)
      +        >>>> i = 5 / (x-6)     # no exception here
      +        >>>> j = i + 1         # nor here
      +        >>>> k = j + 5         # nor here
      +        >>>> untaint(int, k)
      +        TaintError
       
      -The ``TaintError`` exception deliberately does not include any
      -useful error messages, because they might give information away.
      -Of course, this makes debugging quite a bit harder; a difficult
      -problem to solve properly.  So far we have implemented a way to peek in a Taint
      -Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that
      -prints the exception as soon as a Bomb is created - both write
      -information to the low-level stderr of the application, where we hope
      -that it is unlikely to be seen by anyone but the application
      -developer.
      +    In the above example, all of ``i``, ``j`` and ``k`` contain a Taint
      +    Bomb.  Trying to untaint it raises an exception - a generic
      +    ``TaintError``.  What we win is that the exception gives little away,
      +    and most importantly it occurs at the point where ``untaint()`` is
      +    called, not where the operation failed.  This means that all calls to
      +    ``untaint()`` - but not the rest of the code - must be carefully
      +    reviewed for what occurs if they receive a Taint Bomb; they might catch
      +    the ``TaintError`` and give the user a generic message that something
      +    went wrong, if we are reasonably careful that the message or even its
      +    presence doesn't give information away.  This might be a
      +    problem by itself, but there is no satisfying general solution here:
      +    it must be considered on a case-by-case basis.  Again, what the
      +    Taint Object Space approach achieves is not solving these problems, but
      +    localizing them to well-defined small parts of the application - namely,
      +    around calls to ``untaint()``.
       
      +    The ``TaintError`` exception deliberately does not include any
      +    useful error messages, because they might give information away.
      +    Of course, this makes debugging quite a bit harder; a difficult
      +    problem to solve properly.  So far we have implemented a way to peek in a Taint
      +    Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that
      +    prints the exception as soon as a Bomb is created - both write
      +    information to the low-level stderr of the application, where we hope
      +    that it is unlikely to be seen by anyone but the application
      +    developer.
       
      -Taint Atomic functions
      -----------------------
       
      -Occasionally, a more complicated computation must be performed on a
      -tainted object.  This requires first untainting the object, performing the
      -computations, and then carefully tainting the result again (including
      -hiding all exceptions into Bombs).
      +    Taint Atomic functions
      +    ----------------------
       
      -There is a built-in decorator that does this for you::
      +    Occasionally, a more complicated computation must be performed on a
      +    tainted object.  This requires first untainting the object, performing the
      +    computations, and then carefully tainting the result again (including
      +    hiding all exceptions into Bombs).
       
      -    >>>> @__pypy__.taint_atomic
      -    >>>> def myop(x, y):
      -    ....     while x > 0:
      -    ....         x -= y
      -    ....     return x
      -    ....
      -    >>>> myop(42, 10)
      -    -8
      -    >>>> z = myop(taint(42), 10)
      -    >>>> z
      -    TaintError
      -    >>>> untaint(int, z)
      -    -8
      +    There is a built-in decorator that does this for you::
       
      -The decorator makes a whole function behave like a built-in operation.
      -If no tainted argument is passed in, the function behaves normally.  But
      -if any of the arguments is tainted, it is automatically untainted - so
      -the function body always sees untainted arguments - and the eventual
      -result is tainted again (possibly in a Taint Bomb).
      +        >>>> @__pypy__.taint_atomic
      +        >>>> def myop(x, y):
      +        ....     while x > 0:
      +        ....         x -= y
      +        ....     return x
      +        ....
      +        >>>> myop(42, 10)
      +        -8
      +        >>>> z = myop(taint(42), 10)
      +        >>>> z
      +        TaintError
      +        >>>> untaint(int, z)
      +        -8
       
      -It is important for the function marked as ``taint_atomic`` to have no
      -visible side effects, as these could cause information leakage.
      -This is currently not enforced, which means that all ``taint_atomic``
      -functions have to be carefully reviewed for security (but not the
      -callers of ``taint_atomic`` functions).
      +    The decorator makes a whole function behave like a built-in operation.
      +    If no tainted argument is passed in, the function behaves normally.  But
      +    if any of the arguments is tainted, it is automatically untainted - so
      +    the function body always sees untainted arguments - and the eventual
      +    result is tainted again (possibly in a Taint Bomb).
       
      -A possible future extension would be to forbid side-effects on
      -non-tainted objects from all ``taint_atomic`` functions.
      +    It is important for the function marked as ``taint_atomic`` to have no
      +    visible side effects, as these could cause information leakage.
      +    This is currently not enforced, which means that all ``taint_atomic``
      +    functions have to be carefully reviewed for security (but not the
      +    callers of ``taint_atomic`` functions).
       
      -An example of usage: given a tainted object ``passwords_db`` that
      -references a database of passwords, we can write a function
      -that checks if a password is valid as follows::
      +    A possible future extension would be to forbid side-effects on
      +    non-tainted objects from all ``taint_atomic`` functions.
       
      -    @taint_atomic
      -    def validate(passwords_db, username, password):
      -        assert type(passwords_db) is PasswordDatabase
      -        assert type(username) is str
      -        assert type(password) is str
      -        ...load username entry from passwords_db...
      -        return expected_password == password
      +    An example of usage: given a tainted object ``passwords_db`` that
      +    references a database of passwords, we can write a function
      +    that checks if a password is valid as follows::
       
      -It returns a tainted boolean answer, or a Taint Bomb if something
      -went wrong.  A caller can do::
      +        @taint_atomic
      +        def validate(passwords_db, username, password):
      +            assert type(passwords_db) is PasswordDatabase
      +            assert type(username) is str
      +            assert type(password) is str
      +            ...load username entry from passwords_db...
      +            return expected_password == password
       
      -    ok = validate(passwords_db, 'john', '1234')
      -    ok = untaint(bool, ok)
      +    It returns a tainted boolean answer, or a Taint Bomb if something
      +    went wrong.  A caller can do::
       
      -This can give three outcomes: ``True``, ``False``, or a ``TaintError``
      -exception (with no information on it) if anything went wrong.  If even
      -this is considered giving too much information away, the ``False`` case
      -can be made indistinguishable from the ``TaintError`` case (simply by
      -raising an exception in ``validate()`` if the password is wrong).
      +        ok = validate(passwords_db, 'john', '1234')
      +        ok = untaint(bool, ok)
       
      -In the above example, the security results achieved are the following:
      -as long as ``validate()`` does not leak information, no other part of
      -the code can obtain more information about a passwords database than a
      -Yes/No answer to a precise query.
      +    This can give three outcomes: ``True``, ``False``, or a ``TaintError``
      +    exception (with no information on it) if anything went wrong.  If even
      +    this is considered giving too much information away, the ``False`` case
      +    can be made indistinguishable from the ``TaintError`` case (simply by
      +    raising an exception in ``validate()`` if the password is wrong).
       
      -A possible extension of the ``taint_atomic`` decorator would be to check
      -the argument types, as ``untaint()`` does, for the same reason: to
      -prevent bugs where a function like ``validate()`` above is accidentally
      -called with the wrong kind of tainted object, which would make it
      -misbehave.  For now, all ``taint_atomic`` functions should be
      -conservative and carefully check all assumptions on their input
      -arguments.
      +    In the above example, the security results achieved are the following:
      +    as long as ``validate()`` does not leak information, no other part of
      +    the code can obtain more information about a passwords database than a
      +    Yes/No answer to a precise query.
       
      +    A possible extension of the ``taint_atomic`` decorator would be to check
      +    the argument types, as ``untaint()`` does, for the same reason: to
      +    prevent bugs where a function like ``validate()`` above is accidentally
      +    called with the wrong kind of tainted object, which would make it
      +    misbehave.  For now, all ``taint_atomic`` functions should be
      +    conservative and carefully check all assumptions on their input
      +    arguments.
       
      -.. _`taint-interface`:
       
      -Interface
      ----------
      +    .. _`taint-interface`:
       
      -.. _`like a built-in operation`:
      +    Interface
      +    ---------
       
      -The basic rule of the Tainted Object Space is that it introduces two new
      -kinds of objects, Tainted Boxes and Tainted Bombs (which are not types
      -in the Python sense).  Each box internally contains a regular object;
      -each bomb internally contains an exception object.  An operation
      -involving Tainted Boxes is performed on the objects contained in the
      -boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an
      -operation does not let an exception be raised).  An operation called
      -with a Tainted Bomb argument immediately returns the same Tainted Bomb.
      +    .. _`like a built-in operation`:
       
      -In a PyPy running with (or translated with) the Taint Object Space,
      -the ``__pypy__`` module exposes the following interface:
      +    The basic rule of the Tainted Object Space is that it introduces two new
      +    kinds of objects, Tainted Boxes and Tainted Bombs (which are not types
      +    in the Python sense).  Each box internally contains a regular object;
      +    each bomb internally contains an exception object.  An operation
      +    involving Tainted Boxes is performed on the objects contained in the
      +    boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an
      +    operation does not let an exception be raised).  An operation called
      +    with a Tainted Bomb argument immediately returns the same Tainted Bomb.
       
      -* ``taint(obj)``
      +    In a PyPy running with (or translated with) the Taint Object Space,
      +    the ``__pypy__`` module exposes the following interface:
       
      -    Return a new Tainted Box wrapping ``obj``.  Return ``obj`` itself
      -    if it is already tainted (a Box or a Bomb).
      +    * ``taint(obj)``
       
      -* ``is_tainted(obj)``
      +        Return a new Tainted Box wrapping ``obj``.  Return ``obj`` itself
      +        if it is already tainted (a Box or a Bomb).
       
      -    Check if ``obj`` is tainted (a Box or a Bomb).
      +    * ``is_tainted(obj)``
       
      -* ``untaint(type, obj)``
      +        Check if ``obj`` is tainted (a Box or a Bomb).
       
      -    Untaints ``obj`` if it is tainted.  Raise ``TaintError`` if the type
      -    of the untainted object is not exactly ``type``, or if ``obj`` is a
      -    Bomb.
      +    * ``untaint(type, obj)``
       
      -* ``taint_atomic(func)``
      +        Untaints ``obj`` if it is tainted.  Raise ``TaintError`` if the type
      +        of the untainted object is not exactly ``type``, or if ``obj`` is a
      +        Bomb.
       
      -    Return a wrapper function around the callable ``func``.  The wrapper
      -    behaves `like a built-in operation`_ with respect to untainting the
      -    arguments, tainting the result, and returning a Bomb.
      +    * ``taint_atomic(func)``
       
      -* ``TaintError``
      +        Return a wrapper function around the callable ``func``.  The wrapper
      +        behaves `like a built-in operation`_ with respect to untainting the
      +        arguments, tainting the result, and returning a Bomb.
       
      -    Exception.  On purpose, it provides no attribute or error message.
      +    * ``TaintError``
       
      -* ``_taint_debug(level)``
      +        Exception.  On purpose, it provides no attribute or error message.
       
      -    Set the debugging level to ``level`` (0=off).  At level 1 or above,
      -    all Taint Bombs print a diagnostic message to stderr when they are
      -    created.
      +    * ``_taint_debug(level)``
       
      -* ``_taint_look(obj)``
      +        Set the debugging level to ``level`` (0=off).  At level 1 or above,
      +        all Taint Bombs print a diagnostic message to stderr when they are
      +        created.
       
      -    For debugging purposes: prints (to stderr) the type and address of
      -    the object in a Tainted Box, or prints the exception if ``obj`` is
      -    a Taint Bomb.
      +    * ``_taint_look(obj)``
      +
      +        For debugging purposes: prints (to stderr) the type and address of
      +        the object in a Tainted Box, or prints the exception if ``obj`` is
      +        a Taint Bomb.
       
       
       .. _dump:
      
      diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
      --- a/pypy/doc/index.rst
      +++ b/pypy/doc/index.rst
      @@ -273,8 +273,6 @@
       `pypy/objspace/dump.py`_           the dump object space saves a large, searchable log file
                                          with all operations
       
      -`pypy/objspace/taint.py`_          the `taint object space`_, providing object tainting
      -
       `pypy/objspace/thunk.py`_          the `thunk object space`_, providing unique object features 
       
       `pypy/objspace/flow/`_             the FlowObjSpace_ implementing `abstract interpretation`_
      
      diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst
      --- a/pypy/doc/__pypy__-module.rst
      +++ b/pypy/doc/__pypy__-module.rst
      @@ -37,27 +37,29 @@
       .. _`thunk object space docs`: objspace-proxies.html#thunk
       .. _`interface section of the thunk object space docs`: objspace-proxies.html#thunk-interface
       
      -Taint Object Space Functionality
      -================================
      +.. broken:
       
      -When the taint object space is used (choose with :config:`objspace.name`),
      -the following names are put into ``__pypy__``:
      +    Taint Object Space Functionality
      +    ================================
       
      - - ``taint``
      - - ``is_tainted``
      - - ``untaint``
      - - ``taint_atomic``
      - - ``_taint_debug``
      - - ``_taint_look``
      - - ``TaintError``
      +    When the taint object space is used (choose with :config:`objspace.name`),
      +    the following names are put into ``__pypy__``:
       
      -Those are all described in the `interface section of the taint object space
      -docs`_.
      +     - ``taint``
      +     - ``is_tainted``
      +     - ``untaint``
      +     - ``taint_atomic``
      +     - ``_taint_debug``
      +     - ``_taint_look``
      +     - ``TaintError``
       
      -For more detailed explanations and examples see the `taint object space docs`_.
      +    Those are all described in the `interface section of the taint object space
      +    docs`_.
       
      -.. _`taint object space docs`: objspace-proxies.html#taint
      -.. _`interface section of the taint object space docs`: objspace-proxies.html#taint-interface
      +    For more detailed explanations and examples see the `taint object space docs`_.
      +
      +    .. _`taint object space docs`: objspace-proxies.html#taint
      +    .. _`interface section of the taint object space docs`: objspace-proxies.html#taint-interface
       
       Transparent Proxy Functionality
       ===============================
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:01:03 2011
      From: commits-noreply at bitbucket.org (arigo)
      Date: Sat, 30 Apr 2011 14:01:03 +0200 (CEST)
      Subject: [pypy-svn] pypy.org extradoc: In-progress.
      Message-ID: <20110430120103.D550D36C205@codespeak.net>
      
      Author: Armin Rigo 
      Branch: extradoc
      Changeset: r179:5049f1c0a94d
      Date: 2011-04-30 14:00 +0200
      http://bitbucket.org/pypy/pypy.org/changeset/5049f1c0a94d/
      
      Log:	In-progress.
      
      diff --git a/source/download.txt b/source/download.txt
      --- a/source/download.txt
      +++ b/source/download.txt
      @@ -162,14 +162,14 @@
       
       Here are the checksums for each of the downloads (md5 and sha1)::
       
      -  3dccf24c23e30b4a04cf122f704b4064  pypy-1.4.1-linux.tar.bz2
      -  1fb62a813978c2581e9e09debad6b116  pypy-1.4.1-linux64.tar.bz2
      -  769b3fb134944ee8c22ad0834970de3b  pypy-1.4.1-osx64.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-linux.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-linux64.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-osx64.tar.bz2
         xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
      -  ebbbb156b1eb842e9e65d909ed5f9f6d  pypy-1.4.1-src.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-src.tar.bz2
       
      -  6e2366377ad2f0c583074d3ba6f60d064549bef2  pypy-1.4.1-linux.tar.bz2
      -  1cfd53343e19264905a00d2ffcf83e03e39dcbb3  pypy-1.4.1-linux64.tar.bz2
      -  8e2830bef80b93f4d3c016b972fbdf7bcd403abc  pypy-1.4.1-osx64.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-linux.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-linux64.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-osx64.tar.bz2
         xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
      -  922a8815377fe2e0c015338fa8b28ae16bf8c840  pypy-1.4.1-src.tar.bz2
      +  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-src.tar.bz2
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:14:58 2011
      From: commits-noreply at bitbucket.org (arigo)
      Date: Sat, 30 Apr 2011 14:14:58 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: Fix the only really outdated
      	comment that	I could find.
      Message-ID: <20110430121458.DC84436C212@codespeak.net>
      
      Author: Armin Rigo 
      Branch: documentation-cleanup
      Changeset: r43801:5e30104b8a80
      Date: 2011-04-30 14:10 +0200
      http://bitbucket.org/pypy/pypy/changeset/5e30104b8a80/
      
      Log:	Fix the only really outdated comment that I could find.
      
      diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst
      --- a/pypy/doc/jit/pyjitpl5.rst
      +++ b/pypy/doc/jit/pyjitpl5.rst
      @@ -160,8 +160,11 @@
       in the machine code.  Virtualizables, however, can escape from JIT controlled
       code.
       
      -Most of the JIT's optimizer is contained 2 files optimizefindnodes.py and
      -optimizeopt.py.
      +Other optimizations
      +*******************
      +
      +Most of the JIT's optimizer is contained in the subdirectory
      +``metainterp/optimizeopt/``.  Refer to it for more details.
       
       
       More resources
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:15:01 2011
      From: commits-noreply at bitbucket.org (arigo)
      Date: Sat, 30 Apr 2011 14:15:01 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: merge heads
      Message-ID: <20110430121501.F3945282B52@codespeak.net>
      
      Author: Armin Rigo 
      Branch: documentation-cleanup
      Changeset: r43802:bea75c070193
      Date: 2011-04-30 14:14 +0200
      http://bitbucket.org/pypy/pypy/changeset/bea75c070193/
      
      Log:	merge heads
      
      diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst
      --- a/pypy/doc/getting-started-dev.rst
      +++ b/pypy/doc/getting-started-dev.rst
      @@ -10,10 +10,9 @@
       ------------------------- 
       
       The translator is a tool based on the PyPy interpreter which can translate
      -sufficiently static Python programs into low-level code (in particular it can
      -be used to translate the `full Python interpreter`_). To be able to use it
      -you need to (if you want to look at the flowgraphs, which you obviously
      -should):
      +sufficiently static RPython programs into low-level code (in particular it can
      +be used to translate the `full Python interpreter`_). To be able to experiment with it
      +you need to:
       
         * Download and install Pygame_.
       
      @@ -146,7 +145,7 @@
       Where to start reading the sources
       ---------------------------------- 
       
      -PyPy is made from parts that are relatively independent from each other.
      +PyPy is made from parts that are relatively independent of each other.
       You should start looking at the part that attracts you most (all paths are
       relative to the PyPy top level directory).  You may look at our `directory reference`_ 
       or start off at one of the following points:
      @@ -159,15 +158,13 @@
          interpreter are defined in `pypy/interpreter/typedef.py`_.
       
       *  `pypy/interpreter/pyparser`_ contains a recursive descent parser,
      -   and input data files that allow it to parse the syntax of various Python
      -   versions. Once the input data has been processed, the parser can be
      +   and grammar files that allow it to parse the syntax of various Python
      +   versions. Once the grammar has been processed, the parser can be
          translated by the above machinery into efficient code.
        
       *  `pypy/interpreter/astcompiler`_ contains the compiler.  This
          contains a modified version of the compiler package from CPython
      -   that fixes some bugs and is translatable.  That the compiler and
      -   parser are translatable is new in 0.8.0 and it makes using the
      -   resulting binary interactively much more pleasant.
      +   that fixes some bugs and is translatable.
       
       *  `pypy/objspace/std`_ contains the `Standard object space`_.  The main file
          is `pypy/objspace/std/objspace.py`_.  For each type, the files ``xxxtype.py`` and
      @@ -190,24 +187,25 @@
       *  `pypy/rpython`_ contains the code of the RPython typer. The typer transforms
          annotated flow graphs in a way that makes them very similar to C code so
          that they can be easy translated. The graph transformations are controlled
      -   by the stuff in `pypy/rpython/rtyper.py`_. The object model that is used can
      +   by the code in `pypy/rpython/rtyper.py`_. The object model that is used can
          be found in `pypy/rpython/lltypesystem/lltype.py`_. For each RPython type
          there is a file rxxxx.py that contains the low level functions needed for
          this type.
       
      -*  `pypy/rlib`_ contains the RPython standard library, things that you can
      +*  `pypy/rlib`_ contains the `RPython standard library`_, things that you can
          use from rpython.
       
      +.. _`RPython standard library`: rlib.html
      +
       .. _optionaltool: 
       
       
       Running PyPy's unit tests
       -------------------------
       
      -PyPy development always was and is still thorougly test-driven. 
      +PyPy development always was and is still thoroughly test-driven.
       We use the flexible `py.test testing tool`_ which you can `install independently
      -`_ and use indepedently
      -from PyPy for other projects.
      +`_ and use for other projects.
       
       The PyPy source tree comes with an inlined version of ``py.test``
       which you can invoke by typing::
      @@ -355,7 +353,7 @@
       We use the `py library`_ for filesystem path manipulations, terminal
       writing, logging and some other support  functionality.
       
      -You don't neccessarily need to install these two libraries because 
      +You don't necessarily need to install these two libraries because
       we also ship them inlined in the PyPy source tree.
       
       Getting involved 
      
      diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst
      --- a/pypy/doc/objspace-proxies.rst
      +++ b/pypy/doc/objspace-proxies.rst
      @@ -18,10 +18,6 @@
         only if and when needed; and a way to globally replace an object with
         another.
       
      -* *Taint Object Space*: a soft security system; your application cannot
      -  accidentally compute results based on tainted objects unless it
      -  explicitly untaints them first.
      -
       * *Dump Object Space*: dumps all operations performed on all the objects
         into a large log file.  For debugging your applications.
       
      @@ -133,293 +129,295 @@
          function behaves lazily: all calls to it return a thunk object.
       
       
      -.. _taint:
      +.. broken right now:
       
      -The Taint Object Space
      -======================
      +    .. _taint:
       
      -Motivation
      -----------
      +    The Taint Object Space
      +    ======================
       
      -The Taint Object Space provides a form of security: "tainted objects",
      -inspired by various sources, see [D12.1]_ for a more detailed discussion. 
      +    Motivation
      +    ----------
       
      -The basic idea of this kind of security is not to protect against
      -malicious code but to help with handling and boxing sensitive data. 
      -It covers two kinds of sensitive data: secret data which should not leak, 
      -and untrusted data coming from an external source and that must be 
      -validated before it is used.
      +    The Taint Object Space provides a form of security: "tainted objects",
      +    inspired by various sources, see [D12.1]_ for a more detailed discussion. 
       
      -The idea is that, considering a large application that handles these
      -kinds of sensitive data, there are typically only a small number of
      -places that need to explicitly manipulate that sensitive data; all the
      -other places merely pass it around, or do entirely unrelated things.
      +    The basic idea of this kind of security is not to protect against
      +    malicious code but to help with handling and boxing sensitive data. 
      +    It covers two kinds of sensitive data: secret data which should not leak, 
      +    and untrusted data coming from an external source and that must be 
      +    validated before it is used.
       
      -Nevertheless, if a large application needs to be reviewed for security,
      -it must be entirely carefully checked, because it is possible that a
      -bug at some apparently unrelated place could lead to a leak of sensitive
      -information in a way that an external attacker could exploit.  For
      -example, if any part of the application provides web services, an
      -attacker might be able to issue unexpected requests with a regular web
      -browser and deduce secret information from the details of the answers he
      -gets.  Another example is the common CGI attack where an attacker sends
      -malformed inputs and causes the CGI script to do unintended things.
      +    The idea is that, considering a large application that handles these
      +    kinds of sensitive data, there are typically only a small number of
      +    places that need to explicitly manipulate that sensitive data; all the
      +    other places merely pass it around, or do entirely unrelated things.
       
      -An approach like that of the Taint Object Space allows the small parts
      -of the program that manipulate sensitive data to be explicitly marked.
      -The effect of this is that although these small parts still need a
      -careful security review, the rest of the application no longer does,
      -because even a bug would be unable to leak the information.
      +    Nevertheless, if a large application needs to be reviewed for security,
      +    it must be entirely carefully checked, because it is possible that a
      +    bug at some apparently unrelated place could lead to a leak of sensitive
      +    information in a way that an external attacker could exploit.  For
      +    example, if any part of the application provides web services, an
      +    attacker might be able to issue unexpected requests with a regular web
      +    browser and deduce secret information from the details of the answers he
      +    gets.  Another example is the common CGI attack where an attacker sends
      +    malformed inputs and causes the CGI script to do unintended things.
       
      -We have implemented a simple two-level model: objects are either
      -regular (untainted), or sensitive (tainted).  Objects are marked as
      -sensitive if they are secret or untrusted, and only declassified at
      -carefully-checked positions (e.g. where the secret data is needed, or
      -after the untrusted data has been fully validated).
      +    An approach like that of the Taint Object Space allows the small parts
      +    of the program that manipulate sensitive data to be explicitly marked.
      +    The effect of this is that although these small parts still need a
      +    careful security review, the rest of the application no longer does,
      +    because even a bug would be unable to leak the information.
       
      -It would be simple to extend the code for more fine-grained scales of
      -secrecy.  For example it is typical in the literature to consider
      -user-specified lattices of secrecy levels, corresponding to multiple
      -"owners" that cannot access data belonging to another "owner" unless
      -explicitly authorized to do so.
      +    We have implemented a simple two-level model: objects are either
      +    regular (untainted), or sensitive (tainted).  Objects are marked as
      +    sensitive if they are secret or untrusted, and only declassified at
      +    carefully-checked positions (e.g. where the secret data is needed, or
      +    after the untrusted data has been fully validated).
       
      -Tainting and untainting
      ------------------------
      +    It would be simple to extend the code for more fine-grained scales of
      +    secrecy.  For example it is typical in the literature to consider
      +    user-specified lattices of secrecy levels, corresponding to multiple
      +    "owners" that cannot access data belonging to another "owner" unless
      +    explicitly authorized to do so.
       
      -Start a py.py with the Taint Object Space and try the following example::
      +    Tainting and untainting
      +    -----------------------
       
      -    $ py.py -o taint
      -    >>>> from __pypy__ import taint
      -    >>>> x = taint(6)
      +    Start a py.py with the Taint Object Space and try the following example::
       
      -    # x is hidden from now on.  We can pass it around and
      -    # even operate on it, but not inspect it.  Taintness
      -    # is propagated to operation results.
      +        $ py.py -o taint
      +        >>>> from __pypy__ import taint
      +        >>>> x = taint(6)
       
      -    >>>> x
      -    TaintError
      +        # x is hidden from now on.  We can pass it around and
      +        # even operate on it, but not inspect it.  Taintness
      +        # is propagated to operation results.
       
      -    >>>> if x > 5: y = 2   # see below
      -    TaintError
      +        >>>> x
      +        TaintError
       
      -    >>>> y = x + 5         # ok
      -    >>>> lst = [x, y]
      -    >>>> z = lst.pop()
      -    >>>> t = type(z)       # type() works too, tainted answer
      -    >>>> t
      -    TaintError
      -    >>>> u = t is int      # even 'is' works
      -    >>>> u
      -    TaintError
      +        >>>> if x > 5: y = 2   # see below
      +        TaintError
       
      -Notice that using a tainted boolean like ``x > 5`` in an ``if``
      -statement is forbidden.  This is because knowing which path is followed
      -would give away a hint about ``x``; in the example above, if the
      -statement ``if x > 5: y = 2`` was allowed to run, we would know
      -something about the value of ``x`` by looking at the (untainted) value
      -in the variable ``y``.
      +        >>>> y = x + 5         # ok
      +        >>>> lst = [x, y]
      +        >>>> z = lst.pop()
      +        >>>> t = type(z)       # type() works too, tainted answer
      +        >>>> t
      +        TaintError
      +        >>>> u = t is int      # even 'is' works
      +        >>>> u
      +        TaintError
       
      -Of course, there is a way to inspect tainted objects.  The basic way is
      -to explicitly "declassify" it with the ``untaint()`` function.  In an
      -application, the places that use ``untaint()`` are the places that need
      -careful security review.  To avoid unexpected objects showing up, the
      -``untaint()`` function must be called with the exact type of the object
      -to declassify.  It will raise ``TaintError`` if the type doesn't match::
      +    Notice that using a tainted boolean like ``x > 5`` in an ``if``
      +    statement is forbidden.  This is because knowing which path is followed
      +    would give away a hint about ``x``; in the example above, if the
      +    statement ``if x > 5: y = 2`` was allowed to run, we would know
      +    something about the value of ``x`` by looking at the (untainted) value
      +    in the variable ``y``.
       
      -    >>>> from __pypy__ import taint
      -    >>>> untaint(int, x)
      -    6
      -    >>>> untaint(int, z)
      -    11
      -    >>>> untaint(bool, x > 5)
      -    True
      -    >>>> untaint(int, x > 5)
      -    TaintError
      +    Of course, there is a way to inspect tainted objects.  The basic way is
      +    to explicitly "declassify" it with the ``untaint()`` function.  In an
      +    application, the places that use ``untaint()`` are the places that need
      +    careful security review.  To avoid unexpected objects showing up, the
      +    ``untaint()`` function must be called with the exact type of the object
      +    to declassify.  It will raise ``TaintError`` if the type doesn't match::
       
      +        >>>> from __pypy__ import taint
      +        >>>> untaint(int, x)
      +        6
      +        >>>> untaint(int, z)
      +        11
      +        >>>> untaint(bool, x > 5)
      +        True
      +        >>>> untaint(int, x > 5)
      +        TaintError
       
      -Taint Bombs
      ------------
       
      -In this area, a common problem is what to do about failing operations.
      -If an operation raises an exception when manipulating a tainted object,
      -then the very presence of the exception can leak information about the
      -tainted object itself.  Consider::
      +    Taint Bombs
      +    -----------
       
      -    >>>> 5 / (x-6)
      +    In this area, a common problem is what to do about failing operations.
      +    If an operation raises an exception when manipulating a tainted object,
      +    then the very presence of the exception can leak information about the
      +    tainted object itself.  Consider::
       
      -By checking if this raises ``ZeroDivisionError`` or not, we would know
      -if ``x`` was equal to 6 or not.  The solution to this problem in the
      -Taint Object Space is to introduce *Taint Bombs*.  They are a kind of
      -tainted object that doesn't contain a real object, but a pending
      -exception.  Taint Bombs are indistinguishable from normal tainted
      -objects to unprivileged code. See::
      +        >>>> 5 / (x-6)
       
      -    >>>> x = taint(6)
      -    >>>> i = 5 / (x-6)     # no exception here
      -    >>>> j = i + 1         # nor here
      -    >>>> k = j + 5         # nor here
      -    >>>> untaint(int, k)
      -    TaintError
      +    By checking if this raises ``ZeroDivisionError`` or not, we would know
      +    if ``x`` was equal to 6 or not.  The solution to this problem in the
      +    Taint Object Space is to introduce *Taint Bombs*.  They are a kind of
      +    tainted object that doesn't contain a real object, but a pending
      +    exception.  Taint Bombs are indistinguishable from normal tainted
      +    objects to unprivileged code. See::
       
      -In the above example, all of ``i``, ``j`` and ``k`` contain a Taint
      -Bomb.  Trying to untaint it raises an exception - a generic
      -``TaintError``.  What we win is that the exception gives little away,
      -and most importantly it occurs at the point where ``untaint()`` is
      -called, not where the operation failed.  This means that all calls to
      -``untaint()`` - but not the rest of the code - must be carefully
      -reviewed for what occurs if they receive a Taint Bomb; they might catch
      -the ``TaintError`` and give the user a generic message that something
      -went wrong, if we are reasonably careful that the message or even its
      -presence doesn't give information away.  This might be a
      -problem by itself, but there is no satisfying general solution here:
      -it must be considered on a case-by-case basis.  Again, what the
      -Taint Object Space approach achieves is not solving these problems, but
      -localizing them to well-defined small parts of the application - namely,
      -around calls to ``untaint()``.
      +        >>>> x = taint(6)
      +        >>>> i = 5 / (x-6)     # no exception here
      +        >>>> j = i + 1         # nor here
      +        >>>> k = j + 5         # nor here
      +        >>>> untaint(int, k)
      +        TaintError
       
      -The ``TaintError`` exception deliberately does not include any
      -useful error messages, because they might give information away.
      -Of course, this makes debugging quite a bit harder; a difficult
      -problem to solve properly.  So far we have implemented a way to peek in a Taint
      -Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that
      -prints the exception as soon as a Bomb is created - both write
      -information to the low-level stderr of the application, where we hope
      -that it is unlikely to be seen by anyone but the application
      -developer.
      +    In the above example, all of ``i``, ``j`` and ``k`` contain a Taint
      +    Bomb.  Trying to untaint it raises an exception - a generic
      +    ``TaintError``.  What we win is that the exception gives little away,
      +    and most importantly it occurs at the point where ``untaint()`` is
      +    called, not where the operation failed.  This means that all calls to
      +    ``untaint()`` - but not the rest of the code - must be carefully
      +    reviewed for what occurs if they receive a Taint Bomb; they might catch
      +    the ``TaintError`` and give the user a generic message that something
      +    went wrong, if we are reasonably careful that the message or even its
      +    presence doesn't give information away.  This might be a
      +    problem by itself, but there is no satisfying general solution here:
      +    it must be considered on a case-by-case basis.  Again, what the
      +    Taint Object Space approach achieves is not solving these problems, but
      +    localizing them to well-defined small parts of the application - namely,
      +    around calls to ``untaint()``.
       
      +    The ``TaintError`` exception deliberately does not include any
      +    useful error messages, because they might give information away.
      +    Of course, this makes debugging quite a bit harder; a difficult
      +    problem to solve properly.  So far we have implemented a way to peek in a Taint
      +    Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that
      +    prints the exception as soon as a Bomb is created - both write
      +    information to the low-level stderr of the application, where we hope
      +    that it is unlikely to be seen by anyone but the application
      +    developer.
       
      -Taint Atomic functions
      -----------------------
       
      -Occasionally, a more complicated computation must be performed on a
      -tainted object.  This requires first untainting the object, performing the
      -computations, and then carefully tainting the result again (including
      -hiding all exceptions into Bombs).
      +    Taint Atomic functions
      +    ----------------------
       
      -There is a built-in decorator that does this for you::
      +    Occasionally, a more complicated computation must be performed on a
      +    tainted object.  This requires first untainting the object, performing the
      +    computations, and then carefully tainting the result again (including
      +    hiding all exceptions into Bombs).
       
      -    >>>> @__pypy__.taint_atomic
      -    >>>> def myop(x, y):
      -    ....     while x > 0:
      -    ....         x -= y
      -    ....     return x
      -    ....
      -    >>>> myop(42, 10)
      -    -8
      -    >>>> z = myop(taint(42), 10)
      -    >>>> z
      -    TaintError
      -    >>>> untaint(int, z)
      -    -8
      +    There is a built-in decorator that does this for you::
       
      -The decorator makes a whole function behave like a built-in operation.
      -If no tainted argument is passed in, the function behaves normally.  But
      -if any of the arguments is tainted, it is automatically untainted - so
      -the function body always sees untainted arguments - and the eventual
      -result is tainted again (possibly in a Taint Bomb).
      +        >>>> @__pypy__.taint_atomic
      +        >>>> def myop(x, y):
      +        ....     while x > 0:
      +        ....         x -= y
      +        ....     return x
      +        ....
      +        >>>> myop(42, 10)
      +        -8
      +        >>>> z = myop(taint(42), 10)
      +        >>>> z
      +        TaintError
      +        >>>> untaint(int, z)
      +        -8
       
      -It is important for the function marked as ``taint_atomic`` to have no
      -visible side effects, as these could cause information leakage.
      -This is currently not enforced, which means that all ``taint_atomic``
      -functions have to be carefully reviewed for security (but not the
      -callers of ``taint_atomic`` functions).
      +    The decorator makes a whole function behave like a built-in operation.
      +    If no tainted argument is passed in, the function behaves normally.  But
      +    if any of the arguments is tainted, it is automatically untainted - so
      +    the function body always sees untainted arguments - and the eventual
      +    result is tainted again (possibly in a Taint Bomb).
       
      -A possible future extension would be to forbid side-effects on
      -non-tainted objects from all ``taint_atomic`` functions.
      +    It is important for the function marked as ``taint_atomic`` to have no
      +    visible side effects, as these could cause information leakage.
      +    This is currently not enforced, which means that all ``taint_atomic``
      +    functions have to be carefully reviewed for security (but not the
      +    callers of ``taint_atomic`` functions).
       
      -An example of usage: given a tainted object ``passwords_db`` that
      -references a database of passwords, we can write a function
      -that checks if a password is valid as follows::
      +    A possible future extension would be to forbid side-effects on
      +    non-tainted objects from all ``taint_atomic`` functions.
       
      -    @taint_atomic
      -    def validate(passwords_db, username, password):
      -        assert type(passwords_db) is PasswordDatabase
      -        assert type(username) is str
      -        assert type(password) is str
      -        ...load username entry from passwords_db...
      -        return expected_password == password
      +    An example of usage: given a tainted object ``passwords_db`` that
      +    references a database of passwords, we can write a function
      +    that checks if a password is valid as follows::
       
      -It returns a tainted boolean answer, or a Taint Bomb if something
      -went wrong.  A caller can do::
      +        @taint_atomic
      +        def validate(passwords_db, username, password):
      +            assert type(passwords_db) is PasswordDatabase
      +            assert type(username) is str
      +            assert type(password) is str
      +            ...load username entry from passwords_db...
      +            return expected_password == password
       
      -    ok = validate(passwords_db, 'john', '1234')
      -    ok = untaint(bool, ok)
      +    It returns a tainted boolean answer, or a Taint Bomb if something
      +    went wrong.  A caller can do::
       
      -This can give three outcomes: ``True``, ``False``, or a ``TaintError``
      -exception (with no information on it) if anything went wrong.  If even
      -this is considered giving too much information away, the ``False`` case
      -can be made indistinguishable from the ``TaintError`` case (simply by
      -raising an exception in ``validate()`` if the password is wrong).
      +        ok = validate(passwords_db, 'john', '1234')
      +        ok = untaint(bool, ok)
       
      -In the above example, the security results achieved are the following:
      -as long as ``validate()`` does not leak information, no other part of
      -the code can obtain more information about a passwords database than a
      -Yes/No answer to a precise query.
      +    This can give three outcomes: ``True``, ``False``, or a ``TaintError``
      +    exception (with no information on it) if anything went wrong.  If even
      +    this is considered giving too much information away, the ``False`` case
      +    can be made indistinguishable from the ``TaintError`` case (simply by
      +    raising an exception in ``validate()`` if the password is wrong).
       
      -A possible extension of the ``taint_atomic`` decorator would be to check
      -the argument types, as ``untaint()`` does, for the same reason: to
      -prevent bugs where a function like ``validate()`` above is accidentally
      -called with the wrong kind of tainted object, which would make it
      -misbehave.  For now, all ``taint_atomic`` functions should be
      -conservative and carefully check all assumptions on their input
      -arguments.
      +    In the above example, the security results achieved are the following:
      +    as long as ``validate()`` does not leak information, no other part of
      +    the code can obtain more information about a passwords database than a
      +    Yes/No answer to a precise query.
       
      +    A possible extension of the ``taint_atomic`` decorator would be to check
      +    the argument types, as ``untaint()`` does, for the same reason: to
      +    prevent bugs where a function like ``validate()`` above is accidentally
      +    called with the wrong kind of tainted object, which would make it
      +    misbehave.  For now, all ``taint_atomic`` functions should be
      +    conservative and carefully check all assumptions on their input
      +    arguments.
       
      -.. _`taint-interface`:
       
      -Interface
      ----------
      +    .. _`taint-interface`:
       
      -.. _`like a built-in operation`:
      +    Interface
      +    ---------
       
      -The basic rule of the Tainted Object Space is that it introduces two new
      -kinds of objects, Tainted Boxes and Tainted Bombs (which are not types
      -in the Python sense).  Each box internally contains a regular object;
      -each bomb internally contains an exception object.  An operation
      -involving Tainted Boxes is performed on the objects contained in the
      -boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an
      -operation does not let an exception be raised).  An operation called
      -with a Tainted Bomb argument immediately returns the same Tainted Bomb.
      +    .. _`like a built-in operation`:
       
      -In a PyPy running with (or translated with) the Taint Object Space,
      -the ``__pypy__`` module exposes the following interface:
      +    The basic rule of the Tainted Object Space is that it introduces two new
      +    kinds of objects, Tainted Boxes and Tainted Bombs (which are not types
      +    in the Python sense).  Each box internally contains a regular object;
      +    each bomb internally contains an exception object.  An operation
      +    involving Tainted Boxes is performed on the objects contained in the
      +    boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an
      +    operation does not let an exception be raised).  An operation called
      +    with a Tainted Bomb argument immediately returns the same Tainted Bomb.
       
      -* ``taint(obj)``
      +    In a PyPy running with (or translated with) the Taint Object Space,
      +    the ``__pypy__`` module exposes the following interface:
       
      -    Return a new Tainted Box wrapping ``obj``.  Return ``obj`` itself
      -    if it is already tainted (a Box or a Bomb).
      +    * ``taint(obj)``
       
      -* ``is_tainted(obj)``
      +        Return a new Tainted Box wrapping ``obj``.  Return ``obj`` itself
      +        if it is already tainted (a Box or a Bomb).
       
      -    Check if ``obj`` is tainted (a Box or a Bomb).
      +    * ``is_tainted(obj)``
       
      -* ``untaint(type, obj)``
      +        Check if ``obj`` is tainted (a Box or a Bomb).
       
      -    Untaints ``obj`` if it is tainted.  Raise ``TaintError`` if the type
      -    of the untainted object is not exactly ``type``, or if ``obj`` is a
      -    Bomb.
      +    * ``untaint(type, obj)``
       
      -* ``taint_atomic(func)``
      +        Untaints ``obj`` if it is tainted.  Raise ``TaintError`` if the type
      +        of the untainted object is not exactly ``type``, or if ``obj`` is a
      +        Bomb.
       
      -    Return a wrapper function around the callable ``func``.  The wrapper
      -    behaves `like a built-in operation`_ with respect to untainting the
      -    arguments, tainting the result, and returning a Bomb.
      +    * ``taint_atomic(func)``
       
      -* ``TaintError``
      +        Return a wrapper function around the callable ``func``.  The wrapper
      +        behaves `like a built-in operation`_ with respect to untainting the
      +        arguments, tainting the result, and returning a Bomb.
       
      -    Exception.  On purpose, it provides no attribute or error message.
      +    * ``TaintError``
       
      -* ``_taint_debug(level)``
      +        Exception.  On purpose, it provides no attribute or error message.
       
      -    Set the debugging level to ``level`` (0=off).  At level 1 or above,
      -    all Taint Bombs print a diagnostic message to stderr when they are
      -    created.
      +    * ``_taint_debug(level)``
       
      -* ``_taint_look(obj)``
      +        Set the debugging level to ``level`` (0=off).  At level 1 or above,
      +        all Taint Bombs print a diagnostic message to stderr when they are
      +        created.
       
      -    For debugging purposes: prints (to stderr) the type and address of
      -    the object in a Tainted Box, or prints the exception if ``obj`` is
      -    a Taint Bomb.
      +    * ``_taint_look(obj)``
      +
      +        For debugging purposes: prints (to stderr) the type and address of
      +        the object in a Tainted Box, or prints the exception if ``obj`` is
      +        a Taint Bomb.
       
       
       .. _dump:
      
      diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
      --- a/pypy/doc/index.rst
      +++ b/pypy/doc/index.rst
      @@ -273,8 +273,6 @@
       `pypy/objspace/dump.py`_           the dump object space saves a large, searchable log file
                                          with all operations
       
      -`pypy/objspace/taint.py`_          the `taint object space`_, providing object tainting
      -
       `pypy/objspace/thunk.py`_          the `thunk object space`_, providing unique object features 
       
       `pypy/objspace/flow/`_             the FlowObjSpace_ implementing `abstract interpretation`_
      
      diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst
      --- a/pypy/doc/__pypy__-module.rst
      +++ b/pypy/doc/__pypy__-module.rst
      @@ -37,27 +37,29 @@
       .. _`thunk object space docs`: objspace-proxies.html#thunk
       .. _`interface section of the thunk object space docs`: objspace-proxies.html#thunk-interface
       
      -Taint Object Space Functionality
      -================================
      +.. broken:
       
      -When the taint object space is used (choose with :config:`objspace.name`),
      -the following names are put into ``__pypy__``:
      +    Taint Object Space Functionality
      +    ================================
       
      - - ``taint``
      - - ``is_tainted``
      - - ``untaint``
      - - ``taint_atomic``
      - - ``_taint_debug``
      - - ``_taint_look``
      - - ``TaintError``
      +    When the taint object space is used (choose with :config:`objspace.name`),
      +    the following names are put into ``__pypy__``:
       
      -Those are all described in the `interface section of the taint object space
      -docs`_.
      +     - ``taint``
      +     - ``is_tainted``
      +     - ``untaint``
      +     - ``taint_atomic``
      +     - ``_taint_debug``
      +     - ``_taint_look``
      +     - ``TaintError``
       
      -For more detailed explanations and examples see the `taint object space docs`_.
      +    Those are all described in the `interface section of the taint object space
      +    docs`_.
       
      -.. _`taint object space docs`: objspace-proxies.html#taint
      -.. _`interface section of the taint object space docs`: objspace-proxies.html#taint-interface
      +    For more detailed explanations and examples see the `taint object space docs`_.
      +
      +    .. _`taint object space docs`: objspace-proxies.html#taint
      +    .. _`interface section of the taint object space docs`: objspace-proxies.html#taint-interface
       
       Transparent Proxy Functionality
       ===============================
      
      diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst
      --- a/pypy/doc/interpreter.rst
      +++ b/pypy/doc/interpreter.rst
      @@ -146,21 +146,15 @@
         file location can be constructed for tracebacks 
       
       Moreover the Frame class itself has a number of methods which implement
      -the actual bytecodes found in a code object.  In fact, PyPy already constructs 
      -four specialized Frame class variants depending on the code object: 
      +the actual bytecodes found in a code object.  The methods of the ``PyFrame``
      +class are added in various files:
       
      -- PyInterpFrame (in `pypy/interpreter/pyopcode.py`_)  for
      -  basic simple code objects (not involving generators or nested scopes) 
      +- the class ``PyFrame`` is defined in `pypy/interpreter/pyframe.py`_.
       
      -- PyNestedScopeFrame (in `pypy/interpreter/nestedscope.py`_) 
      -  for code objects that reference nested scopes, inherits from PyInterpFrame
       +- the file `pypy/interpreter/pyopcode.py`_ adds support for all Python opcodes.
       
      -- PyGeneratorFrame (in `pypy/interpreter/generator.py`_) 
      -  for code objects that yield values to the caller, inherits from PyInterpFrame
      -
      -- PyNestedScopeGeneratorFrame for code objects that reference
      -  nested scopes and yield values to the caller, inherits from both PyNestedScopeFrame
      -  and PyGeneratorFrame 
      +- nested scope support is added to the ``PyFrame`` class in
      +  `pypy/interpreter/nestedscope.py`_.
       
       .. _Code: 
       
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:27:54 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 14:27:54 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: update contributors
      Message-ID: <20110430122754.D9C0D36C217@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: documentation-cleanup
      Changeset: r43803:54cee1ad5488
      Date: 2011-04-30 14:22 +0200
      http://bitbucket.org/pypy/pypy/changeset/54cee1ad5488/
      
      Log:	update contributors
      
      diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
      --- a/pypy/doc/tool/makecontributor.py
      +++ b/pypy/doc/tool/makecontributor.py
      @@ -5,6 +5,9 @@
       """
       import py
       
      +# this file is useless, use the following commandline instead:
      +# hg churn -c -t "{author}" | sed -e 's/ <.*//'
      +
       try: 
           path = py.std.sys.argv[1]
       except IndexError: 
      
      diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
      --- a/pypy/doc/contributor.rst
      +++ b/pypy/doc/contributor.rst
      @@ -6,100 +6,155 @@
       code base, ordered by number of commits (which is certainly not a very
       appropriate measure but it's something)::
       
      -
           Armin Rigo
           Maciej Fijalkowski
           Carl Friedrich Bolz
      +    Amaury Forgeot d'Arc
      +    Antonio Cuni
           Samuele Pedroni
      -    Antonio Cuni
           Michael Hudson
      +    Holger Krekel
           Christian Tismer
      -    Holger Krekel
      +    Benjamin Peterson
           Eric van Riet Paap
      +    Anders Chrigström
      +    Håkan Ardö
           Richard Emslie
      -    Anders Chrigstrom
      -    Amaury Forgeot d Arc
      -    Aurelien Campeas
      +    Dan Villiom Podlaski Christiansen
      +    Alexander Schremmer
      +    Alex Gaynor
      +    David Schneider
      +    Aurelién Campeas
           Anders Lehmann
      +    Camillo Bruni
           Niklaus Haldimann
      +    Leonardo Santagada
      +    Toon Verwaest
           Seo Sanghyeon
      -    Leonardo Santagada
           Lawrence Oluyede
      +    Bartosz Skowron
           Jakub Gustak
           Guido Wesdorp
      -    Benjamin Peterson
      -    Alexander Schremmer
      +    Adrien Di Mascio
      +    Laura Creighton
      +    Ludovic Aubry
           Niko Matsakis
      -    Ludovic Aubry
      +    Daniel Roberts
      +    Jason Creighton
      +    Jacob Hallén
           Alex Martelli
      -    Toon Verwaest
      +    Anders Hammarquist
      +    Jan de Mooij
           Stephan Diehl
      -    Adrien Di Mascio
      +    Michael Foord
           Stefan Schwarzer
           Tomek Meka
           Patrick Maupin
      -    Jacob Hallen
      -    Laura Creighton
           Bob Ippolito
      -    Camillo Bruni
      -    Simon Burton
           Bruno Gola
           Alexandre Fayolle
           Marius Gedminas
      +    Simon Burton
      +    Jean-Paul Calderone
      +    John Witulski
      +    Wim Lavrijsen
      +    Andreas Stührk
      +    Jean-Philippe St. Pierre
           Guido van Rossum
      +    Pavel Vinogradov
           Valentino Volonghi
      +    Paul deGrandis
           Adrian Kuhn
      -    Paul deGrandis
      +    tav
      +    Georg Brandl
           Gerald Klix
           Wanja Saatkamp
      -    Anders Hammarquist
      +    Boris Feigin
           Oscar Nierstrasz
      +    Dario Bertini
      +    David Malcolm
           Eugene Oden
      +    Henry Mason
           Lukas Renggli
           Guenter Jantzen
      +    Ronny Pfannschmidt
      +    Bert Freudenberg
      +    Amit Regmi
      +    Ben Young
      +    Nicolas Chauvat
      +    Andrew Durdin
      +    Michael Schneider
      +    Nicholas Riley
      +    Rocco Moretti
      +    Gintautas Miliauskas
      +    Michael Twomey
      +    Igor Trindade Oliveira
      +    Lucian Branescu Mihaila
      +    Olivier Dormond
      +    Jared Grubb
      +    Karl Bartel
      +    Gabriel Lavoie
      +    Brian Dorsey
      +    Victor Stinner
      +    Stuart Williams
      +    Toby Watson
      +    Antoine Pitrou
      +    Justas Sadzevicius
      +    Neil Shepperd
      +    Mikael Schönenberg
      +    Gasper Zejn
      +    Jonathan David Riehl
      +    Elmo Mäntynen
      +    Anders Qvist
      +    Beatrice Düring
      +    Alexander Sedov
      +    Vincent Legoll
      +    Alan McIntyre
      +    Romain Guillebert
      +    Alex Perry
      +    Jens-Uwe Mager
      +    Dan Stromberg
      +    Lukas Diekmann
      +    Carl Meyer
      +    Pieter Zieschang
      +    Alejandro J. Cura
      +    Sylvain Thenault
      +    Travis Francis Athougies
      +    Henrik Vendelbo
      +    Lutz Paelike
      +    Jacob Oscarson
      +    Martin Blais
      +    Lucio Torre
      +    Lene Wagner
      +    Miguel de Val Borro
      +    Ignas Mikalajunas
      +    Artur Lisiecki
      +    Joshua Gilbert
      +    Godefroid Chappelle
      +    Yusei Tahara
      +    Christopher Armstrong
      +    Stephan Busemann
      +    Gustavo Niemeyer
      +    William Leslie
      +    Akira Li
      +    Kristján Valur Jonsson
      +    Bobby Impollonia
      +    Andrew Thompson
      +    Anders Sigfridsson
      +    Jacek Generowicz
      +    Dan Colish
      +    Sven Hager
      +    Zooko Wilcox-O Hearn
      +    Anders Hammarquist
           Dinu Gherman
      -    Bartosz Skowron
      -    Georg Brandl
      -    Ben Young
      -    Jean-Paul Calderone
      -    Nicolas Chauvat
      -    Rocco Moretti
      -    Michael Twomey
      -    boria
      -    Jared Grubb
      -    Olivier Dormond
      -    Stuart Williams
      -    Jens-Uwe Mager
      -    Justas Sadzevicius
      -    Mikael Schönenberg
      -    Brian Dorsey
      -    Jonathan David Riehl
      -    Beatrice During
      -    Elmo Mäntynen
      -    Andreas Friedge
      -    Alex Gaynor
      -    Anders Qvist
      -    Alan McIntyre
      -    Bert Freudenberg
      -    Pieter Zieschang
      -    Jacob Oscarson
      -    Lutz Paelike
      -    Michael Schneider
      -    Artur Lisiecki
      -    Lene Wagner
      -    Christopher Armstrong
      -    Jan de Mooij
      -    Jacek Generowicz
      -    Gasper Zejn
      -    Stephan Busemann
      -    Yusei Tahara
      -    Godefroid Chappelle
      -    Toby Watson
      -    Andrew Thompson
      -    Joshua Gilbert
      -    Anders Sigfridsson
      -    David Schneider
      +    Dan Colish
      +    Daniel Neuhäuser
           Michael Chermside
      -    tav
      -    Martin Blais
      -    Victor Stinner
      +    Konrad Delong
      +    Anna Ravencroft
      +    Greg Price
      +    Armin Ronacher
      +    Jim Baker
      +    Philip Jenvey
      +    Rodrigo Araújo
      +
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:27:56 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 14:27:56 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: update the license too
      Message-ID: <20110430122756.ED172282B55@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: documentation-cleanup
      Changeset: r43804:f6fa2ff6a591
      Date: 2011-04-30 14:26 +0200
      http://bitbucket.org/pypy/pypy/changeset/f6fa2ff6a591/
      
      Log:	update the license too
      
      diff --git a/LICENSE b/LICENSE
      --- a/LICENSE
      +++ b/LICENSE
      @@ -37,78 +37,154 @@
           Armin Rigo
           Maciej Fijalkowski
           Carl Friedrich Bolz
      +    Amaury Forgeot d'Arc
      +    Antonio Cuni
           Samuele Pedroni
      -    Antonio Cuni
           Michael Hudson
      +    Holger Krekel
           Christian Tismer
      -    Holger Krekel
      +    Benjamin Peterson
           Eric van Riet Paap
      +    Anders Chrigström
      +    Håkan Ardö
           Richard Emslie
      -    Anders Chrigstrom
      -    Amaury Forgeot d Arc
      -    Aurelien Campeas
      +    Dan Villiom Podlaski Christiansen
      +    Alexander Schremmer
      +    Alex Gaynor
      +    David Schneider
      +    Aurelién Campeas
           Anders Lehmann
      +    Camillo Bruni
           Niklaus Haldimann
      +    Leonardo Santagada
      +    Toon Verwaest
           Seo Sanghyeon
      -    Leonardo Santagada
           Lawrence Oluyede
      +    Bartosz Skowron
           Jakub Gustak
           Guido Wesdorp
      -    Benjamin Peterson
      -    Alexander Schremmer
      +    Adrien Di Mascio
      +    Laura Creighton
      +    Ludovic Aubry
           Niko Matsakis
      -    Ludovic Aubry
      +    Daniel Roberts
      +    Jason Creighton
      +    Jacob Hallén
           Alex Martelli
      -    Toon Verwaest
      +    Anders Hammarquist
      +    Jan de Mooij
           Stephan Diehl
      -    Adrien Di Mascio
      +    Michael Foord
           Stefan Schwarzer
           Tomek Meka
           Patrick Maupin
      -    Jacob Hallen
      -    Laura Creighton
           Bob Ippolito
      -    Camillo Bruni
      -    Simon Burton
           Bruno Gola
           Alexandre Fayolle
           Marius Gedminas
      +    Simon Burton
      +    Jean-Paul Calderone
      +    John Witulski
      +    Wim Lavrijsen
      +    Andreas Stührk
      +    Jean-Philippe St. Pierre
           Guido van Rossum
      +    Pavel Vinogradov
           Valentino Volonghi
      +    Paul deGrandis
           Adrian Kuhn
      -    Paul deGrandis
      +    tav
      +    Georg Brandl
           Gerald Klix
           Wanja Saatkamp
      -    Anders Hammarquist
      +    Boris Feigin
           Oscar Nierstrasz
      +    Dario Bertini
      +    David Malcolm
           Eugene Oden
      +    Henry Mason
           Lukas Renggli
           Guenter Jantzen
      +    Ronny Pfannschmidt
      +    Bert Freudenberg
      +    Amit Regmi
      +    Ben Young
      +    Nicolas Chauvat
      +    Andrew Durdin
      +    Michael Schneider
      +    Nicholas Riley
      +    Rocco Moretti
      +    Gintautas Miliauskas
      +    Michael Twomey
      +    Igor Trindade Oliveira
      +    Lucian Branescu Mihaila
      +    Olivier Dormond
      +    Jared Grubb
      +    Karl Bartel
      +    Gabriel Lavoie
      +    Brian Dorsey
      +    Victor Stinner
      +    Stuart Williams
      +    Toby Watson
      +    Antoine Pitrou
      +    Justas Sadzevicius
      +    Neil Shepperd
      +    Mikael Schönenberg
      +    Gasper Zejn
      +    Jonathan David Riehl
      +    Elmo Mäntynen
      +    Anders Qvist
      +    Beatrice Düring
      +    Alexander Sedov
      +    Vincent Legoll
      +    Alan McIntyre
      +    Romain Guillebert
      +    Alex Perry
      +    Jens-Uwe Mager
      +    Dan Stromberg
      +    Lukas Diekmann
      +    Carl Meyer
      +    Pieter Zieschang
      +    Alejandro J. Cura
      +    Sylvain Thenault
      +    Travis Francis Athougies
      +    Henrik Vendelbo
      +    Lutz Paelike
      +    Jacob Oscarson
      +    Martin Blais
      +    Lucio Torre
      +    Lene Wagner
      +    Miguel de Val Borro
      +    Ignas Mikalajunas
      +    Artur Lisiecki
      +    Joshua Gilbert
      +    Godefroid Chappelle
      +    Yusei Tahara
      +    Christopher Armstrong
      +    Stephan Busemann
      +    Gustavo Niemeyer
      +    William Leslie
      +    Akira Li
      +    Kristján Valur Jonsson
      +    Bobby Impollonia
      +    Andrew Thompson
      +    Anders Sigfridsson
      +    Jacek Generowicz
      +    Dan Colish
      +    Sven Hager
      +    Zooko Wilcox-O Hearn
      +    Anders Hammarquist
           Dinu Gherman
      -    Bartosz Skowron
      -    Georg Brandl
      -    Ben Young
      -    Jean-Paul Calderone
      -    Nicolas Chauvat
      -    Rocco Moretti
      -    Michael Twomey
      -    boria
      -    Jared Grubb
      -    Olivier Dormond
      -    Stuart Williams
      -    Jens-Uwe Mager
      -    Justas Sadzevicius
      -    Mikael Schönenberg
      -    Brian Dorsey
      -    Jonathan David Riehl
      -    Beatrice During
      -    Elmo Mäntynen
      -    Andreas Friedge
      -    Alex Gaynor
      -    Anders Qvist
      -    Alan McIntyre
      -    Bert Freudenberg
      -    Tav
      +    Dan Colish
      +    Daniel Neuhäuser
      +    Michael Chermside
      +    Konrad Delong
      +    Anna Ravencroft
      +    Greg Price
      +    Armin Ronacher
      +    Jim Baker
      +    Philip Jenvey
      +    Rodrigo Araújo
       
           Heinrich-Heine University, Germany 
           Open End AB (formerly AB Strakt), Sweden
      
      From commits-noreply at bitbucket.org  Sat Apr 30 14:29:33 2011
      From: commits-noreply at bitbucket.org (arigo)
      Date: Sat, 30 Apr 2011 14:29:33 +0200 (CEST)
      Subject: [pypy-svn] pypy post-release-1.5: Bug fix. I think it's not
      	possible to run this	function from a test
      Message-ID: <20110430122933.5FCAE36C217@codespeak.net>
      
      Author: Armin Rigo 
      Branch: post-release-1.5
      Changeset: r43805:2e8c1b2bb265
      Date: 2011-04-30 14:29 +0200
      http://bitbucket.org/pypy/pypy/changeset/2e8c1b2bb265/
      
      Log:	Bug fix. I think it's not possible to run this function from a test
      	right now.
      
      diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
      --- a/pypy/objspace/std/setobject.py
      +++ b/pypy/objspace/std/setobject.py
      @@ -310,7 +310,7 @@
       
       def ne__Set_settypedef(space, w_left, w_other):
           rd = make_setdata_from_w_iterable(space, w_other)
      -    return space.wrap(_is_eq(w_left.setdata, rd))
      +    return space.wrap(not _is_eq(w_left.setdata, rd))
       
       ne__Set_frozensettypedef = ne__Set_settypedef
       ne__Frozenset_settypedef = ne__Set_settypedef
      
      From commits-noreply at bitbucket.org  Sat Apr 30 15:39:25 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 15:39:25 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: (arigo,
      	cfbolz) document __builtins__ behaviour.
      Message-ID: <20110430133925.CB9AC282B52@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: documentation-cleanup
      Changeset: r43806:8cf94069786d
      Date: 2011-04-30 15:39 +0200
      http://bitbucket.org/pypy/pypy/changeset/8cf94069786d/
      
      Log:	(arigo, cfbolz) document __builtins__ behaviour.
      
      diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
      --- a/pypy/doc/cpython_differences.rst
      +++ b/pypy/doc/cpython_differences.rst
      @@ -127,7 +127,7 @@
       adopted by Jython or IronPython (or any other port of Python to Java or
       .NET, like PyPy itself).
       
      -This affects the precise time at which __del__ methods are called, which
      +This affects the precise time at which ``__del__`` methods are called, which
       is not reliable in PyPy (nor Jython nor IronPython).  It also means that
       weak references may stay alive for a bit longer than expected.  This
       makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
      @@ -137,12 +137,12 @@
       ``ReferenceError`` at any place that uses them.
       
       There are a few extra implications for the difference in the GC.  Most
      -notably, if an object has a __del__, the __del__ is never called more
      -than once in PyPy; but CPython will call the same __del__ several times
      -if the object is resurrected and dies again.  The __del__ methods are
      +notably, if an object has a ``__del__``, the ``__del__`` is never called more
      +than once in PyPy; but CPython will call the same ``__del__`` several times
      +if the object is resurrected and dies again.  The ``__del__`` methods are
       called in "the right" order if they are on objects pointing to each
       other, as in CPython, but unlike CPython, if there is a dead cycle of
      -objects referencing each other, their __del__ methods are called anyway;
      +objects referencing each other, their ``__del__`` methods are called anyway;
       CPython would instead put them into the list ``garbage`` of the ``gc``
       module.  More information is available on the blog `[1]`__ `[2]`__.
       
      @@ -155,7 +155,7 @@
       and calling it a lot can lead to performance problem.
       
       Note that if you have a long chain of objects, each with a reference to
      -the next one, and each with a __del__, PyPy's GC will perform badly.  On
      +the next one, and each with a ``__del__``, PyPy's GC will perform badly.  On
       the bright side, in most other cases, benchmarks have shown that PyPy's
       GCs perform much better than CPython's.
       
      @@ -234,5 +234,9 @@
         it could be supported, but then it will likely work in many
         *more* case on PyPy than on CPython 2.6/2.7.)
       
      +* the ``__builtins__`` name is always referencing the ``__builtin__`` module,
      +  never a dictionary as it sometimes is in CPython. Assigning to
      +  ``__builtins__`` has no effect.
       
       .. include:: _ref.txt
      +
      
      From commits-noreply at bitbucket.org  Sat Apr 30 15:48:24 2011
      From: commits-noreply at bitbucket.org (arigo)
      Date: Sat, 30 Apr 2011 15:48:24 +0200 (CEST)
      Subject: [pypy-svn] pypy default: Added tag release-1.5 for changeset
      	b590cf6de419
      Message-ID: <20110430134824.BF10E282B52@codespeak.net>
      
      Author: Armin Rigo 
      Branch: 
      Changeset: r43807:e08b205f88bb
      Date: 2011-04-30 15:44 +0200
      http://bitbucket.org/pypy/pypy/changeset/e08b205f88bb/
      
      Log:	Added tag release-1.5 for changeset b590cf6de419
      
      diff --git a/.hgtags b/.hgtags
      new file mode 100644
      --- /dev/null
      +++ b/.hgtags
      @@ -0,0 +1,1 @@
      +b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5
      
      From commits-noreply at bitbucket.org  Sat Apr 30 15:52:10 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 15:52:10 +0200 (CEST)
      Subject: [pypy-svn] pypy documentation-cleanup: (cfbolz,
      	lac): replace a few codespeak links
      Message-ID: <20110430135210.9781F282B52@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: documentation-cleanup
      Changeset: r43808:de55c45a712a
      Date: 2011-04-30 15:51 +0200
      http://bitbucket.org/pypy/pypy/changeset/de55c45a712a/
      
      Log:	(cfbolz, lac): replace a few codespeak links
      
      diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
      --- a/pypy/doc/faq.rst
      +++ b/pypy/doc/faq.rst
      @@ -50,7 +50,7 @@
       --------------------------------------------
       
       We have experimental support for CPython extension modules, so
      -they run with minor changes.  This has been a part of pypy since
      +they run with minor changes.  This has been a part of PyPy since
       the 1.4 release, but support is still in beta phase.  CPython
       extension modules in PyPy are often much slower than in CPython due to
       the need to emulate refcounting.  It is often faster to take out your
      
      diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
      --- a/pypy/doc/index.rst
      +++ b/pypy/doc/index.rst
      @@ -128,9 +128,9 @@
       Windows, on top of .NET, and on top of Java.
       To dig into PyPy it is recommended to try out the current
       Subversion HEAD, which is always working or mostly working,
      -instead of the latest release, which is `1.2.0`__.
      +instead of the latest release, which is `1.5`__.
       
      -.. __: release-1.2.0.html
      +.. __: release-1.5.0.html
       
       PyPy is mainly developed on Linux and Mac OS X.  Windows is supported,
       but platform-specific bugs tend to take longer before we notice and fix
      
      diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst
      --- a/pypy/doc/translation.rst
      +++ b/pypy/doc/translation.rst
      @@ -385,7 +385,7 @@
       The RPython Typer
       =================
       
      -http://codespeak.net/pypy/trunk/pypy/rpython/
      +https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/
       
       The RTyper is the first place where the choice of backend makes a
       difference; as outlined above we are assuming that ANSI C is the target.
      @@ -451,7 +451,7 @@
       `D07.1 Massive Parallelism and Translation Aspects`_ for further details.
       
       .. _`Technical report`: 
      -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
      +.. _`D07.1 Massive Parallelism and Translation Aspects`: https://bitbucket.org/pypy/extradoc/raw/ee3059291497/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
       
       Backend Optimizations
       ---------------------
      @@ -625,7 +625,7 @@
       The C Back-End
       ==============
       
      -http://codespeak.net/pypy/trunk/pypy/translator/c/
      +https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/
       
       GenC is usually the most actively maintained backend -- everyone working on
       PyPy has a C compiler, for one thing -- and is usually where new features are
      
      diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
      --- a/pypy/doc/coding-guide.rst
      +++ b/pypy/doc/coding-guide.rst
      @@ -877,7 +877,7 @@
           def test_something(space):
               # use space ...
       
      -    class TestSomething:
      +    class TestSomething(object):
               def test_some(self):
                   # use 'self.space' here
       
      @@ -898,7 +898,7 @@
           def app_test_something():
               # application level test code
       
      -    class AppTestSomething:
      +    class AppTestSomething(object):
               def test_this(self):
                   # application level test code
       
      @@ -914,11 +914,8 @@
       attached to the class there and start with ``w_`` can be accessed
       via self (but without the ``w_``) in the actual test method. An example::
       
      -    from pypy.objspace.std import StdObjSpace 
      -
      -    class AppTestErrno: 
      -        def setup_class(cls): 
      -            cls.space = StdObjSpace()
      +    class AppTestErrno(object):
      +        def setup_class(cls):
                   cls.w_d = cls.space.wrap({"a": 1, "b", 2})
       
               def test_dict(self):
      @@ -946,7 +943,7 @@
         actually can fail.)
       
       - All over the pypy source code there are test/ directories
      -  which contain unittests.  Such scripts can usually be executed
      +  which contain unit tests.  Such scripts can usually be executed
         directly or are collectively run by pypy/test_all.py
       
       .. _`change documentation and website`:
      
      From commits-noreply at bitbucket.org  Sat Apr 30 16:01:35 2011
      From: commits-noreply at bitbucket.org (cfbolz)
      Date: Sat, 30 Apr 2011 16:01:35 +0200 (CEST)
      Subject: [pypy-svn] pypy default: merge the documentation-cleanup branch
      Message-ID: <20110430140135.42560282B52@codespeak.net>
      
      Author: Carl Friedrich Bolz 
      Branch: 
      Changeset: r43809:d52d395aac40
      Date: 2011-04-30 15:55 +0200
      http://bitbucket.org/pypy/pypy/changeset/d52d395aac40/
      
      Log:	merge the documentation-cleanup branch
      
      diff --git a/pypy/doc/config/objspace.std.optimized_int_add.rst b/pypy/doc/config/objspace.std.optimized_int_add.txt
      copy from pypy/doc/config/objspace.std.optimized_int_add.rst
      copy to pypy/doc/config/objspace.std.optimized_int_add.txt
      
      diff --git a/pypy/doc/throwaway.txt b/pypy/doc/throwaway.txt
      new file mode 100644
      --- /dev/null
      +++ b/pypy/doc/throwaway.txt
      @@ -0,0 +1,3 @@
      +.. warning::
      +
      +   This documentation should be removed (as discussed during the Gothenburg sprint in 2011)
      
      diff --git a/pypy/doc/config/objspace.std.withcelldict.rst b/pypy/doc/config/objspace.std.withcelldict.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withcelldict.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Enable cell-dicts. This optimization is not helpful without the JIT. In the
      -presence of the JIT, it greatly helps looking up globals.
      
      diff --git a/pypy/doc/config/objspace.std.withprebuiltint.rst b/pypy/doc/config/objspace.std.withprebuiltint.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withprebuiltint.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -This option enables the caching of small integer objects (similar to what
      -CPython does). The range of which integers are cached can be influenced with
      -the :config:`objspace.std.prebuiltintfrom` and
      -:config:`objspace.std.prebuiltintto` options.
      -
      
      diff --git a/pypy/doc/config/objspace.honor__builtins__.rst b/pypy/doc/config/objspace.honor__builtins__.txt
      copy from pypy/doc/config/objspace.honor__builtins__.rst
      copy to pypy/doc/config/objspace.honor__builtins__.txt
      
      diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
      --- a/pypy/doc/discussions.rst
      +++ b/pypy/doc/discussions.rst
      @@ -7,35 +7,11 @@
       
       
       .. toctree::
      +	
      +	discussion/finalizer-order.rst
      +	discussion/howtoimplementpickling.rst
      +	discussion/improve-rpython.rst
      +	discussion/outline-external-ootype.rst
      +	discussion/VM-integration.rst
       
      -   discussion/GC-performance.rst
      -   discussion/VM-integration.rst
      -   discussion/chained_getattr.rst
      -   discussion/cli-optimizations.rst
      -   discussion/cmd-prompt-translation.rst
      -   discussion/compiled-swamp.rst
      -   discussion/ctypes_modules.rst
      -   discussion/ctypes_todo.rst
      -   discussion/distribution.rst
      -   discussion/distribution-implementation.rst
      -   discussion/distribution-newattempt.rst
      -   discussion/distribution-roadmap.rst
      -   discussion/emptying-the-malloc-zoo.rst
      -   discussion/finalizer-order.rst
      -   discussion/gc.rst
      -   discussion/howtoimplementpickling.rst
      -   discussion/improve-rpython.rst
      -   discussion/outline-external-ootype.rst
      -   discussion/oz-thread-api.rst
      -   discussion/paper-wishlist.rst
      -   discussion/parsing-ideas.rst
      -   discussion/pypy_metaclasses_in_cl.rst
      -   discussion/removing-stable-compiler.rst
      -   discussion/security-ideas.rst
      -   discussion/somepbc-refactoring-plan.rst
      -   discussion/summer-of-pypy-pytest.rst
      -   discussion/testing-zope.rst
      -   discussion/thoughts_string_interning.rst
      -   discussion/translation-swamp.rst
      -   discussion/use_case_of_logic.rst
       
      
      diff --git a/pypy/doc/discussion/cmd-prompt-translation.rst b/pypy/doc/discussion/cmd-prompt-translation.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/cmd-prompt-translation.rst
      +++ /dev/null
      @@ -1,18 +0,0 @@
      -
      -t = Translation(entry_point[,])
      -t.annotate([])
      -t.rtype([])
      -t.backendopt[_]([])
      -t.source[_]([])
      -f = t.compile[_]([])
      -
      -and t.view(), t.viewcg()
      -
      - = c|llvm (for now)
      -you can skip steps
      -
      - = argtypes (for annotation) plus 
      -            keyword args:  gc=...|policy= etc
      -
      -
      -
      
      diff --git a/pypy/doc/config/objspace.std.optimized_comparison_op.rst b/pypy/doc/config/objspace.std.optimized_comparison_op.txt
      copy from pypy/doc/config/objspace.std.optimized_comparison_op.rst
      copy to pypy/doc/config/objspace.std.optimized_comparison_op.txt
      
      diff --git a/pypy/doc/video-index.rst b/pypy/doc/video-index.rst
      --- a/pypy/doc/video-index.rst
      +++ b/pypy/doc/video-index.rst
      @@ -20,14 +20,14 @@
       such as `mplayer`_, `xine`_, `vlc`_ or the windows media player.
       
       .. _`mplayer`: http://www.mplayerhq.hu/design7/dload.html
      -.. _`xine`: http://xinehq.de/index.php/releases
      +.. _`xine`: http://www.xine-project.org
       .. _`vlc`: http://www.videolan.org/vlc/
       
       You can find the necessary codecs in the ffdshow-library:
      -http://ffdshow.sourceforge.net/tikiwiki/tiki-index.php
      +http://sourceforge.net/projects/ffdshow/
       
       or use the original divx codec (for Windows):
      -http://www.divx.com/divx/windows/download/index.php
      +http://www.divx.com/software/divx-plus
       
       
       Copyrights and Licensing 
      @@ -162,7 +162,7 @@
       PAL, 48 min, divx AVI
       
       Holger Krekel and Armin Rigo talk about the basic implementation,
      -implementation level aspects and the translation toolchain. This
      +implementation level aspects and the RPython translation toolchain. This
       talk also gives an insight into how a developer works with these tools on
       a daily basis, and pays special attention to flow graphs.
       
      @@ -184,7 +184,7 @@
       
       PAL, 44 min, divx AVI
       
      -Michael Hudson gives an in-depth, very technical introduction to a PyPy sprint. The film provides a detailed and hands-on overview about the architecture of PyPy, especially the translation toolchain.
      +Michael Hudson gives an in-depth, very technical introduction to a PyPy sprint. The film provides a detailed and hands-on overview about the architecture of PyPy, especially the RPython translation toolchain.
       
       
       Scripting .NET with IronPython by Jim Hugunin
      @@ -292,5 +292,5 @@
       
       PAL 72 min, DivX AVI
       
      -Core developers Armin Rigo, Samuele Pedroni and Carl Friedrich Bolz are giving an overview of the PyPy architecture, the standard interpreter, the translation toolchain and the just-in-time compiler.
      +Core developers Armin Rigo, Samuele Pedroni and Carl Friedrich Bolz are giving an overview of the PyPy architecture, the standard interpreter, the RPython translation toolchain and the just-in-time compiler.
       
      
      diff --git a/pypy/doc/config/index.rst b/pypy/doc/config/index.rst
      --- a/pypy/doc/config/index.rst
      +++ b/pypy/doc/config/index.rst
      @@ -50,3 +50,12 @@
       .. _`overview`: commandline.html
       .. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html
       .. _`What PyPy can do for your objects`: ../objspace-proxies.html
      +
      +
      +.. toctree::
      +    :maxdepth: 2
      +
      +    commandline
      +    translation
      +    objspace
      +    opt
      
      diff --git a/pypy/doc/config/translation.list_comprehension_operations.rst b/pypy/doc/config/translation.list_comprehension_operations.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.list_comprehension_operations.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Experimental optimization for list comprehensions in RPython.
      -
      
      diff --git a/pypy/doc/config/objspace.soabi.rst b/pypy/doc/config/objspace.soabi.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.soabi.rst
      +++ /dev/null
      @@ -1,14 +0,0 @@
      -This option controls the tag included into extension module file names.  The
      -default is something like `pypy-14`, which means that `import foo` will look for
      -a file named `foo.pypy-14.so` (or `foo.pypy-14.pyd` on Windows).
      -
      -This is an implementation of PEP3149_, with two differences:
      -
      - * the filename without tag `foo.so` is not considered.
      - * the feature is also available on Windows.
      -
      -When set to the empty string (with `--soabi=`), the interpreter will only look
      -for a file named `foo.so`, and will crash if this file was compiled for another
      -Python interpreter.
      -
      -.. _PEP3149: http://www.python.org/dev/peps/pep-3149/
      
      diff --git a/pypy/doc/config/objspace.usemodules._ast.rst b/pypy/doc/config/objspace.usemodules._ast.txt
      copy from pypy/doc/config/objspace.usemodules._ast.rst
      copy to pypy/doc/config/objspace.usemodules._ast.txt
      
      diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst
      --- a/pypy/doc/index-report.rst
      +++ b/pypy/doc/index-report.rst
      @@ -1,4 +1,7 @@
      -.. include:: crufty.rst
      +.. warning::
      +
      +   Some of these reports are interesting for historical reasons only.
      +
       
       ============================================
       PyPy - Overview over the EU-reports
      @@ -9,7 +12,7 @@
       They also are very good documentation if you'd like to know in more
       detail about motivation and implementation of the various parts 
       and aspects of PyPy.  Feel free to send questions or comments
      -to `pypy-dev`_, the development list. 
      +to `pypy-dev`_, the development list.
       
       Reports of 2007
       ===============
      @@ -93,8 +96,8 @@
       
       
       
      -.. _`py-lib`: http://codespeak.net/py/current/doc/
      -.. _`py.test`: http://codespeak.net/py/current/doc/test.html
      +.. _`py-lib`: http://pylib.org/
      +.. _`py.test`: http://pytest.org/
       .. _codespeak: http://codespeak.net/
       .. _`pypy-dev`: http://codespeak.net/mailman/listinfo/pypy-dev
       
      @@ -137,35 +140,35 @@
       `D14.1 Report about Milestone/Phase 1`_ describes what happened in the PyPy
       project during the first year of EU funding (December 2004 - December 2005)
       
      -.. _`PyPy EU Final Activity Report`: http://codespeak.net/pypy/extradoc/eu-report/PYPY-EU-Final-Activity-Report.pdf
      -.. _`D01.2-4 Project Organization`: http://codespeak.net/pypy/extradoc/eu-report/D01.2-4_Project_Organization-2007-03-28.pdf
      -.. _`D02.1 Development Tools and Website`: http://codespeak.net/pypy/extradoc/eu-report/D02.1_Development_Tools_and_Website-2007-03-21.pdf
      -.. _`D02.2 Release Scheme`: http://codespeak.net/svn/pypy/extradoc/eu-report/D02.2_Release_Scheme-2007-03-30.pdf
      -.. _`D02.3 Testing Tool`: http://codespeak.net/pypy/extradoc/eu-report/D02.3_Testing_Framework-2007-03-23.pdf
      -.. _`D03.1 Extension Compiler`: http://codespeak.net/pypy/extradoc/eu-report/D03.1_Extension_Compiler-2007-03-21.pdf
      -.. _`D04.1 Partial Python Implementation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.1_Partial_Python_Implementation_on_top_of_CPython.pdf
      -.. _`D04.2 Complete Python Implementation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.2_Complete_Python_Implementation_on_top_of_CPython.pdf
      -.. _`D04.3 Parser and Bytecode Compiler`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.3_Report_about_the_parser_and_bytecode_compiler.pdf
      -.. _`D04.4 PyPy as a Research Tool`: http://codespeak.net/svn/pypy/extradoc/eu-report/D04.4_Release_PyPy_as_a_research_tool.pdf
      -.. _`D05.1 Compiling Dynamic Language Implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      -.. _`D05.2 A Compiled Version of PyPy`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.2_A_compiled,_self-contained_version_of_PyPy.pdf
      -.. _`D05.3 Implementation with Translation Aspects`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.3_Publish_on_implementation_with_translation_aspects.pdf
      -.. _`D05.4 Encapsulating Low Level Aspects`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.4_Publish_on_encapsulating_low_level_language_aspects.pdf
      -.. _`D06.1 Core Object Optimization Results`: http://codespeak.net/svn/pypy/extradoc/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf
      -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
      -.. _`D08.2 JIT Compiler Architecture`: http://codespeak.net/pypy/extradoc/eu-report/D08.2_JIT_Compiler_Architecture-2007-05-01.pdf
      -.. _`D08.1 JIT Compiler Release`: http://codespeak.net/pypy/extradoc/eu-report/D08.1_JIT_Compiler_Release-2007-04-30.pdf
      -.. _`D09.1 Constraint Solving and Semantic Web`: http://codespeak.net/pypy/extradoc/eu-report/D09.1_Constraint_Solving_and_Semantic_Web-2007-05-11.pdf
      -.. _`D10.1 Aspect-Oriented, Design-by-Contract Programming and RPython static checking`: http://codespeak.net/pypy/extradoc/eu-report/D10.1_Aspect_Oriented_Programming_in_PyPy-2007-03-22.pdf
      -.. _`D11.1 PyPy for Embedded Devices`: http://codespeak.net/pypy/extradoc/eu-report/D11.1_PyPy_for_Embedded_Devices-2007-03-26.pdf
      -.. _`D12.1 High-Level-Backends and Feature Prototypes`: http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf
      -.. _`D13.1 Integration and Configuration`: http://codespeak.net/pypy/extradoc/eu-report/D13.1_Integration_and_Configuration-2007-03-30.pdf 
      -.. _`D14.1 Report about Milestone/Phase 1`: http://codespeak.net/svn/pypy/extradoc/eu-report/D14.1_Report_about_Milestone_Phase_1.pdf
      -.. _`D14.2 Tutorials and Guide Through the PyPy Source Code`: http://codespeak.net/pypy/extradoc/eu-report/D14.2_Tutorials_and_Guide_Through_the_PyPy_Source_Code-2007-03-22.pdf
      -.. _`D14.3 Report about Milestone/Phase 2`: http://codespeak.net/pypy/extradoc/eu-report/D14.3_Report_about_Milestone_Phase_2-final-2006-08-03.pdf
      -.. _`D14.4 PyPy-1.0 Milestone report`: http://codespeak.net/pypy/extradoc/eu-report/D14.4_Report_About_Milestone_Phase_3-2007-05-01.pdf
      -.. _`D14.5 Documentation of the development process`: http://codespeak.net/pypy/extradoc/eu-report/D14.5_Documentation_of_the_development_process-2007-03-30.pdf
      +.. _`PyPy EU Final Activity Report`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/PYPY-EU-Final-Activity-Report.pdf
      +.. _`D01.2-4 Project Organization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D01.2-4_Project_Organization-2007-03-28.pdf
      +.. _`D02.1 Development Tools and Website`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D02.1_Development_Tools_and_Website-2007-03-21.pdf
      +.. _`D02.2 Release Scheme`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D02.2_Release_Scheme-2007-03-30.pdf
      +.. _`D02.3 Testing Tool`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D02.3_Testing_Framework-2007-03-23.pdf
      +.. _`D03.1 Extension Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D03.1_Extension_Compiler-2007-03-21.pdf
      +.. _`D04.1 Partial Python Implementation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.1_Partial_Python_Implementation_on_top_of_CPython.pdf
      +.. _`D04.2 Complete Python Implementation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.2_Complete_Python_Implementation_on_top_of_CPython.pdf
      +.. _`D04.3 Parser and Bytecode Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.3_Report_about_the_parser_and_bytecode_compiler.pdf
      +.. _`D04.4 PyPy as a Research Tool`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D04.4_Release_PyPy_as_a_research_tool.pdf
      +.. _`D05.1 Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      +.. _`D05.2 A Compiled Version of PyPy`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.2_A_compiled,_self-contained_version_of_PyPy.pdf
      +.. _`D05.3 Implementation with Translation Aspects`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.3_Publish_on_implementation_with_translation_aspects.pdf
      +.. _`D05.4 Encapsulating Low Level Aspects`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.4_Publish_on_encapsulating_low_level_language_aspects.pdf
      +.. _`D06.1 Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf
      +.. _`D07.1 Massive Parallelism and Translation Aspects`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
      +.. _`D08.2 JIT Compiler Architecture`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D08.2_JIT_Compiler_Architecture-2007-05-01.pdf
      +.. _`D08.1 JIT Compiler Release`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D08.1_JIT_Compiler_Release-2007-04-30.pdf
      +.. _`D09.1 Constraint Solving and Semantic Web`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D09.1_Constraint_Solving_and_Semantic_Web-2007-05-11.pdf
      +.. _`D10.1 Aspect-Oriented, Design-by-Contract Programming and RPython static checking`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D10.1_Aspect_Oriented_Programming_in_PyPy-2007-03-22.pdf
      +.. _`D11.1 PyPy for Embedded Devices`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D11.1_PyPy_for_Embedded_Devices-2007-03-26.pdf
      +.. _`D12.1 High-Level-Backends and Feature Prototypes`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf
      +.. _`D13.1 Integration and Configuration`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D13.1_Integration_and_Configuration-2007-03-30.pdf 
      +.. _`D14.1 Report about Milestone/Phase 1`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.1_Report_about_Milestone_Phase_1.pdf
      +.. _`D14.2 Tutorials and Guide Through the PyPy Source Code`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.2_Tutorials_and_Guide_Through_the_PyPy_Source_Code-2007-03-22.pdf
      +.. _`D14.3 Report about Milestone/Phase 2`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.3_Report_about_Milestone_Phase_2-final-2006-08-03.pdf
      +.. _`D14.4 PyPy-1.0 Milestone report`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.4_Report_About_Milestone_Phase_3-2007-05-01.pdf
      +.. _`D14.5 Documentation of the development process`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D14.5_Documentation_of_the_development_process-2007-03-30.pdf
       
       
       
      -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf
      +.. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf
      
      diff --git a/pypy/doc/config/translation.ootype.mangle.rst b/pypy/doc/config/translation.ootype.mangle.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.ootype.mangle.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Mangle the names of user defined attributes of the classes, in order
      -to ensure that every name is unique. Default is true, and it should
      -not be turned off unless you know what you are doing.
      
      diff --git a/pypy/doc/release-0.6.rst b/pypy/doc/release-0.6.rst
      --- a/pypy/doc/release-0.6.rst
      +++ b/pypy/doc/release-0.6.rst
      @@ -9,11 +9,11 @@
       What it is and where to start 
       -----------------------------
       
      -Getting started:    http://codespeak.net/pypy/index.cgi?doc/getting-started.html
      +Getting started:    getting-started.html
       
      -PyPy Documentation: http://codespeak.net/pypy/index.cgi?doc
      +PyPy Documentation: index.html
       
      -PyPy Homepage:      http://codespeak.net/pypy/
      +PyPy Homepage:      http://pypy.org
       
       PyPy is a MIT-licensed reimplementation of Python written in
       Python itself.  The long term goals are an implementation that
      @@ -89,9 +89,9 @@
       from numerous people.   Please feel free to give feedback and 
       raise questions. 
       
      -    contact points: http://codespeak.net/pypy/index.cgi?contact
      +    contact points: http://pypy.org/contact.html
       
      -    contributor list: http://codespeak.net/pypy/index.cgi?doc/contributor.html 
      +    contributor list: contributor.html
       
       have fun, 
       
      
      diff --git a/pypy/doc/config/objspace.usemodules._warnings.rst b/pypy/doc/config/objspace.usemodules._warnings.txt
      copy from pypy/doc/config/objspace.usemodules._warnings.rst
      copy to pypy/doc/config/objspace.usemodules._warnings.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._random.rst b/pypy/doc/config/objspace.usemodules._random.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._random.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the '_random' module. It is necessary to use the module "random" from the standard library.
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.rst b/pypy/doc/config/objspace.usemodules.pyexpat.txt
      copy from pypy/doc/config/objspace.usemodules.pyexpat.rst
      copy to pypy/doc/config/objspace.usemodules.pyexpat.txt
      
      diff --git a/pypy/doc/config/objspace.std.withmapdict.rst b/pypy/doc/config/objspace.std.withmapdict.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withmapdict.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -Enable the new version of "sharing dictionaries".
      -
      -See the section in `Standard Interpreter Optimizations`_ for more details.
      -
      -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
      
      diff --git a/pypy/doc/config/translation.stackless.rst b/pypy/doc/config/translation.stackless.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.stackless.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -Run the `stackless transform`_ on each generated graph, which enables the use
      -of coroutines at RPython level and the "stackless" module when translating
      -PyPy.
      -
      -.. _`stackless transform`: ../stackless.html
      
      diff --git a/pypy/doc/config/translation.backendopt.none.rst b/pypy/doc/config/translation.backendopt.none.txt
      copy from pypy/doc/config/translation.backendopt.none.rst
      copy to pypy/doc/config/translation.backendopt.none.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt
      copy from pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst
      copy to pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.txt
      
      diff --git a/pypy/doc/discussion/use_case_of_logic.rst b/pypy/doc/discussion/use_case_of_logic.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/use_case_of_logic.rst
      +++ /dev/null
      @@ -1,75 +0,0 @@
      -Use cases for a combination of Logic and Object Oriented programming approach
      --------------------------------------------------------------------------------
      -
      -Workflows
      -=========
      -
      -Defining the next state by solving certain constraints. The more
      -general term might be State machines.
      -
      -Business Logic
      -==============
      -
      -We define Business Logic as expressing consistency (as an example) on
      -a set of objects in a business application.
      -
      -For example checking the consistency of a calculation before
      -committing the changes.
      -
      -The domain is quite rich in example of uses of Business Logic.
      -
      -Datamining
      -===========
      -
      -An example is Genetic sequence matching.
      -
      -Databases
      -=========
      -
      -Validity constraints for the data can be expressed as constraints.
      -
      -Constraints can be used to perform type inference when querying the
      -database.
      -
      -Semantic web
      -=============
      -
-The use case is like the database case, except the ontology language
-itself is born out of Description Logic
      -
      -
      -User Interfaces
      -===============
      -
      -We use rules to describe the layout and visibility constraints of
      -elements that are to be displayed on screen. The rule can also help
      -describing how an element is to be displayed depending on its state
      -(for instance, out of bound values can be displayed in a different
      -colour).
      -
      -Configuration
      -==============
      -
      -User configuration can use information inferred from : the current
      -user, current platforms , version requirements, ...
      -
      -The validity of the configuration can be checked with the constraints.
      -
      -
      -Scheduling and planning
      -========================
      -
      -Timetables, process scheduling, task scheduling.
      -
-Use rules to determine when to execute tasks (only start a batch if load
-is low, and the previous batch is finished).
      -
      -Load sharing.
      -
      -Route optimization. Planning the routes of a technician based on tools
      -needed and such
      -
      -An example is scheduling a conference like Europython see:
      -
      -http://lists.logilab.org/pipermail/python-logic/2005-May/000107.html
      -
      
      diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.rst b/pypy/doc/config/objspace.std.withprebuiltchar.rst
      deleted file mode 100644
      
      diff --git a/pypy/doc/config/translation.backend.rst b/pypy/doc/config/translation.backend.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backend.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Which backend to use when translating, see `translation documentation`_.
      -
      -.. _`translation documentation`: ../translation.html
      
      diff --git a/pypy/doc/config/objspace.usemodules.token.rst b/pypy/doc/config/objspace.usemodules.token.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.token.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'token' module. 
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules._winreg.rst b/pypy/doc/config/objspace.usemodules._winreg.txt
      copy from pypy/doc/config/objspace.usemodules._winreg.rst
      copy to pypy/doc/config/objspace.usemodules._winreg.txt
      
      diff --git a/pypy/doc/config/translation.debug.rst b/pypy/doc/config/translation.debug.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.debug.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Record extra debugging information during annotation. This leads to slightly
      -less obscure error messages.
      
      diff --git a/pypy/doc/config/objspace.usemodules.math.rst b/pypy/doc/config/objspace.usemodules.math.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.math.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'math' module. 
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/translation.backendopt.rst b/pypy/doc/config/translation.backendopt.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -This group contains options about various backend optimization passes. Most of
      -them are described in the `EU report about optimization`_
      -
      -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
      -
      
      diff --git a/pypy/doc/config/translation.jit.rst b/pypy/doc/config/translation.jit.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.jit.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Enable the JIT generator, for targets that have JIT support.
      -Experimental so far.
      
      diff --git a/pypy/doc/image/compat-matrix.png b/pypy/doc/image/compat-matrix.png
      index 162c06062b42cdb56745203c7b181707a1b14a69..060537165eca2f94eee1fabb9a0c235fe39e51ee
      GIT binary patch
      [cut]
      diff --git a/pypy/doc/config/objspace.std.builtinshortcut.rst b/pypy/doc/config/objspace.std.builtinshortcut.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.builtinshortcut.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -A shortcut speeding up primitive operations between built-in types.
      -
      -This is a space-time trade-off: at the moment, this option makes a
      -translated pypy-c executable bigger by about 1.7 MB.  (This can probably
      -be improved with careful analysis.)
      
      diff --git a/pypy/doc/config/translation.cc.rst b/pypy/doc/config/translation.cc.txt
      copy from pypy/doc/config/translation.cc.rst
      copy to pypy/doc/config/translation.cc.txt
      
      diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.rst b/pypy/doc/config/objspace.std.prebuiltintfrom.txt
      copy from pypy/doc/config/objspace.std.prebuiltintfrom.rst
      copy to pypy/doc/config/objspace.std.prebuiltintfrom.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.operator.rst b/pypy/doc/config/objspace.usemodules.operator.txt
      copy from pypy/doc/config/objspace.usemodules.operator.rst
      copy to pypy/doc/config/objspace.usemodules.operator.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.rst b/pypy/doc/config/objspace.usemodules.__pypy__.txt
      copy from pypy/doc/config/objspace.usemodules.__pypy__.rst
      copy to pypy/doc/config/objspace.usemodules.__pypy__.txt
      
      diff --git a/pypy/doc/config/objspace.honor__builtins__.rst b/pypy/doc/config/objspace.honor__builtins__.rst
      deleted file mode 100644
      
      diff --git a/pypy/doc/config/objspace.std.multimethods.rst b/pypy/doc/config/objspace.std.multimethods.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.multimethods.rst
      +++ /dev/null
      @@ -1,8 +0,0 @@
      -Choose the multimethod implementation.
      -
      -* ``doubledispatch`` turns
      -  a multimethod call into a sequence of normal method calls.
      -
      -* ``mrd`` uses a technique known as Multiple Row Displacement
      -  which precomputes a few compact tables of numbers and
      -  function pointers.
      
      diff --git a/pypy/doc/config/translation.linkerflags.rst b/pypy/doc/config/translation.linkerflags.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.linkerflags.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Experimental. Specify extra flags to pass to the linker.
      
      diff --git a/pypy/doc/config/objspace.std.withmethodcache.rst b/pypy/doc/config/objspace.std.withmethodcache.txt
      copy from pypy/doc/config/objspace.std.withmethodcache.rst
      copy to pypy/doc/config/objspace.std.withmethodcache.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._random.rst b/pypy/doc/config/objspace.usemodules._random.txt
      copy from pypy/doc/config/objspace.usemodules._random.rst
      copy to pypy/doc/config/objspace.usemodules._random.txt
      
      diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
      --- a/pypy/doc/how-to-release.rst
      +++ b/pypy/doc/how-to-release.rst
      @@ -1,4 +1,6 @@
      -.. include:: crufty.rst
      +.. include:: needswork.txt
      +
      +.. needs work, it talks about svn. also, it is not really user documentation
       
       Making a PyPy Release
       =======================
      
      diff --git a/pypy/doc/config/objspace.usemodules.mmap.rst b/pypy/doc/config/objspace.usemodules.mmap.txt
      copy from pypy/doc/config/objspace.usemodules.mmap.rst
      copy to pypy/doc/config/objspace.usemodules.mmap.txt
      
      diff --git a/pypy/doc/config/translation.simplifying.rst b/pypy/doc/config/translation.simplifying.txt
      copy from pypy/doc/config/translation.simplifying.rst
      copy to pypy/doc/config/translation.simplifying.txt
      
      diff --git a/pypy/doc/config/objspace.std.withmethodcache.rst b/pypy/doc/config/objspace.std.withmethodcache.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withmethodcache.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Enable method caching. See the section "Method Caching" in `Standard
      -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
      
      diff --git a/pypy/doc/config/translation.jit_backend.rst b/pypy/doc/config/translation.jit_backend.txt
      copy from pypy/doc/config/translation.jit_backend.rst
      copy to pypy/doc/config/translation.jit_backend.txt
      
      diff --git a/pypy/doc/config/translation.dump_static_data_info.rst b/pypy/doc/config/translation.dump_static_data_info.txt
      copy from pypy/doc/config/translation.dump_static_data_info.rst
      copy to pypy/doc/config/translation.dump_static_data_info.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.__pypy__.rst b/pypy/doc/config/objspace.usemodules.__pypy__.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.__pypy__.rst
      +++ /dev/null
      @@ -1,9 +0,0 @@
      -Use the '__pypy__' module. 
      -This module is expected to be working and is included by default.
      -It contains special PyPy-specific functionality.
      -For example most of the special functions described in the `object space proxies`
      -document are in the module.
      -See the `__pypy__ module documentation`_ for more details.
      -
-.. _`object space proxies`: ../objspace-proxies.html
      -.. _`__pypy__ module documentation`: ../__pypy__-module.html
      
      diff --git a/pypy/doc/config/objspace.usemodules._hashlib.rst b/pypy/doc/config/objspace.usemodules._hashlib.txt
      copy from pypy/doc/config/objspace.usemodules._hashlib.rst
      copy to pypy/doc/config/objspace.usemodules._hashlib.txt
      
      diff --git a/pypy/doc/discussion/security-ideas.rst b/pypy/doc/discussion/security-ideas.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/security-ideas.rst
      +++ /dev/null
      @@ -1,312 +0,0 @@
      -==============
      -Security ideas
      -==============
      -
      -These are some notes I (Armin) took after a talk at Chalmers by Steve
      -Zdancewic: "Encoding Information Flow in Haskell".  That talk was
      -presenting a pure Haskell approach with monad-like constructions; I
      -think that the approach translates well to PyPy at the level of RPython.
      -
      -
      -The problem
      ------------
      -
      -The problem that we try to solve here is: how to give the programmer a
      -way to write programs that are easily checked to be "secure", in the
      -sense that bugs shouldn't allow confidential information to be
      -unexpectedly leaked.  This is not security as in defeating actively
      -malicious attackers.
      -
      -
      -Example
      --------
      -
      -Let's suppose that we want to write a telnet-based application for a
      -bidding system.  We want normal users to be able to log in with their
      -username and password, and place bids (i.e. type in an amount of money).
      -The server should record the highest bid so far but not allow users to
      -see that number.  Additionally, the administrator should be able to log
      -in with his own password and see the highest bid.  The basic program::
      -
      -    def mainloop():
      -        while True:
      -            username = raw_input()
      -            password = raw_input()
      -            user = authenticate(username, password)
      -            if user == 'guest':
      -                serve_guest()
      -            elif user == 'admin':
      -                serve_admin()
      -
      -    def serve_guest():
      -        global highest_bid
      -        print "Enter your bid:"
      -        n = int(raw_input())
      -        if n > highest_bid:     #
      -            highest_bid = n     #
      -        print "Thank you"
      -
      -    def serve_admin():
-        print "Highest bid is:", highest_bid
      -
      -The goal is to make this program more secure by declaring and enforcing
      -the following properties: first, the guest code is allowed to manipulate
      -the highest_bid, as in the lines marked with ``#``, but these lines must
      -not leak back the highest_bid in a form visible to the guest user;
      -second, the printing in serve_admin() must only be allowed if the user
      -that logged in is really the administrator (e.g. catch bugs like
      -accidentally swapping the serve_guest() and serve_admin() calls in
      -mainloop()).
      -
      -
      -Preventing leak of information in guest code: 1st try
      ------------------------------------------------------
      -
      -The basic technique to prevent leaks is to attach "confidentiality
      -level" tags to objects.  In this example, the highest_bid int object
      -would be tagged with label="secret", e.g. by being initialized as::
      -
      -    highest_bid = tag(0, label="secret")
      -
      -At first, we can think about an object space where all objects have such
      -a label, and the label propagates to operations between objects: for
      -example, code like ``highest_bid += 1`` would produce a new int object
      -with again label="secret".
      -
      -Where this approach doesn't work is with if/else or loops.  In the above
      -example, we do::
      -
      -        if n > highest_bid:
      -            ...
      -
-However, by the object space rules introduced above, the result of the
-comparison is a "secret" bool object.  This means that the guest code
-cannot know if it is True or False, and so the PyPy interpreter has no
-clue if it must follow the ``then`` or ``else`` branch of the ``if``.
      -So the guest code could do ``highest_bid += 1`` and probably even
      -``highest_bid = max(highest_bid, n)`` if max() is a clever enough
      -built-in function, but clearly this approach doesn't work well for more
      -complicated computations that we would like to perform at this point.
      -
      -There might be very cool possible ideas to solve this with doing some
      -kind of just-in-time flow object space analysis.  However, here is a
      -possibly more practical approach.  Let's forget about the object space
      -tricks and start again.  (See `Related work`_ for why the object space
      -approach doesn't work too well.)
      -
      -
      -Preventing leak of information in guest code with the annotator instead
      ------------------------------------------------------------------------
      -
      -Suppose that the program runs on top of CPython and not necessarily
      -PyPy.  We will only need PyPy's annotator.  The idea is to mark the code
      -that manipulates highest_bid explicitly, and make it RPython in the
      -sense that we can take its flow space and follow the calls (we don't
      -care about the precise types here -- we will use different annotations).
      -Note that only the bits that manipulates the secret values needs to be
      -RPython.  Example::
      -
      -    # on top of CPython, 'hidden' is a type that hides a value without
      -    # giving any way to normal programs to access it, so the program
      -    # cannot do anything with 'highest_bid'
      -
      -    highest_bid = hidden(0, label="secure")
      -
      -    def enter_bid(n):
      -        if n > highest_bid.value:
      -            highest_bid.value = n
      -
      -    enter_bid = secure(enter_bid)
      -
      -    def serve_guest():
      -        print "Enter your bid:"
      -        n = int(raw_input())
      -        enter_bid(n)
      -        print "Thank you"
      -
      -The point is that the expression ``highest_bid.value`` raises a
      -SecurityException when run normally: it is not allowed to read this
      -value.  The secure() decorator uses the annotator on the enter_bid()
      -function, with special annotations that I will describe shortly.  Then
      -secure() returns a "compiled" version of enter_bid.  The compiled
      -version is checked to satisfy the security constrains, and it contains
      -special code that then enables the ``highest_bid.value`` to work.
      -
      -The annotations propagated by secure() are ``SomeSecurityLevel``
      -annotations.  Normal constants are propagated as
      -SomeSecurityLevel("public").  The ``highest_bid.value`` returns the
      -annotation SomeSecurityLevel("secret"), which is the label of the
      -constant ``highest_bid`` hidden object.  We define operations between
      -two SomeSecurityLevels to return a SomeSecurityLevel which is the max of
      -the secret levels of the operands.
      -
      -The key point is that secure() checks that the return value is
      -SomeSecurityLevel("public").  It also checks that only
      -SomeSecurityLevel("public") values are stored e.g. in global data
      -structures.
      -
      -In this way, any CPython code like serve_guest() can safely call
      -``enter_bid(n)``.  There is no way to leak information about the current
      -highest bid back out of the compiled enter_bid().
      -
      -
      -Declassification
      -----------------
      -
      -Now there must be a controlled way to leak the highest_bid value,
      -otherwise it is impossible even for the admin to read it.  Note that
      -serve_admin(), which prints highest_bid, is considered to "leak" this
      -value because it is an input-output, i.e. it escapes the program.  This
      -is a leak that we actually want -- the terminology is that serve_admin()
      -must "declassify" the value.
      -
      -To do this, there is a capability-like model that is easy to implement
      -for us.  Let us modify the main loop as follows::
      -
      -    def mainloop():
      -        while True:
      -            username = raw_input()
      -            password = raw_input()
      -            user, priviledge_token = authenticate(username, password)
      -            if user == 'guest':
      -                serve_guest()
      -            elif user == 'admin':
      -                serve_admin(priviledge_token)
      -            del priviledge_token   # make sure nobody else uses it
      -
      -The idea is that the authenticate() function (shown later) also returns
      -a "token" object.  This is a normal Python object, but it should not be
      -possible for normal Python code to instantiate such an object manually.
      -In this example, authenticate() returns a ``priviledge("public")`` for
      -guests, and a ``priviledge("secret")`` for admins.  Now -- and this is
      -the insecure part of this scheme, but it is relatively easy to control
      --- the programmer must make sure that these priviledge_token objects
      -don't go to unexpected places, particularly the "secret" one.  They work
      -like capabilities: having a reference to them allows parts of the
      -program to see secret information, of a confidentiality level up to the
      -one corresponding to the token.
      -
-Now we modify serve_admin() as follows::
      -
      -    def serve_admin(token):
-        print "Highest bid is:", declassify(highest_bid, token=token)
      -
      -The declassify() function reads the value if the "token" is privileged
      -enough, and raises an exception otherwise.
      -
      -What are we protecting here?  The fact that we need the administrator
      -token in order to see the highest bid.  If by mistake we swap the
      -serve_guest() and serve_admin() lines in mainloop(), then what occurs is
      -that serve_admin() would be called with the guest token.  Then
      -declassify() would fail.  If we assume that authenticate() is not buggy,
      -then the rest of the program is safe from leak bugs.
      -
-There are other variants of declassify() that are convenient.  For
      -example, in the RPython parts of the code, declassify() can be used to
      -control more precisely at which confidentiality levels we want which
      -values, if there are more than just two such levels.  The "token"
      -argument could also be implicit in RPython parts, meaning "use the
      -current level"; normal non-RPython code always runs at "public" level,
      -but RPython functions could run with higher current levels, e.g. if they
      -are called with a "token=..." argument.
      -
      -(Do not confuse this with what enter_bid() does: enter_bid() runs at the
      -public level all along.  It is ok for it to compute with, and even
      -modify, the highest_bid.value.  The point of enter_bid() was that by
      -being an RPython function the annotator can make sure that the value, or
      -even anything that gives a hint about the value, cannot possibly escape
      -from the function.)
      -
      -It is also useful to have "globally trusted" administrator-level RPython
      -functions that always run at a higher level than the caller, a bit like
      -Unix programs with the "suid" bit.  If we set aside the consideration
      -that it should not be possible to make new "suid" functions too easily,
      -then we could define the authenticate() function of our server example
      -as follows::
      -
      -    def authenticate(username, password):
      -        database = {('guest', 'abc'): priviledge("public"),
      -                    ('admin', '123'): priviledge("secret")}
      -        token_obj = database[username, password]
      -        return username, declassify(token_obj, target_level="public")
      -
      -    authenticate = secure(authenticate, suid="secret")
      -
      -The "suid" argument makes the compiled function run on level "secret"
      -even if the caller is "public" or plain CPython code.  The declassify()
      -in the function is allowed because of the current level of "secret".
      -Note that the function returns a "public" tuple -- the username is
      -public, and the token_obj is declassified to public.  This is the
      -property that allows CPython code to call it.
      -
      -Of course, like a Unix suid program the authenticate() function could be
      -buggy and leak information, but like suid programs it is small enough
      -for us to feel that it is secure just by staring at the code.
      -
      -An alternative to the suid approach is to play with closures, e.g.::
      -
      -    def setup():
      -        #initialize new levels -- this cannot be used to access existing levels
      -        public_level = create_new_priviledge("public")
      -        secret_level = create_new_priviledge("secret")
      -
      -        database = {('guest', 'abc'): public_level,
      -                    ('admin', '123'): secret_level}
      -
      -        def authenticate(username, password):
      -            token_obj = database[username, password]
      -            return username, declassify(token_obj, target_level="public",
      -                                                   token=secret_level)
      -
      -        return secure(authenticate)
      -
      -    authenticate = setup()
      -
      -In this approach, declassify() works because it has access to the
      -secret_level token.  We still need to make authenticate() a secure()
      -compiled function to hide the database and the secret_level more
      -carefully; otherwise, code could accidentally find them by inspecting
      -the traceback of the KeyError exception if the username or password is
      -invalid.  Also, secure() will check for us that authenticate() indeed
      -returns a "public" tuple.
      -
      -This basic model is easy to extend in various directions.  For example
      -secure() RPython functions should be allowed to return non-public
      -results -- but then they have to be called either with an appropriate
      -"token=..."  keyword, or else they return hidden objects again.  They
      -could also be used directly from other RPython functions, in which the
      -level of what they return is propagated.
      -
      -
      -Related work
      -------------
      -
      -What I'm describing here is nothing more than an adaptation of existing
      -techniques to RPython.
      -
      -It is noteworthy to mention at this point why the object space approach
      -doesn't work as well as we could first expect.  The distinction between
      -static checking and dynamic checking (with labels only attached to
      -values) seems to be well known; also, it seems to be well known that the
      -latter is too coarse in practice.  The problem is about branching and
      -looping.  From the object space' point of view it is quite hard to know
      -what a newly computed value really depends on.  Basically, it is
      -difficult to do better than: after is_true() has been called on a secret
      -object, then we must assume that all objects created are also secret
      -because they could depend in some way on the truth-value of the previous
      -secret object.
      -
      -The idea to dynamically use static analysis is the key new idea
      -presented by Steve Zdancewic in his talk.  You can have small controlled
      -RPython parts of the program that must pass through a static analysis,
      -and we only need to check dynamically that some input conditions are
      -satisfied when other parts of the program call the RPython parts.
      -Previous research was mostly about designing languages that are
      -completely statically checked at compile-time.  The delicate part is to
      -get the static/dynamic mixture right so that even indirect leaks are not
      -possible -- e.g. leaks that would occur from calling functions with
      -strange arguments to provoke exceptions, and where the presence of the
      -exception or not would be information in itself.  This approach seems to
      -do that reliably.  (Of course, at the talk many people including the
      -speaker were wondering about ways to move more of the checking at
      -compile-time, but Python people won't have such worries :-)
      
      diff --git a/pypy/doc/config/objspace.usemodules.posix.rst b/pypy/doc/config/objspace.usemodules.posix.txt
      copy from pypy/doc/config/objspace.usemodules.posix.rst
      copy to pypy/doc/config/objspace.usemodules.posix.txt
      
      diff --git a/pypy/doc/discussion/parsing-ideas.rst b/pypy/doc/discussion/parsing-ideas.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/parsing-ideas.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -add a way to modularize regular expressions:
      -
      -_HEXNUM = "...";
      -_DECNUM = "...";
      -NUM = "{_HEXNUM}|{_DECNUM}";
      
      diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
      --- a/pypy/doc/index.rst
      +++ b/pypy/doc/index.rst
      @@ -11,20 +11,37 @@
       Getting into PyPy ... 
       =============================================
       
      -* `Release 1.4`_: the latest official release
      +* `Release 1.5`_: the latest official release
       
       * `PyPy Blog`_: news and status info about PyPy 
       
      -* `Documentation`_: extensive documentation about PyPy.  
      -
      -* `Getting Started`_: Getting started and playing with PyPy. 
      -
       * `Papers`_: Academic papers, talks, and related projects
       
       * `Videos`_: Videos of PyPy talks and presentations
       
       * `speed.pypy.org`_: Daily benchmarks of how fast PyPy is
       
      +Documentation for the PyPy Python Interpreter
      +===============================================
      +
      +`getting started`_ provides hands-on instructions 
      +including a two-liner to run the PyPy Python interpreter 
      +on your system, examples on advanced features and 
      +entry points for using the `RPython toolchain`_.
      +
      +`FAQ`_ contains some frequently asked questions.
      +
      +New features of PyPy's Python Interpreter and 
      +Translation Framework: 
      +
      +  * `Differences between PyPy and CPython`_
      +  * `What PyPy can do for your objects`_
      +  * `Stackless and coroutines`_
      +  * `JIT Generation in PyPy`_ 
      +  * `Sandboxing Python code`_
      +
      +Status_ of the project.
      +
       
       Mailing lists, bug tracker, IRC channel
       =============================================
      @@ -32,13 +49,13 @@
       * `Development mailing list`_: development and conceptual
         discussions. 
       
      -* `Subversion commit mailing list`_: updates to code and
      +* `Mercurial commit mailing list`_: updates to code and
         documentation. 
       
      +* `Sprint mailing list`_: mailing list for organizing upcoming sprints. 
      +
       * `Development bug/feature tracker`_: filing bugs and feature requests. 
       
      -* `Sprint mailing list`_: mailing list for organizing upcoming sprints. 
      -
       * **IRC channel #pypy on freenode**: Many of the core developers are hanging out 
         at #pypy on irc.freenode.net.  You are welcome to join and ask questions
         (if they are not already developed in the FAQ_).
      @@ -60,18 +77,291 @@
       .. _`development bug/feature tracker`: https://codespeak.net/issue/pypy-dev/ 
       .. _here: http://tismerysoft.de/pypy/irc-logs/pypy
       .. _`sprint mailing list`: http://codespeak.net/mailman/listinfo/pypy-sprint 
      -.. _`subversion commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn
      +.. _`Mercurial commit mailing list`: http://codespeak.net/mailman/listinfo/pypy-svn
       .. _`development mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev
       .. _`FAQ`: faq.html
      -.. _`Documentation`: docindex.html 
       .. _`Getting Started`: getting-started.html
       .. _`Papers`: extradoc.html
       .. _`Videos`: video-index.html
      -.. _`Release 1.4`: http://pypy.org/download.html
      +.. _`Release 1.5`: http://pypy.org/download.html
       .. _`speed.pypy.org`: http://speed.pypy.org
      +.. _`RPython toolchain`: translation.html
       
      -Detailed Documentation
      -======================
      +
      +Project Documentation
      +=====================================
      +
      +PyPy was funded by the EU for several years. See the `web site of the EU
      +project`_ for more details.
      +
      +.. _`web site of the EU project`: http://pypy.org
      +
      +architecture_ gives a complete view of PyPy's basic design. 
      +
      +`coding guide`_ helps you to write code for PyPy (especially also describes
      +coding in RPython a bit). 
      +
      +`sprint reports`_ lists reports written at most of our sprints, from
      +2003 to the present.
      +
      +`papers, talks and related projects`_ lists presentations 
      +and related projects as well as our published papers.
      +
      +`PyPy video documentation`_ is a page linking to the videos (e.g. of talks and
      +introductions) that are available.
      +
      +`Technical reports`_ is a page that contains links to the
      +reports that we submitted to the European Union.
      +
      +`development methodology`_ describes our sprint-driven approach.
      +
      +`LICENSE`_ contains licensing details (basically a straight MIT-license). 
      +
      +`Glossary`_ of PyPy words to help you align your inner self with
      +the PyPy universe.
      +
      +
      +Status
      +===================================
      +
      +PyPy can be used to run Python programs on Linux, OS/X,
      +Windows, on top of .NET, and on top of Java.
       +To dig into PyPy it is recommended to try out the current
       +Mercurial default branch, which is always working or mostly working,
       +instead of the latest release, which is `1.5`__.
      +
      +.. __: release-1.5.0.html
      +
      +PyPy is mainly developed on Linux and Mac OS X.  Windows is supported,
      +but platform-specific bugs tend to take longer before we notice and fix
      +them.  Linux 64-bit machines are supported (though it may also take some
      +time before we notice and fix bugs).
      +
      +PyPy's own tests `summary`_, daily updated, run through BuildBot infrastructure.
      +You can also find CPython's compliance tests run with compiled ``pypy-c``
      +executables there.
      +
      +information dating from early 2007: 
      +
      +`PyPy LOC statistics`_ shows LOC statistics about PyPy.
      +
      +`PyPy statistics`_ is a page with various statistics about the PyPy project.
      +
      +`compatibility matrix`_ is a diagram that shows which of the various features
      +of the PyPy interpreter work together with which other features.
      +
      +
      +Source Code Documentation
      +===============================================
      +
      +`object spaces`_ discusses the object space interface 
      +and several implementations. 
      +
      +`bytecode interpreter`_ explains the basic mechanisms 
      +of the bytecode interpreter and virtual machine. 
      +
      +`interpreter optimizations`_ describes our various strategies for
      +improving the performance of our interpreter, including alternative
      +object implementations (for strings, dictionaries and lists) in the
      +standard object space.
      +
      +`translation`_ is a detailed overview of our translation process.  The
      +rtyper_ is the largest component of our translation process.
      +
      +`dynamic-language translation`_ is a paper that describes
      +the translation process, especially the flow object space
      +and the annotator in detail. (This document is one
      +of the `EU reports`_.)
      +
      +`low-level encapsulation`_ describes how our approach hides
      +away a lot of low level details. This document is also part
      +of the `EU reports`_.
      +
      +`translation aspects`_ describes how we weave different
      +properties into our interpreter during the translation
      +process. This document is also part of the `EU reports`_.
      +
      +`garbage collector`_ strategies that can be used by the virtual
      +machines produced by the translation process.
      +
      +`parser`_ contains (outdated, unfinished) documentation about
      +the parser.
      +
      +`rlib`_ describes some modules that can be used when implementing programs in
      +RPython.
      +
      +`configuration documentation`_ describes the various configuration options that
      +allow you to customize PyPy.
      +
      +`CLI backend`_ describes the details of the .NET backend.
      +
      +`JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler
      +from our Python interpreter.
      +
      +
      +
      +.. _`FAQ`: faq.html
      +.. _Glossary: glossary.html
      +.. _`PyPy video documentation`: video-index.html
      +.. _parser: parser.html
      +.. _`development methodology`: dev_method.html
      +.. _`sprint reports`: sprint-reports.html
      +.. _`papers, talks and related projects`: extradoc.html
      +.. _`PyPy LOC statistics`: http://codespeak.net/~hpk/pypy-stat/
      +.. _`PyPy statistics`: http://codespeak.net/pypy/trunk/pypy/doc/statistic
      +.. _`object spaces`: objspace.html 
      +.. _`interpreter optimizations`: interpreter-optimizations.html 
      +.. _`translation`: translation.html 
      +.. _`dynamic-language translation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      +.. _`low-level encapsulation`: low-level-encapsulation.html
      +.. _`translation aspects`: translation-aspects.html
      +.. _`configuration documentation`: config/
      +.. _`coding guide`: coding-guide.html 
      +.. _`architecture`: architecture.html 
      +.. _`getting started`: getting-started.html 
      +.. _`bytecode interpreter`: interpreter.html 
      +.. _`EU reports`: index-report.html
      +.. _`Technical reports`: index-report.html
      +.. _`summary`: http://codespeak.net:8099/summary
      +.. _`ideas for PyPy related projects`: project-ideas.html
      +.. _`Nightly builds and benchmarks`: http://tuatara.cs.uni-duesseldorf.de/benchmark.html
      +.. _`directory reference`: 
      +.. _`rlib`: rlib.html
      +.. _`Sandboxing Python code`: sandbox.html
      +.. _`LICENSE`: https://bitbucket.org/pypy/pypy/src/default/LICENSE
      +
      +PyPy directory cross-reference 
      +------------------------------
      +
      +Here is a fully referenced alphabetical two-level deep 
      +directory overview of PyPy: 
      +
      +================================   =========================================== 
      +Directory                          explanation/links
      +================================   =========================================== 
      +`pypy/annotation/`_                `type inferencing code`_ for `RPython`_ programs 
      +
      +`pypy/bin/`_                       command-line scripts, mainly `py.py`_ and `translatorshell.py`_
      +
      +`pypy/config/`_                    handles the numerous options for building and running PyPy
      +
      +`pypy/doc/`_                       text versions of PyPy developer documentation
      +
      +`pypy/doc/config/`_                documentation for the numerous translation options
      +
      +`pypy/doc/discussion/`_            drafts of ideas and documentation
      +
      +``doc/*/``                         other specific documentation topics or tools
      +
      +`pypy/interpreter/`_               `bytecode interpreter`_ and related objects
      +                                   (frames, functions, modules,...) 
      +
      +`pypy/interpreter/pyparser/`_      interpreter-level Python source parser
      +
      +`pypy/interpreter/astcompiler/`_   interpreter-level bytecode compiler, via an AST
      +                                   representation
      +
      +`pypy/module/`_                    contains `mixed modules`_ implementing core modules with 
      +                                   both application and interpreter level code.
      +                                   Not all are finished and working.  Use the ``--withmod-xxx``
      +                                   or ``--allworkingmodules`` translation options.
      +
      +`pypy/objspace/`_                  `object space`_ implementations
      +
      +`pypy/objspace/trace.py`_          the `trace object space`_ monitoring bytecode and space operations
      +
      +`pypy/objspace/dump.py`_           the dump object space saves a large, searchable log file
      +                                   with all operations
      +
      +`pypy/objspace/thunk.py`_          the `thunk object space`_, providing unique object features 
      +
      +`pypy/objspace/flow/`_             the FlowObjSpace_ implementing `abstract interpretation`_
      +
      +`pypy/objspace/std/`_              the StdObjSpace_ implementing CPython's objects and types
      +
      +`pypy/rlib/`_                      a `"standard library"`_ for RPython_ programs
      +
      +`pypy/rpython/`_                   the `RPython Typer`_ 
      +
      +`pypy/rpython/lltypesystem/`_      the `low-level type system`_ for C-like backends
      +
      +`pypy/rpython/ootypesystem/`_      the `object-oriented type system`_ for OO backends
      +
      +`pypy/rpython/memory/`_            the `garbage collector`_ construction framework
      +
      +`pypy/tool/`_                      various utilities and hacks used from various places 
      +
      +`pypy/tool/algo/`_                 general-purpose algorithmic and mathematic
      +                                   tools
      +
      +`pypy/tool/pytest/`_               support code for our `testing methods`_
      +
      +`pypy/translator/`_                translation_ backends and support code
      +
      +`pypy/translator/backendopt/`_     general optimizations that run before a backend generates code
      +
      +`pypy/translator/c/`_              the `GenC backend`_, producing C code from an
      +                                   RPython program (generally via the rtyper_)
      +
      +`pypy/translator/cli/`_            the `CLI backend`_ for `.NET`_ (Microsoft CLR or Mono_)
      +
      +`pypy/translator/goal/`_           our `main PyPy-translation scripts`_ live here
      +
      +`pypy/translator/jvm/`_            the Java backend
      +
      +`pypy/translator/stackless/`_      the `Stackless Transform`_
      +
      +`pypy/translator/tool/`_           helper tools for translation, including the Pygame
      +                                   `graph viewer`_
      +
      +``*/test/``                        many directories have a test subdirectory containing test 
      +                                   modules (see `Testing in PyPy`_) 
      +
      +``_cache/``                        holds cache files from internally `translating application 
      +                                   level to interpreterlevel`_ code.   
      +================================   =========================================== 
      +
      +.. _`bytecode interpreter`: interpreter.html
      +.. _`translating application level to interpreterlevel`: geninterp.html
      +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy 
      +.. _`mixed modules`: coding-guide.html#mixed-modules 
      +.. _`modules`: coding-guide.html#modules 
      +.. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf
      +.. _`object space`: objspace.html
      +.. _FlowObjSpace: objspace.html#the-flow-object-space 
      +.. _`trace object space`: objspace.html#the-trace-object-space 
      +.. _`taint object space`: objspace-proxies.html#taint
      +.. _`thunk object space`: objspace-proxies.html#thunk
      +.. _`transparent proxies`: objspace-proxies.html#tproxy
      +.. _`Differences between PyPy and CPython`: cpython_differences.html
      +.. _`What PyPy can do for your objects`: objspace-proxies.html
      +.. _`Stackless and coroutines`: stackless.html
      +.. _StdObjSpace: objspace.html#the-standard-object-space 
      +.. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation
      +.. _`rpython`: coding-guide.html#rpython 
      +.. _`type inferencing code`: translation.html#the-annotation-pass 
      +.. _`RPython Typer`: translation.html#rpython-typer 
      +.. _`testing methods`: coding-guide.html#testing-in-pypy
      +.. _`translation`: translation.html 
      +.. _`GenC backend`: translation.html#genc 
      +.. _`CLI backend`: cli-backend.html
      +.. _`py.py`: getting-started-python.html#the-py.py-interpreter
      +.. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator
      +.. _JIT: jit/index.html
      +.. _`JIT Generation in PyPy`: jit/index.html
      +.. _`just-in-time compiler generator`: jit/index.html
      +.. _rtyper: rtyper.html
      +.. _`low-level type system`: rtyper.html#low-level-type
      +.. _`object-oriented type system`: rtyper.html#oo-type
      +.. _`garbage collector`: garbage_collection.html
      +.. _`Stackless Transform`: translation.html#the-stackless-transform
      +.. _`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter
      +.. _`.NET`: http://www.microsoft.com/net/
      +.. _Mono: http://www.mono-project.com/
      +.. _`"standard library"`: rlib.html
      +.. _`graph viewer`: getting-started-dev.html#try-out-the-translator
      +.. _`compatibility matrix`: image/compat-matrix.png
      +
       
       .. The following documentation is important and reasonably up-to-date:
       
      @@ -80,6 +370,7 @@
       
       .. toctree::
          :maxdepth: 1
      +   :hidden:
       
          getting-started.rst
          getting-started-python.rst
      @@ -89,15 +380,18 @@
          architecture.rst
          coding-guide.rst
          cpython_differences.rst
      -   cleanup-todo.rst
          garbage_collection.rst
          interpreter.rst
          objspace.rst
      +   __pypy__-module.rst
      +   objspace-proxies.rst
      +   config/index.rst
       
          dev_method.rst
          extending.rst
       
          extradoc.rst
      +   video-index.rst
       
          glossary.rst
       
      @@ -105,12 +399,12 @@
       
          interpreter-optimizations.rst
          configuration.rst
      -   low-level-encapsulation.rst
          parser.rst
          rlib.rst
          rtyper.rst
      +   rffi.rst
      +   
          translation.rst
      -   jit/_ref.rst
          jit/index.rst
          jit/overview.rst
          jit/pyjitpl5.rst
      @@ -124,6 +418,7 @@
          index-report.rst
       
          stackless.rst
      +   sandbox.rst
       
          discussions.rst
       
      @@ -132,12 +427,14 @@
          sprint-reports.rst
       
          eventhistory.rst
      +   statistic/index.rst
       
       Indices and tables
       ==================
       
       * :ref:`genindex`
      -* :ref:`modindex`
       * :ref:`search`
       * :ref:`glossary`
       
      +
      +.. include:: _ref.txt
      
      diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
      --- a/pypy/doc/index-of-release-notes.rst
      +++ b/pypy/doc/index-of-release-notes.rst
      @@ -15,3 +15,4 @@
          release-1.4.0.rst
          release-1.4.0beta.rst
          release-1.4.1.rst
      +   release-1.5.0.rst
      
      diff --git a/pypy/doc/config/objspace.std.sharesmallstr.rst b/pypy/doc/config/objspace.std.sharesmallstr.rst
      deleted file mode 100644
      
      diff --git a/pypy/doc/config/objspace.geninterp.rst b/pypy/doc/config/objspace.geninterp.txt
      copy from pypy/doc/config/objspace.geninterp.rst
      copy to pypy/doc/config/objspace.geninterp.txt
      
      diff --git a/pypy/doc/config/translation.cli.exception_transformer.rst b/pypy/doc/config/translation.cli.exception_transformer.txt
      copy from pypy/doc/config/translation.cli.exception_transformer.rst
      copy to pypy/doc/config/translation.cli.exception_transformer.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.rst b/pypy/doc/config/translation.backendopt.stack_optimization.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.stack_optimization.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Enable the optimized code generation for stack based machine, if the backend support it
      
      diff --git a/pypy/doc/config/objspace.usemodules._codecs.rst b/pypy/doc/config/objspace.usemodules._codecs.txt
      copy from pypy/doc/config/objspace.usemodules._codecs.rst
      copy to pypy/doc/config/objspace.usemodules._codecs.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.rst b/pypy/doc/config/objspace.usemodules.unicodedata.txt
      copy from pypy/doc/config/objspace.usemodules.unicodedata.rst
      copy to pypy/doc/config/objspace.usemodules.unicodedata.txt
      
      diff --git a/pypy/doc/config/translation.rst b/pypy/doc/config/translation.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -..  intentionally empty
      
      diff --git a/pypy/doc/config/translation.jit_backend.rst b/pypy/doc/config/translation.jit_backend.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.jit_backend.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Choose the backend to use for the JIT.
      -By default, this is the best backend for the current platform.
      
      diff --git a/pypy/doc/config/objspace.std.newshortcut.rst b/pypy/doc/config/objspace.std.newshortcut.txt
      copy from pypy/doc/config/objspace.std.newshortcut.rst
      copy to pypy/doc/config/objspace.std.newshortcut.txt
      
      diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.rst b/pypy/doc/config/objspace.std.methodcachesizeexp.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.methodcachesizeexp.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
      
      diff --git a/pypy/doc/config/objspace.std.rst b/pypy/doc/config/objspace.std.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -..  intentionally empty
      
      diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.extmodules.rst
      +++ /dev/null
      @@ -1,12 +0,0 @@
      -You can pass a comma-separated list of third-party builtin modules
      -which should be translated along with the standard modules within
      -``pypy.module``.
      -
      -The module names need to be fully qualified (i.e. have a ``.`` in them),
      -be on the ``$PYTHONPATH`` and not conflict with any existing ones, e.g.
      -``mypkg.somemod``.
      -
      -Once translated, the module will be accessible with a simple::
      -
      -    import somemod
      -
      
      diff --git a/pypy/doc/config/objspace.usemodules._ast.rst b/pypy/doc/config/objspace.usemodules._ast.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._ast.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the '_ast' module. 
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules.sys.rst b/pypy/doc/config/objspace.usemodules.sys.txt
      copy from pypy/doc/config/objspace.usemodules.sys.rst
      copy to pypy/doc/config/objspace.usemodules.sys.txt
      
      diff --git a/pypy/doc/config/translation.ootype.rst b/pypy/doc/config/translation.ootype.txt
      copy from pypy/doc/config/translation.ootype.rst
      copy to pypy/doc/config/translation.ootype.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.rst b/pypy/doc/config/translation.backendopt.profile_based_inline.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.profile_based_inline.rst
      +++ /dev/null
      @@ -1,10 +0,0 @@
      -Inline flowgraphs only for call-sites for which there was a minimal
      -number of calls during an instrumented run of the program. Callee
      -flowgraphs are considered candidates based on a weight heuristic like
      -for basic inlining. (see :config:`translation.backendopt.inline`,
      -:config:`translation.backendopt.profile_based_inline_threshold` ).
      -
      -The option takes as value a string which is the arguments to pass to
      -the program for the instrumented run.
      -
      -This optimization is not used by default.
      \ No newline at end of file
      
      diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile
      --- a/pypy/doc/Makefile
      +++ b/pypy/doc/Makefile
      @@ -31,32 +31,38 @@
       	-rm -rf $(BUILDDIR)/*
       
       html:
      +	python config/generate.py
       	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
       	@echo
       	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
       
       dirhtml:
      +	python config/generate.py
       	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
       	@echo
       	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
       
       pickle:
      +	python config/generate.py
       	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
       	@echo
       	@echo "Build finished; now you can process the pickle files."
       
       json:
      +	python config/generate.py
       	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
       	@echo
       	@echo "Build finished; now you can process the JSON files."
       
       htmlhelp:
      +	python config/generate.py
       	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
       	@echo
       	@echo "Build finished; now you can run HTML Help Workshop with the" \
       	      ".hhp project file in $(BUILDDIR)/htmlhelp."
       
       qthelp:
      +	python config/generate.py
       	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
       	@echo
       	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
      @@ -66,6 +72,7 @@
       	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc"
       
       latex:
      +	python config/generate.py
       	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
       	@echo
       	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
      @@ -73,17 +80,20 @@
       	      "run these through (pdf)latex."
       
       changes:
      +	python config/generate.py
       	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
       	@echo
       	@echo "The overview file is in $(BUILDDIR)/changes."
       
       linkcheck:
      +	python config/generate.py
       	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
       	@echo
       	@echo "Link check complete; look for any errors in the above output " \
       	      "or in $(BUILDDIR)/linkcheck/output.txt."
       
       doctest:
      +	python config/generate.py
       	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
       	@echo "Testing of doctests in the sources finished, look at the " \
       	      "results in $(BUILDDIR)/doctest/output.txt."
      
      diff --git a/pypy/doc/config/objspace.usemodules._md5.rst b/pypy/doc/config/objspace.usemodules._md5.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._md5.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -Use the built-in '_md5' module.
      -This module is expected to be working and is included by default.
      -There is also a pure Python version in lib_pypy which is used
      -if the built-in is disabled, but it is several orders of magnitude 
      -slower.
      
      diff --git a/pypy/doc/config/objspace.std.builtinshortcut.rst b/pypy/doc/config/objspace.std.builtinshortcut.txt
      copy from pypy/doc/config/objspace.std.builtinshortcut.rst
      copy to pypy/doc/config/objspace.std.builtinshortcut.txt
      
      diff --git a/pypy/doc/discussion/ctypes_todo.rst b/pypy/doc/discussion/ctypes_todo.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/ctypes_todo.rst
      +++ /dev/null
      @@ -1,34 +0,0 @@
      -Few ctypes-related todo points:
      -
      -* Write down missing parts and port all tests, eventually adding
      -  additional tests.
      -
      -  - for unions and structs, late assignment of _fields_ is somewhat buggy.
      -    Tests about behavior of getattr working properly on instances
      -    are missing or not comprehensive. Some tests are skipped because I didn't
      -    understand the details.
      -
      -  - _fields_ can be tuples too as well as lists
      -
      -  - restype being a function is not working.
      -
      -  - there are features, which we don't support like buffer() and
      -    array() protocols.
      -
      -  - are the _CData_value return lifetime/gc semantics correct?
      -
      -  - for some ABIs we will need completely filled ffitypes to do the
      -    right thing for passing structures by value, we are now passing enough
      -    information to rawffi that it should be possible to construct such precise
      -    ffitypes in most cases
      -
      -  - bitfields are not implemented
      -
      -  - byteorder is not implemented
      -
      -* as all stuff is applevel, we cannot have it really fast right now.
      -
      -* we shall at least try to approach ctypes from the point of the jit
      -  backends (at least on platforms that we support). The thing is that
      -  we need a lot broader support of jit backends for different argument
      -  passing in order to do it.
      
      diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.rst b/pypy/doc/config/objspace.std.optimized_list_getitem.txt
      copy from pypy/doc/config/objspace.std.optimized_list_getitem.rst
      copy to pypy/doc/config/objspace.std.optimized_list_getitem.txt
      
      diff --git a/pypy/doc/config/objspace.std.withropeunicode.rst b/pypy/doc/config/objspace.std.withropeunicode.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withropeunicode.rst
      +++ /dev/null
      @@ -1,7 +0,0 @@
      -Use ropes to implement unicode strings (and also normal strings).
      -
      -See the section in `Standard Interpreter Optimizations`_ for more details.
      -
      -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#ropes
      -
      -
      
      diff --git a/pypy/doc/config/objspace.std.optimized_int_add.rst b/pypy/doc/config/objspace.std.optimized_int_add.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.optimized_int_add.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Optimize the addition of two integers a bit. Enabling this option gives small
      -speedups.
      
      diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst
      --- a/pypy/doc/distribution.rst
      +++ b/pypy/doc/distribution.rst
      @@ -1,10 +1,8 @@
      -.. include:: crufty.rst
      +.. include:: needswork.txt
       
      -     .. ^^ Incomplete,  superceded elsewhere
      -
      -========================
      -lib/distributed features
      -========================
      +=============================
      +lib_pypy/distributed features
      +=============================
       
       The 'distributed' library is an attempt to provide transparent, lazy
       access to remote objects. This is accomplished using
      
      diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.rst b/pypy/doc/config/objspace.usemodules._multiprocessing.txt
      copy from pypy/doc/config/objspace.usemodules._multiprocessing.rst
      copy to pypy/doc/config/objspace.usemodules._multiprocessing.txt
      
      diff --git a/pypy/doc/discussion/howtoimplementpickling.rst b/pypy/doc/discussion/howtoimplementpickling.rst
      --- a/pypy/doc/discussion/howtoimplementpickling.rst
      +++ b/pypy/doc/discussion/howtoimplementpickling.rst
      @@ -1,3 +1,5 @@
      +.. XXX think more, some of this might be useful
      +
       Designing thread pickling or "the Essence of Stackless Python"
       --------------------------------------------------------------
       
      
      diff --git a/pypy/doc/config/objspace.std.withrope.rst b/pypy/doc/config/objspace.std.withrope.txt
      copy from pypy/doc/config/objspace.std.withrope.rst
      copy to pypy/doc/config/objspace.std.withrope.txt
      
      diff --git a/pypy/doc/discussion/compiled-swamp.rst b/pypy/doc/discussion/compiled-swamp.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/compiled-swamp.rst
      +++ /dev/null
      @@ -1,14 +0,0 @@
      -
      -We've got huge swamp of compiled pypy-c's used for:
      -
      -* benchmarks
      -* tests
      -* compliance tests
      -* play1
      -* downloads
      -* ...
      -
      -We've got build tool, which we don't use, etc. etc.
      -
      -Idea is to formalize it more or less, so we'll have single script
      -to make all of this work, upload builds to the web page etc.
      
      diff --git a/pypy/doc/config/objspace.usemodules.operator.rst b/pypy/doc/config/objspace.usemodules.operator.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.operator.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'operator' module. 
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/config/test/test_makerestdoc.py b/pypy/config/test/test_makerestdoc.py
      --- a/pypy/config/test/test_makerestdoc.py
      +++ b/pypy/config/test/test_makerestdoc.py
      @@ -20,14 +20,14 @@
           config = Config(descr)
           txt = descr.make_rest_doc().text()
           
      -    result = {"": checkrest(txt, descr._name + ".txt")}
      +    result = {"": txt}
           for path in config.getpaths(include_groups=True):
               subconf, step = config._cfgimpl_get_home_by_path(path)
               fullpath = (descr._name + "." + path)
               prefix = fullpath.rsplit(".", 1)[0]
               txt = getattr(subconf._cfgimpl_descr, step).make_rest_doc(
                       prefix).text()
      -        result[path] = checkrest(txt, fullpath + ".txt")
      +        result[path] = txt
           return result
       
       def test_simple():
      @@ -68,7 +68,6 @@
                   ChoiceOption("bar", "more doc", ["a", "b", "c"],
                                default="a")])
           result = generate_html(descr)
      -    assert "more doc" in result[""]
       
       def test_cmdline_overview():
           descr = OptionDescription("foo", "doc", [
      
      diff --git a/pypy/doc/config/objspace.name.rst b/pypy/doc/config/objspace.name.txt
      copy from pypy/doc/config/objspace.name.rst
      copy to pypy/doc/config/objspace.name.txt
      
      diff --git a/pypy/doc/config/translation.stackless.rst b/pypy/doc/config/translation.stackless.txt
      copy from pypy/doc/config/translation.stackless.rst
      copy to pypy/doc/config/translation.stackless.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.termios.rst b/pypy/doc/config/objspace.usemodules.termios.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.termios.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'termios' module. 
      -This module is expected to be fully working.
      
      diff --git a/pypy/tool/rest/directive.py b/pypy/tool/rest/directive.py
      --- a/pypy/tool/rest/directive.py
      +++ b/pypy/tool/rest/directive.py
      @@ -1,108 +1,9 @@
      -# XXX this file is messy since it tries to deal with several docutils versions
       import py
       
      -from pypy.tool.rest.convert import convert_dot, latexformula2png
      -
       import sys
       import docutils
       from docutils import nodes
      -from docutils.parsers.rst import directives, states, roles
      -from docutils.parsers.rst.directives import images
      -
      -if hasattr(images, "image"):
      -    directives_are_functions = True
      -else:
      -    directives_are_functions = False
      -
      -try:
      -    from docutils.utils import unescape # docutils version > 0.3.5
      -except ImportError:
      -    from docutils.parsers.rst.states import unescape # docutils 0.3.5
      -
      -if not directives_are_functions:
      -    ImageClass = images.Image
      -
      -else:
      -    class ImageClass(object):
      -        option_spec = images.image.options
      -        def run(self):
      -            return images.image(u'image',
      -                                self.arguments,
      -                                self.options,
      -                                self.content,
      -                                self.lineno,
      -                                self.content_offset,
      -                                self.block_text,
      -                                self.state,
      -                                self.state_machine)
      -
      -
      -backend_to_image_format = {"html": "png", "latex": "pdf"}
      -
      -class GraphvizDirective(ImageClass):
      -    def convert(self, fn, path):
      -        path = py.path.local(path).dirpath()
      -        dot = path.join(fn)
      -        result = convert_dot(dot, backend_to_image_format[_backend])
      -        return result.relto(path)
      -
      -    def run(self):
      -        newname = self.convert(self.arguments[0],
      -                               self.state.document.settings._source)
      -        text = self.block_text.replace("graphviz", "image", 1)
      -        self.block_text = text.replace(self.arguments[0], newname, 1)
      -        self.name = u'image'
      -        self.arguments = [newname]
      -        return ImageClass.run(self)
      -    
      -    def old_interface(self):
      -        def f(name, arguments, options, content, lineno,
      -              content_offset, block_text, state, state_machine):
      -            for arg in "name arguments options content lineno " \
      -                       "content_offset block_text state state_machine".split():
      -                setattr(self, arg, locals()[arg])
      -            return self.run()
      -        f.arguments = (1, 0, 1)
      -        f.options = self.option_spec
      -        return f
      -
      -
      -_backend = None
      -def set_backend_and_register_directives(backend):
      -    #XXX this is only used to work around the inflexibility of docutils:
      -    # a directive does not know the target format
      -    global _backend
      -    _backend = backend
      -    if not directives_are_functions:
      -        directives.register_directive("graphviz", GraphvizDirective)
      -    else:
      -        directives.register_directive("graphviz",
      -                                      GraphvizDirective().old_interface())
      -    roles.register_canonical_role("latexformula", latexformula_role)
      -
      -def latexformula_role(name, rawtext, text, lineno, inliner,
      -                      options={}, content=[]):
      -    if _backend == 'latex':
      -        options['format'] = 'latex'
      -        return roles.raw_role(name, rawtext, text, lineno, inliner,
      -                              options, content)
      -    else:
      -        # XXX: make the place of the image directory configurable
      -        sourcedir = py.path.local(inliner.document.settings._source).dirpath()
      -        imagedir = sourcedir.join("img")
      -        if not imagedir.check():
      -            imagedir.mkdir()
      -        # create halfway senseful imagename:
      -        # use hash of formula + alphanumeric characters of it
      -        # could
      -        imagename = "%s_%s.png" % (
      -            hash(text), "".join([c for c in text if c.isalnum()]))
      -        image = imagedir.join(imagename)
      -        latexformula2png(unescape(text, True), image)
      -        imagenode = nodes.image(image.relto(sourcedir), uri=image.relto(sourcedir))
      -        return [imagenode], []
      -latexformula_role.content = True
      -latexformula_role.options = {}
      +from docutils.parsers.rst import roles
       
       def register_linkrole(role_name, callback):
           def source_role(name, rawtext, text, lineno, inliner, options={},
      
      diff --git a/pypy/doc/config/objspace.timing.rst b/pypy/doc/config/objspace.timing.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.timing.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -timing of various parts of the interpreter (simple profiling)
      
      diff --git a/pypy/doc/config/objspace.usemodules.signal.rst b/pypy/doc/config/objspace.usemodules.signal.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.signal.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'signal' module. 
      -This module is expected to be fully working.
      
      diff --git a/pypy/doc/config/objspace.usepycfiles.rst b/pypy/doc/config/objspace.usepycfiles.txt
      copy from pypy/doc/config/objspace.usepycfiles.rst
      copy to pypy/doc/config/objspace.usepycfiles.txt
      
      diff --git a/pypy/doc/configuration.rst b/pypy/doc/configuration.rst
      --- a/pypy/doc/configuration.rst
      +++ b/pypy/doc/configuration.rst
      @@ -4,22 +4,23 @@
       
       Due to more and more available configuration options it became quite annoying to
       hand the necessary options to where they are actually used and even more
      -annoying to add new options. To circumvent these problems the configuration
      -management was introduced. There all the necessary options are stored into an
      -configuration object, which is available nearly everywhere in the translation
      -toolchain and in the standard interpreter so that adding new options becomes
      +annoying to add new options. To circumvent these problems configuration
      +management was introduced. There all the necessary options are stored in a
      +configuration object, which is available nearly everywhere in the `RPython 
      +toolchain`_ and in the standard interpreter so that adding new options becomes
       trivial. Options are organized into a tree. Configuration objects can be
       created in different ways, there is support for creating an optparse command
       line parser automatically.
       
      +_`RPython toolchain`: translation.html
       
       Main Assumption
       ===============
       
       Configuration objects are produced at the entry points  and handed down to
       where they are actually used. This keeps configuration local but available
      -everywhere and consistent. The configuration values can be created using the
      -command line (already implemented) or a file (still to be done).
      +everywhere and consistent. The configuration values are created using the
      +command line.
       
       
       API Details
      @@ -183,12 +184,12 @@
       The usage of config objects in PyPy
       ===================================
       
      -The two large parts of PyPy, the standard interpreter and the translation
      +The two large parts of PyPy, the Python interpreter and the `RPython 
      +toolchain`_ 
       toolchain, have two separate sets of options. The translation toolchain options
       can be found on the ``config`` attribute of all ``TranslationContext``
      -instances and are described in translationoption.py_. The interpreter options
      +instances and are described in `pypy/config/translationoption.py`_. The interpreter options
       are attached to the object space, also under the name ``config`` and are
      -described in pypyoption.py_.
      +described in `pypy/config/pypyoption.py`_.
       
      -.. _translationoption.py: ../config/translationoption.py
      -.. _pypyoption.py: ../config/pypyoption.py
      +.. include:: _ref.txt
      
      diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst
      --- a/pypy/doc/jit/index.rst
      +++ b/pypy/doc/jit/index.rst
      @@ -4,7 +4,7 @@
       
       :abstract:
       
      -    When PyPy is translated into an executable like ``pypy-c``, the
      +    When PyPy is translated into an executable such as ``pypy-c``, the
           executable contains a full virtual machine that can optionally
           include a Just-In-Time compiler.  This JIT compiler is **generated
           automatically from the interpreter** that we wrote in RPython.
      
      diff --git a/pypy/doc/config/objspace.timing.rst b/pypy/doc/config/objspace.timing.txt
      copy from pypy/doc/config/objspace.timing.rst
      copy to pypy/doc/config/objspace.timing.txt
      
      diff --git a/pypy/doc/config/translation.shared.rst b/pypy/doc/config/translation.shared.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.shared.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Build pypy as a shared library or a DLL, with a small executable to run it.
      -This is necessary on Windows to expose the C API provided by the cpyext module.
      
      diff --git a/pypy/doc/crufty.rst b/pypy/doc/crufty.txt
      copy from pypy/doc/crufty.rst
      copy to pypy/doc/crufty.txt
      
      diff --git a/pypy/doc/config/objspace.std.sharesmallstr.rst b/pypy/doc/config/objspace.std.sharesmallstr.txt
      copy from pypy/doc/config/objspace.std.sharesmallstr.rst
      copy to pypy/doc/config/objspace.std.sharesmallstr.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.select.rst b/pypy/doc/config/objspace.usemodules.select.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.select.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'select' module. 
      -This module is expected to be fully working.
      
      diff --git a/pypy/doc/config/objspace.usemodules.select.rst b/pypy/doc/config/objspace.usemodules.select.txt
      copy from pypy/doc/config/objspace.usemodules.select.rst
      copy to pypy/doc/config/objspace.usemodules.select.txt
      
      diff --git a/pypy/doc/geninterp.rst b/pypy/doc/geninterp.rst
      deleted file mode 100644
      --- a/pypy/doc/geninterp.rst
      +++ /dev/null
      @@ -1,192 +0,0 @@
      -.. include:: crufty.rst
      -
      -     .. ^^ apparently dead
      -
      -The Interpreter-Level backend
      ------------------------------
      -
      -http://codespeak.net/pypy/trunk/pypy/translator/geninterplevel.py
      -
      -Motivation
      -++++++++++
      -
      -PyPy often makes use of `application-level`_ helper methods.
      -The idea of the 'geninterplevel' backend is to automatically transform
      -such application level implementations to their equivalent representation
      -at interpreter level.  Then, the RPython to C translation hopefully can
      -produce more efficient code than always re-interpreting these methods.
      -
      -One property of translation from application level Python to
      -Python is, that the produced code does the same thing as the
      -corresponding interpreted code, but no interpreter is needed
      -any longer to execute this code.
      -
      -.. _`application-level`: coding-guide.html#app-preferable
      -
      -Bootstrap issue
      -+++++++++++++++
      -
      -One issue we had so far was of bootstrapping: some pieces of the
      -interpreter (e.g. exceptions) were written in geninterped code.
      -It is unclear how much of it is left, thought.
      -
      -That bootstrap issue is (was?) solved by invoking a new bytecode interpreter
      -which runs on FlowObjspace. FlowObjspace is complete without
      -complicated initialization. It is able to do abstract interpretation
      -of any Rpythonic code, without actually implementing anything. It just
      -records all the operations the bytecode interpreter would have done by
      -building flowgraphs for all the code. What the Python backend does is
      -just to produce correct Python code from these flowgraphs and return
      -it as source code. In the produced code Python operations recorded in
      -the original flowgraphs are replaced by calls to the corresponding
      -methods in the `object space`_ interface.
      -
      -.. _`object space`: objspace.html
      -
      -Example
      -+++++++
      -
      -.. _implementation: ../../../../pypy/translator/geninterplevel.py
      -
      -Let's try a little example. You might want to look at the flowgraph that it
      -produces. Here, we directly run the Python translation and look at the
      -generated source. See also the header section of the implementation_ for the
      -interface::
      -
      -    >>> from pypy.translator.geninterplevel import translate_as_module
      -    >>> entrypoint, source = translate_as_module("""
      -    ...
      -    ... def g(n):
      -    ...     i = 0
      -    ...     while n:
      -    ...         i = i + n
      -    ...         n = n - 1
      -    ...     return i
      -    ...
      -    ... """)
      -
      -This call has invoked a PyPy bytecode interpreter running on FlowObjspace,
      -recorded every possible codepath into a flowgraph, and then rendered the
      -following source code:: 
      -
      -    #!/bin/env python
      -    # -*- coding: LATIN-1 -*-
      -
      -    def initapp2interpexec(space):
      -      """NOT_RPYTHON"""
      -
      -      def g(space, w_n_1):
      -        goto = 3 # startblock
      -        while True:
      -
      -            if goto == 1:
      -                v0 = space.is_true(w_n)
      -                if v0 == True:
      -                    goto = 2
      -                else:
      -                    goto = 4
      -
      -            if goto == 2:
      -                w_1 = space.add(w_0, w_n)
      -                w_2 = space.sub(w_n, gi_1)
      -                w_n, w_0 = w_2, w_1
      -                goto = 1
      -                continue
      -
      -            if goto == 3:
      -                w_n, w_0 = w_n_1, gi_0
      -                goto = 1
      -                continue
      -
      -            if goto == 4:
      -                return w_0
      -
      -      fastf_g = g
      -
      -      g3dict = space.newdict()
      -      gs___name__ = space.new_interned_str('__name__')
      -      gs_app2interpexec = space.new_interned_str('app2interpexec')
      -      space.setitem(g3dict, gs___name__, gs_app2interpexec)
      -      gs_g = space.new_interned_str('g')
      -      from pypy.interpreter import gateway
      -      gfunc_g = space.wrap(gateway.interp2app(fastf_g, unwrap_spec=[gateway.ObjSpace, gateway.W_Root]))
      -      space.setitem(g3dict, gs_g, gfunc_g)
      -      gi_1 = space.wrap(1)
      -      gi_0 = space.wrap(0)
      -      return g3dict
      -
      -You see that actually a single function is produced:
      -``initapp2interpexec``. This is the function that you will call with a
      -space as argument. It defines a few functions and then does a number
      -of initialization steps, builds the global objects the function need,
      -and produces the PyPy function object ``gfunc_g``.
      -
      -The return value is ``g3dict``, which contains a module name and the
      -function we asked for.
      -
      -Let's have a look at the body of this code: The definition of ``g`` is
      -used as ``fast_g`` in the ``gateway.interp2app`` which constructs a
      -PyPy function object which takes care of argument unboxing (based on
      -the ``unwrap_spec``), and of invoking the original ``g``.
      -
      -We look at the definition of ``g`` itself which does the actual
      -computation. Comparing to the flowgraph, you see a code block for
      -every block in the graph.  Since Python has no goto statement, the
      -jumps between the blocks are implemented by a loop that switches over
      -a ``goto`` variable.
      -
      -::
      -
      -    .       if goto == 1:
      -                v0 = space.is_true(w_n)
      -                if v0 == True:
      -                    goto = 2
      -                else:
      -                    goto = 4
      -
      -This is the implementation of the "``while n:``". There is no implicit state,
      -everything is passed over to the next block by initializing its
      -input variables. This directly resembles the nature of flowgraphs.
      -They are completely stateless.
      -
      -
      -::
      -
      -    .       if goto == 2:
      -                w_1 = space.add(w_0, w_n)
      -                w_2 = space.sub(w_n, gi_1)
      -                w_n, w_0 = w_2, w_1
      -                goto = 1
      -                continue
      -
      -The "``i = i + n``" and "``n = n - 1``" instructions.
      -You see how every instruction produces a new variable.
      -The state is again shuffled around by assigning to the
      -input variables ``w_n`` and ``w_0`` of the next target, block 1.
      -
      -Note that it is possible to rewrite this by re-using variables,
      -trying to produce nested blocks instead of the goto construction
      -and much more. The source would look much more like what we
      -used to write by hand. For the C backend, this doesn't make much
      -sense since the compiler optimizes it for us. For the Python interpreter it could
      -give a bit more speed. But this is a temporary format and will
      -get optimized anyway when we produce the executable.
      -
      -Interplevel Snippets in the Sources
      -+++++++++++++++++++++++++++++++++++
      -
      -Code written in application space can consist of complete files
      -to be translated, or they
      -can be tiny snippets scattered all over a source file, similar
      -to our example from above.
      -
      -Translation of these snippets is done automatically and cached
      -in pypy/_cache with the modulename and the md5 checksum appended
      -to it as file name. If you have run your copy of pypy already,
      -this folder should exist and have some generated files in it.
      -These files consist of the generated code plus a little code
      -that auto-destructs the cached file (plus .pyc/.pyo versions)
      -if it is executed as __main__. On windows this means you can wipe
      -a cached code snippet clear by double-clicking it. Note also that
      -the auto-generated __init__.py file wipes the whole directory
      -when executed.
      
      diff --git a/pypy/doc/config/objspace.usemodules._md5.rst b/pypy/doc/config/objspace.usemodules._md5.txt
      copy from pypy/doc/config/objspace.usemodules._md5.rst
      copy to pypy/doc/config/objspace.usemodules._md5.txt
      
      diff --git a/pypy/doc/config/translation.platform.rst b/pypy/doc/config/translation.platform.txt
      copy from pypy/doc/config/translation.platform.rst
      copy to pypy/doc/config/translation.platform.txt
      
      diff --git a/pypy/doc/config/objspace.std.withmapdict.rst b/pypy/doc/config/objspace.std.withmapdict.txt
      copy from pypy/doc/config/objspace.std.withmapdict.rst
      copy to pypy/doc/config/objspace.std.withmapdict.txt
      
      diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.rst b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
      copy from pypy/doc/config/objspace.std.methodcachesizeexp.rst
      copy to pypy/doc/config/objspace.std.methodcachesizeexp.txt
      
      diff --git a/pypy/doc/discussion/somepbc-refactoring-plan.rst b/pypy/doc/discussion/somepbc-refactoring-plan.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/somepbc-refactoring-plan.rst
      +++ /dev/null
      @@ -1,161 +0,0 @@
      -==========================
      -   Refactoring SomePBCs
      -==========================
      -
      -Motivation
      -==========
      -
      -Some parts of the annotator, and especially specialization, are quite obscure
      -and hackish.  One cause for this is the need to manipulate Python objects like
      -functions directly.  This makes it hard to attach additional information directly
      -to the objects.  It makes specialization messy because it has to create new dummy
      -function objects just to represent the various specialized versions of the function.
      -
      -
      -Plan
      -====
      -
      -Let's introduce nice wrapper objects.  This refactoring is oriented towards
      -the following goal: replacing the content of SomePBC() with a plain set of
      -"description" wrapper objects.  We shall probably also remove the possibility
      -for None to explicitly be in the set and add a can_be_None flag (this is
      -closer to what the other SomeXxx classes do).
      -
      -
      -XxxDesc classes
      -===============
      -
      -To be declared in module pypy.annotator.desc, with a mapping
      -annotator.bookkeeper.descs = {: }
      -accessed with bookkeeper.getdesc().
      -
      -Maybe later the module should be moved out of pypy.annotation but for now I
      -suppose that it's the best place.
      -
      -The goal is to have a single Desc wrapper even for functions and classes that
      -are specialized.
      -
      -FunctionDesc
      -
      -    Describes (usually) a Python function object.  Contains flow graphs: one
      -    in the common case, zero for external functions, more than one if there
      -    are several specialized versions.  Also describes the signature of the
      -    function in a nice format (i.e. not by relying on func_code inspection).
      -
      -ClassDesc
      -
      -    Describes a Python class object.  Generally just maps to a ClassDef, but
      -    could map to more than one in the presence of specialization.  So we get
      -    SomePBC({}) annotations for the class, and when it's
      -    instantiated it becomes SomeInstance(classdef=...) for the particular
      -    selected classdef.
      -
      -MethodDesc
      -
      -    Describes a bound method.  Just references a FunctionDesc and a ClassDef
      -    (not a ClassDesc, because it's read out of a SomeInstance).
      -
      -FrozenDesc
      -
      -    Describes a frozen pre-built instance.  That's also a good place to store
      -    some information currently in dictionaries of the bookkeeper.
      -
      -MethodOfFrozenDesc
      -
      -    Describes a method of a FrozenDesc.  Just references a FunctionDesc and a
      -    FrozenDesc.
      -
      -NB: unbound method objects are the same as function for our purposes, so they
      -become the same FunctionDesc as their im_func.
      -
      -These XxxDesc classes should share some common interface, as we'll see during
      -the refactoring.  A common base class might be a good idea (at least I don't
      -see why it would be a bad idea :-)
      -
      -
      -Implementation plan
      -===================
      -
      -* make a branch (/branch/somepbc-refactoring/)
      -
      -* change the definition of SomePBC, start pypy.annotation.desc
      -
      -* fix all places that use SomePBC :-)
      -
      -* turn Translator.flowgraphs into a plain list of flow graphs,
      -  and make the FunctionDescs responsible for computing their own flow graphs
      -
      -* move external function functionality into the FunctionDescs too
      -
      -
      -Status
      -======
      -
      -Done, branch merged.
      -
      -
      -RTyping PBCs of functions
      -=========================
      -
      -The FuncDesc.specialize() method takes an args_s and return a
      -corresponding graph.  The caller of specialize() parses the actual
      -arguments provided by the simple_call or call_args operation, so that
      -args_s is a flat parsed list.  The returned graph must have the same
      -number and order of input variables.
      -
      -For each call family, we compute a table like this (after annotation
      -finished)::
      -
      -          call_shape   FuncDesc1   FuncDesc2   FuncDesc3   ...
      -  ----------------------------------------------------------
      -   call0    shape1       graph1
      -   call1    shape1       graph1      graph2
      -   call2    shape1                   graph3     graph4            
      -   call3    shape2                   graph5     graph6
      -
      -
      -We then need to merge some of the lines if they look similar enough,
      -e.g. call0 and call1.  Precisely, we can merge two lines if they only
      -differ in having more or less holes.  In theory, the same graph could
      -appear in two lines that are still not mergeable because of other
      -graphs.  For sanity of implementation, we should check that at the end
      -each graph only appears once in the table (unless there is only one
      -*column*, in which case all problems can be dealt with at call sites).
      -
      -(Note that before this refactoring, the code was essentially requiring
      -that the table ended up with either one single row or one single
      -column.)
      -
      -The table is computed when the annotation is complete, in
      -compute_at_fixpoint(), which calls the FuncDesc's consider_call_site()
      -for each call site.  The latter merges lines as soon as possible.  The
      -table is attached to the call family, grouped by call shape.
      -
      -During RTyping, compute_at_fixpoint() is called after each new ll
      -helper is annotated.  Normally, this should not modify existing tables
      -too much, but in some situations it will.  So the rule is that
      -consider_call_site() should not add new (unmerged) rows to the table
      -after the table is considered "finished" (again, unless there is only
      -one column, in which case we should not discover new columns).
      -
      -XXX this is now out of date, in the details at least.
      -
      -RTyping other callable PBCs
      -===========================
      -
      -The above picture attaches "calltable" information to the call
      -families containing the function.  When it comes to rtyping a call of
      -another kind of pbc (class, instance-method, frozenpbc-method) we have
      -two basic choices:
      -
      - - associate the calltable information with the funcdesc that
      -   ultimately ends up getting called, or
      -
      - - attach the calltable to the callfamily that contains the desc
      -   that's actually being called.
      -
      -Neither is totally straightforward: the former is closer to what
      -happens on the trunk but new families of funcdescs need to be created
      -at the end of annotation or by normalisation.  The latter is more of a
      -change.  The former is also perhaps a bit unnatural for ootyped
      -backends.
      
      diff --git a/pypy/doc/config/translation.backendopt.constfold.rst b/pypy/doc/config/translation.backendopt.constfold.txt
      copy from pypy/doc/config/translation.backendopt.constfold.rst
      copy to pypy/doc/config/translation.backendopt.constfold.txt
      
      diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst
      --- a/pypy/doc/eventhistory.rst
      +++ b/pypy/doc/eventhistory.rst
      @@ -54,7 +54,7 @@
       backends. For more details, read the last `sprint status`_ page and
       enjoy the pictures_.
       
      -.. _`sprint status`: http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/tokyo-planning.html
      +.. _`sprint status`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/tokyo/tokyo-planning.txt
       .. _`pictures`: http://www.flickr.com/photos/19046555 at N00/sets/72057594116388174/
       
       PyPy at Python UK/ACCU Conference (United Kingdom)
      @@ -63,12 +63,12 @@
       *April 19th - April 22nd 2006.* Several talks about PyPy were hold at
       this year's Python UK/ACCU conference. Read more at the `ACCU site`_.
       
      -.. _`ACCU site`: http://www.accu.org/
      +.. _`ACCU site`: http://accu.org/
       
       PyPy at XPDay France 2006 in Paris March 23rd - March 24th 2006
       ==================================================================
       
      -Logilab presented PyPy at the first `french XP Day`_ that it was
      +Logilab presented PyPy at the first french XP Day that it was
       sponsoring and which was held in Paris. There was over a hundred
       attendants. Interesting talks included Python as an agile language and
       Tools for continuous integration.
      @@ -99,7 +99,7 @@
       Talks at PyCon 2006 (Dallas, Texas, USA)
       ===================================================================
       
      -*Feb 24th - Feb 26th 2006.* PyPy developers spoke at `PyCon 2006`_.
      +*Feb 24th - Feb 26th 2006.* PyPy developers spoke at PyCon 2006.
       
       .. _`PyCon 2006`: http://us.pycon.org/TX2006/HomePage 
       
      @@ -247,7 +247,7 @@
       .. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg
       .. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html
       .. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ 
      -.. _`Trillke-Gut`: http://www.trillke.net/images/HomePagePictureSmall.jpg
      +.. _`Trillke-Gut`: http://www.trillke.net
       
       EuroPython 2005 sprints finished 
       ======================================================
      @@ -310,6 +310,6 @@
       Read more in `EuroPython sprint announcement`_, see who is  planning to attend
       on `the people page`_. There is also a page_ in the python wiki.
       
      -.. _`EuroPython sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/europython-2006/announce.html
      -.. _`the people page`: http://codespeak.net/pypy/extradoc/sprintinfo/europython-2006/people.html
      +.. _`EuroPython sprint announcement`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/post-ep2006/announce.txt
      +.. _`the people page`: https://bitbucket.org/pypy/extradoc/src/tip/sprintinfo/post-ep2006/people.txt
       .. _page: http://wiki.python.org/moin/EuroPython2006
      
      diff --git a/pypy/doc/discussion/testing-zope.rst b/pypy/doc/discussion/testing-zope.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/testing-zope.rst
      +++ /dev/null
      @@ -1,45 +0,0 @@
      -Testing Zope on top of pypy-c
      -=============================
      -
      -Getting Zope packages
      ----------------------
      -
      -If you don't have a full Zope installation, you can pick a Zope package,
      -check it out via Subversion, and get all its dependencies (replace
      -``$PKG`` with, for example, ``zope.interface``)::
      -
      -    svn co svn://svn.zope.org/repos/main/$PKG/trunk $PKG
      -    cd $PKG
      -    python bootstrap.py
      -    bin/buildout
      -    bin/test
      -
      -Required pypy-c version
      ------------------------
      -
      -You probably need a pypy-c built with --allworkingmodules, at least::
      -
      -    cd pypy/translator/goal
      -    ./translate.py targetpypystandalone.py --allworkingmodules
      -
      -Workarounds
      ------------
      -
      -At the moment, our ``gc`` module is incomplete, making the Zope test
      -runner unhappy.  Quick workaround: go to the
      -``lib-python/modified-2.4.1`` directory and create a
      -``sitecustomize.py`` with the following content::
      -
      -    print ""
      -    import gc
      -    gc.get_threshold = lambda : (0, 0, 0)
      -    gc.get_debug = lambda : 0
      -    gc.garbage = []
      -
      -Running the tests
      ------------------
      -
      -To run the tests we need the --oldstyle option, as follows::
      -
      -    cd $PKG
      -    pypy-c --oldstyle bin/test
      
      diff --git a/pypy/doc/config/translation.gcremovetypeptr.rst b/pypy/doc/config/translation.gcremovetypeptr.txt
      copy from pypy/doc/config/translation.gcremovetypeptr.rst
      copy to pypy/doc/config/translation.gcremovetypeptr.txt
      
      diff --git a/pypy/doc/config/objspace.std.withrangelist.rst b/pypy/doc/config/objspace.std.withrangelist.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withrangelist.rst
      +++ /dev/null
      @@ -1,11 +0,0 @@
      -Enable "range list" objects. They are an additional implementation of the Python
      -``list`` type, indistinguishable for the normal user. Whenever the ``range``
      -builtin is called, an range list is returned. As long as this list is not
      -mutated (and for example only iterated over), it uses only enough memory to
      -store the start, stop and step of the range. This makes using ``range`` as
      -efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
      -
      -See the section in `Standard Interpreter Optimizations`_ for more details.
      -
      -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
      -
      
      diff --git a/pypy/doc/config/translation.gcrootfinder.rst b/pypy/doc/config/translation.gcrootfinder.txt
      copy from pypy/doc/config/translation.gcrootfinder.rst
      copy to pypy/doc/config/translation.gcrootfinder.txt
      --- a/pypy/doc/config/translation.gcrootfinder.rst
      +++ b/pypy/doc/config/translation.gcrootfinder.txt
      @@ -1,15 +1,16 @@
      -Choose method how to find roots in the GC. Boehm and refcounting have their own
      -methods, this is mostly only interesting for framework GCs. For those you have
      -a choice of various alternatives:
      +Choose the method used to find the roots in the GC.  This only
      +applies to our framework GCs.  You have a choice of two
      +alternatives:
       
      - - use a shadow stack (XXX link to paper), e.g. explicitly maintaining a stack
      -   of roots
      +- ``--gcrootfinder=shadowstack``: use a so-called "shadow
      +  stack", which is an explicitly maintained custom stack of
      +  root pointers.  This is the most portable solution.
       
      - - use stackless to find roots by unwinding the stack.  Requires
      -   :config:`translation.stackless`.  Note that this turned out to
      -   be slower than just using a shadow stack.
      +- ``--gcrootfinder=asmgcc``: use assembler hackery to find the
      +  roots directly from the normal stack.  This is a bit faster,
      +  but platform specific.  It works so far with GCC or MSVC,
      +  on i386 and x86-64.
       
      - - use GCC and i386 specific assembler hackery to find the roots on the stack.
      -   This is fastest but platform specific.
      -
      - - Use LLVM's GC facilities to find the roots.
      +You may have to force the use of the shadowstack root finder if
      +you are running into troubles or if you insist on translating
      +PyPy with other compilers like clang.
      
      diff --git a/pypy/doc/config/translation.force_make.rst b/pypy/doc/config/translation.force_make.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.force_make.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Force executing makefile instead of using platform.
      
      diff --git a/pypy/doc/config/objspace.std.withrangelist.rst b/pypy/doc/config/objspace.std.withrangelist.txt
      copy from pypy/doc/config/objspace.std.withrangelist.rst
      copy to pypy/doc/config/objspace.std.withrangelist.txt
      
      diff --git a/pypy/doc/discussion/chained_getattr.rst b/pypy/doc/discussion/chained_getattr.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/chained_getattr.rst
      +++ /dev/null
      @@ -1,70 +0,0 @@
      -
      -
      -"chained getattr/module global lookup" optimization
      -(discussion during trillke-sprint 2007, anto/holger, 
      -a bit of samuele and cf earlier on)  
      -
      -random example: 
      -
      -    code: 
      -        import os.path
      -        normed = [os.path.normpath(p) for p in somelist]
      -    bytecode: 
      -        [...]
      -         LOAD_GLOBAL              (os)
      -         LOAD_ATTR                (path)
      -         LOAD_ATTR                (normpath)
      -         LOAD_FAST                (p)
      -         CALL_FUNCTION            1
      -
      -    would be turned by pypy-compiler into: 
      -
      -         LOAD_CHAINED_GLOBAL      (os,path,normpath)
      -         LOAD_FAST                (p)
      -         CALL_FUNCTION            1
      -       
      -    now for the LOAD_CHAINED_GLOBAL bytecode implementation:
      -
      -        Module dicts have a special implementation, providing: 
      -
      -        - an extra "fastlookup" rpython-dict serving as a cache for
      -          LOAD_CHAINED_GLOBAL places within the modules: 
      -
      -          * keys are e.g. ('os', 'path', 'normpath')
      -
      -          * values are tuples of the form: 
      -            ([obj1, obj2, obj3], [ver1, ver2])
      -
      -             "ver1" refer to the version of the globals of "os"
      -             "ver2" refer to the version of the globals of "os.path"
      -             "obj3" is the resulting "normpath" function 
      -
      -        - upon changes to the global dict, "fastlookup.clear()" is called
      -
      -        - after the fastlookup entry is filled for a given
      -          LOAD_CHAINED_GLOBAL index, the following checks need
      -          to be performed in the bytecode implementation::
      -    
      -              value = f_globals.fastlookup.get(key, None)
      -              if value is None:
      -                 # fill entry 
      -              else:
      -                  # check that our cached lookups are still valid 
      -                  assert isinstance(value, tuple) 
      -                  objects, versions = value
      -                  i = 0
      -                  while i < len(versions): 
      -                      lastversion = versions[i]
      -                      ver = getver_for_obj(objects[i])
      -                      if ver == -1 or ver != lastversion:
      -                         name = key[i]
      -                         objects[i] = space.getattr(curobj, name)
      -                         versions[i] = ver
      -                      curobj = objects[i]
      -                      i += 1
      -              return objects[i]
      -
      -            def getver_for_obj(obj):
      -                if "obj is not Module":
      -                    return -1
      -                return obj.w_dict.version 
      
      diff --git a/pypy/doc/config/objspace.soabi.rst b/pypy/doc/config/objspace.soabi.txt
      copy from pypy/doc/config/objspace.soabi.rst
      copy to pypy/doc/config/objspace.soabi.txt
      
      diff --git a/pypy/doc/config/objspace.std.newshortcut.rst b/pypy/doc/config/objspace.std.newshortcut.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.newshortcut.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Performance only: cache and shortcut calling __new__ from builtin types
      
      diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst
      --- a/pypy/doc/rtyper.rst
      +++ b/pypy/doc/rtyper.rst
      @@ -66,7 +66,7 @@
       each operation.  In both cases the analysis of an operation depends on the
       annotations of its input arguments.  This is reflected in the usage of the same
       ``__extend__`` syntax in the source files (compare e.g.
      -`annotation/binaryop.py`_ and `rpython/rint.py`_).
      +`pypy/annotation/binaryop.py`_ and `pypy/rpython/rint.py`_).
       
       The analogy stops here, though: while it runs, the Annotator is in the middle
       of computing the annotations, so it might need to reflow and generalize until
      @@ -104,7 +104,7 @@
       implementations for the same high-level operations.  This is the reason for
       turning representations into explicit objects.
       
      -The base Repr class is defined in `rpython/rmodel.py`_.  Most of the
      +The base Repr class is defined in `pypy/rpython/rmodel.py`_.  Most of the
       ``rpython/r*.py`` files define one or a few subclasses of Repr.  The method
       getrepr() of the RTyper will build and cache a single Repr instance per
       SomeXxx() instance; moreover, two SomeXxx() instances that are equal get the
      @@ -131,9 +131,9 @@
       The RPython Typer uses a standard low-level model which we believe can
       correspond rather directly to various target languages such as C.
       This model is implemented in the first part of
      -`rpython/lltypesystem/lltype.py`_.
      +`pypy/rpython/lltypesystem/lltype.py`_.
       
      -The second part of `rpython/lltypesystem/lltype.py`_ is a runnable
      +The second part of `pypy/rpython/lltypesystem/lltype.py`_ is a runnable
       implementation of these types, for testing purposes.  It allows us to write
       and test plain Python code using a malloc() function to obtain and manipulate
       structures and arrays.  This is useful for example to implement and test
      @@ -191,7 +191,7 @@
       types like list in this elementary world.  The ``malloc()`` function is a kind
       of placeholder, which must eventually be provided by the code generator for the
       target platform; but as we have just seen its Python implementation in
      -`rpython/lltypesystem/lltype.py`_ works too, which is primarily useful for
      +`pypy/rpython/lltypesystem/lltype.py`_ works too, which is primarily useful for
       testing, interactive exploring, etc.
       
       The argument to ``malloc()`` is the structure type directly, but it returns a
      @@ -316,7 +316,7 @@
       with care: the bigger structure of which they are part of could be freed while
       the Ptr to the substructure is still in use.  In general, it is a good idea to
       avoid passing around pointers to inlined substructures of malloc()ed structures.
      -(The testing implementation of `rpython/lltypesystem/lltype.py`_ checks to some
      +(The testing implementation of `pypy/rpython/lltypesystem/lltype.py`_ checks to some
       extent that you are not trying to use a pointer to a structure after its
       container has been freed, using weak references.  But pointers to non-GC
       structures are not officially meant to be weak references: using them after what
      @@ -429,7 +429,7 @@
       change needed to the Annotator to allow it to perform type inference of our
       very-low-level snippets of code.
       
      -See for example `rpython/rlist.py`_.
      +See for example `pypy/rpython/rlist.py`_.
       
       
       .. _`oo type`:
      @@ -441,10 +441,10 @@
       targeting low level backends such as C, but it is not good
       enough for targeting higher level backends such as .NET CLI or Java
       JVM, so a new object oriented model has been introduced. This model is
      -implemented in the first part of `rpython/ootypesystem/ootype.py`_.
      +implemented in the first part of `pypy/rpython/ootypesystem/ootype.py`_.
       
       As for the low-level typesystem, the second part of
      -`rpython/ootypesystem/ootype.py`_ is a runnable implementation of
      +`pypy/rpython/ootypesystem/ootype.py`_ is a runnable implementation of
       these types, for testing purposes.
       
       
      @@ -791,4 +791,5 @@
               assert res == ~3
       
       .. _annotator: translation.html#the-annotation-pass
      -.. include:: _ref.rst
      +
      +.. include:: _ref.txt
      
      diff --git a/pypy/doc/config/objspace.std.optimized_list_getitem.rst b/pypy/doc/config/objspace.std.optimized_list_getitem.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.optimized_list_getitem.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Optimized list[int] a bit.
      
      diff --git a/pypy/doc/config/objspace.std.prebuiltintto.rst b/pypy/doc/config/objspace.std.prebuiltintto.txt
      copy from pypy/doc/config/objspace.std.prebuiltintto.rst
      copy to pypy/doc/config/objspace.std.prebuiltintto.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.parser.rst b/pypy/doc/config/objspace.usemodules.parser.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.parser.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -Use the 'parser' module. 
      -This is PyPy implementation of the standard library 'parser' module (e.g. if
      -this option is enabled and you say ``import parser`` you get this module).
      -It is enabled by default.
      
      diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.rst b/pypy/doc/config/objspace.disable_call_speedhacks.txt
      copy from pypy/doc/config/objspace.disable_call_speedhacks.rst
      copy to pypy/doc/config/objspace.disable_call_speedhacks.txt
      
      diff --git a/pypy/doc/carbonpython.rst b/pypy/doc/carbonpython.rst
      deleted file mode 100644
      --- a/pypy/doc/carbonpython.rst
      +++ /dev/null
      @@ -1,230 +0,0 @@
      -==================================================
      -CarbonPython, aka C# considered harmful
      -==================================================
      -
      -CarbonPython overview
      -=====================
      -
      -CarbonPython is an experimental RPython to .NET compiler. Its main
      -focus is to produce DLLs to be used by other .NET programs, not
      -standalone executables; if you want to compile an RPython standalone
      -program, have a look to `translate.py`_.
      -
      -Compiled RPython programs are much faster (up to 250x) than
      -interpreted IronPython programs, hence it might be a convenient
      -replacement for C# when more speed is needed. RPython programs can be
      -as fast as C# programs.
      -
-RPython is a restricted subset of Python, static enough to be analyzed
      -and compiled efficiently to lower level languages.  To read more about
      -the RPython limitations read the `RPython description`_.
      -
      -**Disclaimer**: RPython is a much less convenient language than Python
      -to program with. If you do not need speed, there is no reason to look
      -at RPython.
      -
      -**Big disclaimer**: CarbonPython is still in a pre-alpha stage: it's
      -not meant to be used for production code, and the API might change in
      -the future. Despite this, it might be useful in some situations and
      -you are encouraged to try it by yourself. Suggestions, bug-reports and
      -even better patches are welcome.
      -
      -.. _`RPython description`: coding-guide.html#restricted-python
      -.. _`translate.py`: faq.html#how-do-i-compile-my-own-interpreters
      -
      -
      -Quick start
      -===========
      -
      -Suppose you want to write a little DLL in RPython and call its
      -function from C#.
      -
      -Here is the file mylibrary.py::
      -
      -    from pypy.translator.cli.carbonpython import export
      -
      -    @export(int, int)
      -    def add(x, y):
      -        return x+y
      -
      -    @export(int, int)
      -    def sub(x, y):
      -        return x-y
      -
      -
      -And here the C# program main.cs::
      -
      -    using System;
      -    public class CarbonPythonTest
      -    {
      -        public static void Main()
      -        {
      -            Console.WriteLine(mylibrary.add(40, 2));
      -            Console.WriteLine(mylibrary.sub(44, 2));
      -        }
      -    }
      -
      -Once the files have been created, you can compile ``mylibrary.py``
      -with CarbonPython to get the corresponding DLL::
      -
      -    $ python carbonpython.py mylibrary.py
      -    ... lot of stuff
      -
      -Then, we compile main.cs into an executable, being sure to add a
      -reference to the newly created ``mylibrary.dll``::
      -
      -    # with mono on linux
      -    $ gmcs /r:mylibrary.dll main.cs
      -
      -    # with Microsoft CLR on windows
      -    c:\> csc /r:mylibrary main.cs
      -
      -Now we can run the executable to see whether the answers are right::
      -
      -    $ mono main.exe
      -    42
      -    42
      -
      -
      -Multiple entry-points
      -=====================
      -
      -In RPython, the type of each variable is inferred by the `Annotator`_:
-the annotator analyzes the whole program top-down starting from an
-entry-point, i.e. a function for which we specified the types of the
-parameters.
      -
-This approach works for standalone executables, but not for a
-library that by definition is composed of more than one
-entry-point. Thus, you need to explicitly specify which functions you
      -want to include in your DLL, together with the expected input types.
      -
      -To mark a function as an entry-point, you use the ``@export``
      -decorator, which is defined in ``pypy.translator.cli.carbonpython``,
      -as shown by the previous example.  Note that you do not need to
      -specify the return type, because it is automatically inferenced by the
      -annotator.
      -
      -.. _`Annotator`: translation.html#annotator
      -
      -
      -Namespaces
      -==========
      -
      -Since `CLS`_ (Common Language Specification) does not support module
      -level static methods, RPython functions marked as entry-points are
      -compiled to static methods of a class, in order to be accessible by
      -every CLS-compliant language such as C# or VB.NET.
      -
      -The class which each function is placed in depends on its
      -**namespace**; for example, if the namespace of a function ``foo`` is
      -``A.B.C``, the function will be rendered as a static method of the
      -``C`` class inside the ``A.B`` namespace. This allows C# and
      -IronPython code to call the function using the intuitive ``A.B.C.foo``
      -syntax.
      -
-By default, the default namespace for exported functions is the same as
      -the name of the module. Thus in the previous example the default
      -namespace is ``mylibrary`` and the functions are placed inside the
      -corresponding class in the global namespace.
      -
      -You can change the default namespace by setting the ``_namespace_``
      -variable in the module you are compiling::
      -
      -    _namespace_ = 'Foo.Bar'
      -
      -    @export(int, int)
      -    def f(x, y):
      -        pass
      -
      -Finally, you can also set a specific namespace on a per-function
      -basis, using the appropriate keyword argument of the ``@export``
      -decorator::
      -
      -    @export(int, int, namespace='Foo.Bar')
      -    def f(x, y):
      -        pass
      -
      -
      -.. _`CLS`: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-335.pdf
      -
      -
      -Exporting classes
      -=================
      -
      -RPython libraries can also export classes: to export a class, add the
      -``@export`` decorator to its ``__init__`` method; similarly, you can
      -also export any methods of the class::
      -
      -    class MyClass:
      -
      -        @export(int)
      -        def __init__(self, x):
      -            self.x = x
      -
      -        @export
      -        def getx(self):
      -            return self.x
      -
      -
      -Note that the type of ``self`` must not be specified: it will
-automatically be assumed to be ``MyClass``.
      -
      -The ``__init__`` method is not automatically mapped to the .NET
      -constructor; to properly initialize an RPython object from C# or
      -IronPython code you need to explicitly call ``__init__``; for example,
      -in C#::
      -
      -    MyClass obj = new MyClass();
      -    obj.__init__(x);
      -
      -Note that this is needed only when calling RPython code from 
      -outside; the RPython compiler automatically calls ``__init__``
      -whenever an RPython class is instantiated.
      -
      -In the future this discrepancy will be fixed and the ``__init__``
      -method will be automatically mapped to the constructor.
      -
      -
      -Accessing .NET libraries
      -========================
      -
      -**Warning**: the API for accessing .NET classes from RPython is highly
      -experimental and will probably change in the future.
      -
      -In RPython you can access native .NET classes through the ``CLR``
      -object defined in ``translator.cli.dotnet``: from there, you can
      -navigate through namespaces using the usual dot notation; for example,
      -``CLR.System.Collections.ArrayList`` refers to the ``ArrayList`` class
      -in the ``System.Collections`` namespace.
      -
      -To instantiate a .NET class, simply call it::
      -
      -    ArrayList = CLR.System.Collections.ArrayList
      -    def foo():
      -        obj = ArrayList()
      -        obj.Add(42)
      -        return obj
      -
      -At the moment there is no special syntax support for indexers and
      -properties: for example, you can't access ArrayList's elements using
-the square bracket notation, but you have to call the
      -``get_Item`` and ``set_Item`` methods; similarly, to access a property
      -``XXX`` you need to call ``get_XXX`` and ``set_XXX``::
      -
      -    def foo():
      -        obj = ArrayList()
      -        obj.Add(42)
      -        print obj.get_Item(0)
      -        print obj.get_Count()
      -
-Static methods are also supported, as well as overloadings::
      -
      -    Math = CLR.System.Math
      -    def foo():
      -        print Math.Abs(-42)
      -        print Math.Abs(-42.0)
      -
      -
      -At the moment, it is not possible to reference assemblies other than
      -mscorlib. This will be fixed soon.
      
      diff --git a/pypy/doc/config/translation.jit_profiler.rst b/pypy/doc/config/translation.jit_profiler.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.jit_profiler.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Integrate profiler support into the JIT
      
      diff --git a/pypy/doc/config/translation.rst b/pypy/doc/config/translation.txt
      copy from pypy/doc/config/translation.rst
      copy to pypy/doc/config/translation.txt
      
      diff --git a/pypy/doc/config/objspace.std.withcelldict.rst b/pypy/doc/config/objspace.std.withcelldict.txt
      copy from pypy/doc/config/objspace.std.withcelldict.rst
      copy to pypy/doc/config/objspace.std.withcelldict.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.signal.rst b/pypy/doc/config/objspace.usemodules.signal.txt
      copy from pypy/doc/config/objspace.usemodules.signal.rst
      copy to pypy/doc/config/objspace.usemodules.signal.txt
      
      diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst
      --- a/pypy/doc/objspace-proxies.rst
      +++ b/pypy/doc/objspace-proxies.rst
      @@ -11,17 +11,13 @@
       behavior of all objects in a running program is easy to implement on
       top of PyPy.
       
      -Here is what we implemented so far, in historical order:
      +Here is what we have implemented so far, in historical order:
       
       * *Thunk Object Space*: lazily computed objects, computing only when an
         operation is performed on them; lazy functions, computing their result
         only if and when needed; and a way to globally replace an object with
         another.
       
      -* *Taint Object Space*: a soft security system; your application cannot
      -  accidentally compute results based on tainted objects unless it
      -  explicitly untaints them first.
      -
       * *Dump Object Space*: dumps all operations performed on all the objects
         into a large log file.  For debugging your applications.
       
      @@ -133,293 +129,295 @@
          function behaves lazily: all calls to it return a thunk object.
       
       
      -.. _taint:
      +.. broken right now:
       
      -The Taint Object Space
      -======================
      +    .. _taint:
       
      -Motivation
      -----------
      +    The Taint Object Space
      +    ======================
       
      -The Taint Object Space provides a form of security: "tainted objects",
      -inspired by various sources, see [D12.1]_ for a more detailed discussion. 
      +    Motivation
      +    ----------
       
      -The basic idea of this kind of security is not to protect against
      -malicious code but to help with handling and boxing sensitive data. 
      -It covers two kinds of sensitive data: secret data which should not leak, 
      -and untrusted data coming from an external source and that must be 
      -validated before it is used.
      +    The Taint Object Space provides a form of security: "tainted objects",
      +    inspired by various sources, see [D12.1]_ for a more detailed discussion. 
       
      -The idea is that, considering a large application that handles these
      -kinds of sensitive data, there are typically only a small number of
      -places that need to explicitly manipulate that sensitive data; all the
      -other places merely pass it around, or do entirely unrelated things.
      +    The basic idea of this kind of security is not to protect against
      +    malicious code but to help with handling and boxing sensitive data. 
      +    It covers two kinds of sensitive data: secret data which should not leak, 
      +    and untrusted data coming from an external source and that must be 
      +    validated before it is used.
       
      -Nevertheless, if a large application needs to be reviewed for security,
      -it must be entirely carefully checked, because it is possible that a
      -bug at some apparently unrelated place could lead to a leak of sensitive
      -information in a way that an external attacker could exploit.  For
      -example, if any part of the application provides web services, an
      -attacker might be able to issue unexpected requests with a regular web
      -browser and deduce secret information from the details of the answers he
      -gets.  Another example is the common CGI attack where an attacker sends
      -malformed inputs and causes the CGI script to do unintended things.
      +    The idea is that, considering a large application that handles these
      +    kinds of sensitive data, there are typically only a small number of
      +    places that need to explicitly manipulate that sensitive data; all the
      +    other places merely pass it around, or do entirely unrelated things.
       
      -An approach like that of the Taint Object Space allows the small parts
      -of the program that manipulate sensitive data to be explicitly marked.
      -The effect of this is that although these small parts still need a
      -careful security review, the rest of the application no longer does,
      -because even a bug would be unable to leak the information.
      +    Nevertheless, if a large application needs to be reviewed for security,
      +    it must be entirely carefully checked, because it is possible that a
      +    bug at some apparently unrelated place could lead to a leak of sensitive
      +    information in a way that an external attacker could exploit.  For
      +    example, if any part of the application provides web services, an
      +    attacker might be able to issue unexpected requests with a regular web
      +    browser and deduce secret information from the details of the answers he
      +    gets.  Another example is the common CGI attack where an attacker sends
      +    malformed inputs and causes the CGI script to do unintended things.
       
      -We have implemented a simple two-level model: objects are either
      -regular (untainted), or sensitive (tainted).  Objects are marked as
      -sensitive if they are secret or untrusted, and only declassified at
      -carefully-checked positions (e.g. where the secret data is needed, or
      -after the untrusted data has been fully validated).
      +    An approach like that of the Taint Object Space allows the small parts
      +    of the program that manipulate sensitive data to be explicitly marked.
      +    The effect of this is that although these small parts still need a
      +    careful security review, the rest of the application no longer does,
      +    because even a bug would be unable to leak the information.
       
      -It would be simple to extend the code for more fine-grained scales of
      -secrecy.  For example it is typical in the literature to consider
      -user-specified lattices of secrecy levels, corresponding to multiple
      -"owners" that cannot access data belonging to another "owner" unless
      -explicitly authorized to do so.
      +    We have implemented a simple two-level model: objects are either
      +    regular (untainted), or sensitive (tainted).  Objects are marked as
      +    sensitive if they are secret or untrusted, and only declassified at
      +    carefully-checked positions (e.g. where the secret data is needed, or
      +    after the untrusted data has been fully validated).
       
      -Tainting and untainting
      ------------------------
      +    It would be simple to extend the code for more fine-grained scales of
      +    secrecy.  For example it is typical in the literature to consider
      +    user-specified lattices of secrecy levels, corresponding to multiple
      +    "owners" that cannot access data belonging to another "owner" unless
      +    explicitly authorized to do so.
       
      -Start a py.py with the Taint Object Space and try the following example::
      +    Tainting and untainting
      +    -----------------------
       
      -    $ py.py -o taint
      -    >>>> from __pypy__ import taint
      -    >>>> x = taint(6)
      +    Start a py.py with the Taint Object Space and try the following example::
       
      -    # x is hidden from now on.  We can pass it around and
      -    # even operate on it, but not inspect it.  Taintness
      -    # is propagated to operation results.
      +        $ py.py -o taint
      +        >>>> from __pypy__ import taint
      +        >>>> x = taint(6)
       
      -    >>>> x
      -    TaintError
      +        # x is hidden from now on.  We can pass it around and
      +        # even operate on it, but not inspect it.  Taintness
      +        # is propagated to operation results.
       
      -    >>>> if x > 5: y = 2   # see below
      -    TaintError
      +        >>>> x
      +        TaintError
       
      -    >>>> y = x + 5         # ok
      -    >>>> lst = [x, y]
      -    >>>> z = lst.pop()
      -    >>>> t = type(z)       # type() works too, tainted answer
      -    >>>> t
      -    TaintError
      -    >>>> u = t is int      # even 'is' works
      -    >>>> u
      -    TaintError
      +        >>>> if x > 5: y = 2   # see below
      +        TaintError
       
      -Notice that using a tainted boolean like ``x > 5`` in an ``if``
      -statement is forbidden.  This is because knowing which path is followed
      -would give away a hint about ``x``; in the example above, if the
      -statement ``if x > 5: y = 2`` was allowed to run, we would know
      -something about the value of ``x`` by looking at the (untainted) value
      -in the variable ``y``.
      +        >>>> y = x + 5         # ok
      +        >>>> lst = [x, y]
      +        >>>> z = lst.pop()
      +        >>>> t = type(z)       # type() works too, tainted answer
      +        >>>> t
      +        TaintError
      +        >>>> u = t is int      # even 'is' works
      +        >>>> u
      +        TaintError
       
      -Of course, there is a way to inspect tainted objects.  The basic way is
      -to explicitly "declassify" it with the ``untaint()`` function.  In an
      -application, the places that use ``untaint()`` are the places that need
      -careful security review.  To avoid unexpected objects showing up, the
      -``untaint()`` function must be called with the exact type of the object
      -to declassify.  It will raise ``TaintError`` if the type doesn't match::
      +    Notice that using a tainted boolean like ``x > 5`` in an ``if``
      +    statement is forbidden.  This is because knowing which path is followed
      +    would give away a hint about ``x``; in the example above, if the
      +    statement ``if x > 5: y = 2`` was allowed to run, we would know
      +    something about the value of ``x`` by looking at the (untainted) value
      +    in the variable ``y``.
       
      -    >>>> from __pypy__ import taint
      -    >>>> untaint(int, x)
      -    6
      -    >>>> untaint(int, z)
      -    11
      -    >>>> untaint(bool, x > 5)
      -    True
      -    >>>> untaint(int, x > 5)
      -    TaintError
      +    Of course, there is a way to inspect tainted objects.  The basic way is
      +    to explicitly "declassify" it with the ``untaint()`` function.  In an
      +    application, the places that use ``untaint()`` are the places that need
      +    careful security review.  To avoid unexpected objects showing up, the
      +    ``untaint()`` function must be called with the exact type of the object
      +    to declassify.  It will raise ``TaintError`` if the type doesn't match::
       
      +        >>>> from __pypy__ import taint
      +        >>>> untaint(int, x)
      +        6
      +        >>>> untaint(int, z)
      +        11
      +        >>>> untaint(bool, x > 5)
      +        True
      +        >>>> untaint(int, x > 5)
      +        TaintError
       
      -Taint Bombs
      ------------
       
      -In this area, a common problem is what to do about failing operations.
      -If an operation raises an exception when manipulating a tainted object,
      -then the very presence of the exception can leak information about the
      -tainted object itself.  Consider::
      +    Taint Bombs
      +    -----------
       
      -    >>>> 5 / (x-6)
      +    In this area, a common problem is what to do about failing operations.
      +    If an operation raises an exception when manipulating a tainted object,
      +    then the very presence of the exception can leak information about the
      +    tainted object itself.  Consider::
       
      -By checking if this raises ``ZeroDivisionError`` or not, we would know
      -if ``x`` was equal to 6 or not.  The solution to this problem in the
      -Taint Object Space is to introduce *Taint Bombs*.  They are a kind of
      -tainted object that doesn't contain a real object, but a pending
      -exception.  Taint Bombs are indistinguishable from normal tainted
      -objects to unprivileged code. See::
      +        >>>> 5 / (x-6)
       
      -    >>>> x = taint(6)
      -    >>>> i = 5 / (x-6)     # no exception here
      -    >>>> j = i + 1         # nor here
      -    >>>> k = j + 5         # nor here
      -    >>>> untaint(int, k)
      -    TaintError
      +    By checking if this raises ``ZeroDivisionError`` or not, we would know
      +    if ``x`` was equal to 6 or not.  The solution to this problem in the
      +    Taint Object Space is to introduce *Taint Bombs*.  They are a kind of
      +    tainted object that doesn't contain a real object, but a pending
      +    exception.  Taint Bombs are indistinguishable from normal tainted
      +    objects to unprivileged code. See::
       
      -In the above example, all of ``i``, ``j`` and ``k`` contain a Taint
      -Bomb.  Trying to untaint it raises an exception - a generic
      -``TaintError``.  What we win is that the exception gives little away,
      -and most importantly it occurs at the point where ``untaint()`` is
      -called, not where the operation failed.  This means that all calls to
      -``untaint()`` - but not the rest of the code - must be carefully
      -reviewed for what occurs if they receive a Taint Bomb; they might catch
      -the ``TaintError`` and give the user a generic message that something
      -went wrong, if we are reasonably careful that the message or even its
      -presence doesn't give information away.  This might be a
      -problem by itself, but there is no satisfying general solution here:
      -it must be considered on a case-by-case basis.  Again, what the
      -Taint Object Space approach achieves is not solving these problems, but
      -localizing them to well-defined small parts of the application - namely,
      -around calls to ``untaint()``.
      +        >>>> x = taint(6)
      +        >>>> i = 5 / (x-6)     # no exception here
      +        >>>> j = i + 1         # nor here
      +        >>>> k = j + 5         # nor here
      +        >>>> untaint(int, k)
      +        TaintError
       
      -The ``TaintError`` exception deliberately does not include any
      -useful error messages, because they might give information away.
      -Of course, this makes debugging quite a bit harder; a difficult
      -problem to solve properly.  So far we have implemented a way to peek in a Taint
      -Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that
      -prints the exception as soon as a Bomb is created - both write
      -information to the low-level stderr of the application, where we hope
      -that it is unlikely to be seen by anyone but the application
      -developer.
      +    In the above example, all of ``i``, ``j`` and ``k`` contain a Taint
      +    Bomb.  Trying to untaint it raises an exception - a generic
      +    ``TaintError``.  What we win is that the exception gives little away,
      +    and most importantly it occurs at the point where ``untaint()`` is
      +    called, not where the operation failed.  This means that all calls to
      +    ``untaint()`` - but not the rest of the code - must be carefully
      +    reviewed for what occurs if they receive a Taint Bomb; they might catch
      +    the ``TaintError`` and give the user a generic message that something
      +    went wrong, if we are reasonably careful that the message or even its
      +    presence doesn't give information away.  This might be a
      +    problem by itself, but there is no satisfying general solution here:
      +    it must be considered on a case-by-case basis.  Again, what the
      +    Taint Object Space approach achieves is not solving these problems, but
      +    localizing them to well-defined small parts of the application - namely,
      +    around calls to ``untaint()``.
       
      +    The ``TaintError`` exception deliberately does not include any
      +    useful error messages, because they might give information away.
      +    Of course, this makes debugging quite a bit harder; a difficult
      +    problem to solve properly.  So far we have implemented a way to peek in a Taint
      +    Box or Bomb, ``__pypy__._taint_look(x)``, and a "debug mode" that
      +    prints the exception as soon as a Bomb is created - both write
      +    information to the low-level stderr of the application, where we hope
      +    that it is unlikely to be seen by anyone but the application
      +    developer.
       
      -Taint Atomic functions
      -----------------------
       
      -Occasionally, a more complicated computation must be performed on a
      -tainted object.  This requires first untainting the object, performing the
      -computations, and then carefully tainting the result again (including
      -hiding all exceptions into Bombs).
      +    Taint Atomic functions
      +    ----------------------
       
      -There is a built-in decorator that does this for you::
      +    Occasionally, a more complicated computation must be performed on a
      +    tainted object.  This requires first untainting the object, performing the
      +    computations, and then carefully tainting the result again (including
      +    hiding all exceptions into Bombs).
       
      -    >>>> @__pypy__.taint_atomic
      -    >>>> def myop(x, y):
      -    ....     while x > 0:
      -    ....         x -= y
      -    ....     return x
      -    ....
      -    >>>> myop(42, 10)
      -    -8
      -    >>>> z = myop(taint(42), 10)
      -    >>>> z
      -    TaintError
      -    >>>> untaint(int, z)
      -    -8
      +    There is a built-in decorator that does this for you::
       
      -The decorator makes a whole function behave like a built-in operation.
      -If no tainted argument is passed in, the function behaves normally.  But
      -if any of the arguments is tainted, it is automatically untainted - so
      -the function body always sees untainted arguments - and the eventual
      -result is tainted again (possibly in a Taint Bomb).
      +        >>>> @__pypy__.taint_atomic
      +        >>>> def myop(x, y):
      +        ....     while x > 0:
      +        ....         x -= y
      +        ....     return x
      +        ....
      +        >>>> myop(42, 10)
      +        -8
      +        >>>> z = myop(taint(42), 10)
      +        >>>> z
      +        TaintError
      +        >>>> untaint(int, z)
      +        -8
       
      -It is important for the function marked as ``taint_atomic`` to have no
      -visible side effects, as these could cause information leakage.
      -This is currently not enforced, which means that all ``taint_atomic``
      -functions have to be carefully reviewed for security (but not the
      -callers of ``taint_atomic`` functions).
      +    The decorator makes a whole function behave like a built-in operation.
      +    If no tainted argument is passed in, the function behaves normally.  But
      +    if any of the arguments is tainted, it is automatically untainted - so
      +    the function body always sees untainted arguments - and the eventual
      +    result is tainted again (possibly in a Taint Bomb).
       
      -A possible future extension would be to forbid side-effects on
      -non-tainted objects from all ``taint_atomic`` functions.
      +    It is important for the function marked as ``taint_atomic`` to have no
      +    visible side effects, as these could cause information leakage.
      +    This is currently not enforced, which means that all ``taint_atomic``
      +    functions have to be carefully reviewed for security (but not the
      +    callers of ``taint_atomic`` functions).
       
      -An example of usage: given a tainted object ``passwords_db`` that
      -references a database of passwords, we can write a function
      -that checks if a password is valid as follows::
      +    A possible future extension would be to forbid side-effects on
      +    non-tainted objects from all ``taint_atomic`` functions.
       
      -    @taint_atomic
      -    def validate(passwords_db, username, password):
      -        assert type(passwords_db) is PasswordDatabase
      -        assert type(username) is str
      -        assert type(password) is str
      -        ...load username entry from passwords_db...
      -        return expected_password == password
      +    An example of usage: given a tainted object ``passwords_db`` that
      +    references a database of passwords, we can write a function
      +    that checks if a password is valid as follows::
       
      -It returns a tainted boolean answer, or a Taint Bomb if something
      -went wrong.  A caller can do::
      +        @taint_atomic
      +        def validate(passwords_db, username, password):
      +            assert type(passwords_db) is PasswordDatabase
      +            assert type(username) is str
      +            assert type(password) is str
      +            ...load username entry from passwords_db...
      +            return expected_password == password
       
      -    ok = validate(passwords_db, 'john', '1234')
      -    ok = untaint(bool, ok)
      +    It returns a tainted boolean answer, or a Taint Bomb if something
      +    went wrong.  A caller can do::
       
      -This can give three outcomes: ``True``, ``False``, or a ``TaintError``
      -exception (with no information on it) if anything went wrong.  If even
      -this is considered giving too much information away, the ``False`` case
      -can be made indistinguishable from the ``TaintError`` case (simply by
      -raising an exception in ``validate()`` if the password is wrong).
      +        ok = validate(passwords_db, 'john', '1234')
      +        ok = untaint(bool, ok)
       
      -In the above example, the security results achieved are the following:
      -as long as ``validate()`` does not leak information, no other part of
      -the code can obtain more information about a passwords database than a
      -Yes/No answer to a precise query.
      +    This can give three outcomes: ``True``, ``False``, or a ``TaintError``
      +    exception (with no information on it) if anything went wrong.  If even
      +    this is considered giving too much information away, the ``False`` case
      +    can be made indistinguishable from the ``TaintError`` case (simply by
      +    raising an exception in ``validate()`` if the password is wrong).
       
      -A possible extension of the ``taint_atomic`` decorator would be to check
      -the argument types, as ``untaint()`` does, for the same reason: to
      -prevent bugs where a function like ``validate()`` above is accidentally
      -called with the wrong kind of tainted object, which would make it
      -misbehave.  For now, all ``taint_atomic`` functions should be
      -conservative and carefully check all assumptions on their input
      -arguments.
      +    In the above example, the security results achieved are the following:
      +    as long as ``validate()`` does not leak information, no other part of
      +    the code can obtain more information about a passwords database than a
      +    Yes/No answer to a precise query.
       
      +    A possible extension of the ``taint_atomic`` decorator would be to check
      +    the argument types, as ``untaint()`` does, for the same reason: to
      +    prevent bugs where a function like ``validate()`` above is accidentally
      +    called with the wrong kind of tainted object, which would make it
      +    misbehave.  For now, all ``taint_atomic`` functions should be
      +    conservative and carefully check all assumptions on their input
      +    arguments.
       
      -.. _`taint-interface`:
       
      -Interface
      ----------
      +    .. _`taint-interface`:
       
      -.. _`like a built-in operation`:
      +    Interface
      +    ---------
       
      -The basic rule of the Tainted Object Space is that it introduces two new
      -kinds of objects, Tainted Boxes and Tainted Bombs (which are not types
      -in the Python sense).  Each box internally contains a regular object;
      -each bomb internally contains an exception object.  An operation
      -involving Tainted Boxes is performed on the objects contained in the
      -boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an
      -operation does not let an exception be raised).  An operation called
      -with a Tainted Bomb argument immediately returns the same Tainted Bomb.
      +    .. _`like a built-in operation`:
       
      -In a PyPy running with (or translated with) the Taint Object Space,
      -the ``__pypy__`` module exposes the following interface:
      +    The basic rule of the Tainted Object Space is that it introduces two new
      +    kinds of objects, Tainted Boxes and Tainted Bombs (which are not types
      +    in the Python sense).  Each box internally contains a regular object;
      +    each bomb internally contains an exception object.  An operation
      +    involving Tainted Boxes is performed on the objects contained in the
      +    boxes, and gives a Tainted Box or a Tainted Bomb as a result (such an
      +    operation does not let an exception be raised).  An operation called
      +    with a Tainted Bomb argument immediately returns the same Tainted Bomb.
       
      -* ``taint(obj)``
      +    In a PyPy running with (or translated with) the Taint Object Space,
      +    the ``__pypy__`` module exposes the following interface:
       
      -    Return a new Tainted Box wrapping ``obj``.  Return ``obj`` itself
      -    if it is already tainted (a Box or a Bomb).
      +    * ``taint(obj)``
       
      -* ``is_tainted(obj)``
      +        Return a new Tainted Box wrapping ``obj``.  Return ``obj`` itself
      +        if it is already tainted (a Box or a Bomb).
       
      -    Check if ``obj`` is tainted (a Box or a Bomb).
      +    * ``is_tainted(obj)``
       
      -* ``untaint(type, obj)``
      +        Check if ``obj`` is tainted (a Box or a Bomb).
       
      -    Untaints ``obj`` if it is tainted.  Raise ``TaintError`` if the type
      -    of the untainted object is not exactly ``type``, or if ``obj`` is a
      -    Bomb.
      +    * ``untaint(type, obj)``
       
      -* ``taint_atomic(func)``
      +        Untaints ``obj`` if it is tainted.  Raise ``TaintError`` if the type
      +        of the untainted object is not exactly ``type``, or if ``obj`` is a
      +        Bomb.
       
      -    Return a wrapper function around the callable ``func``.  The wrapper
      -    behaves `like a built-in operation`_ with respect to untainting the
      -    arguments, tainting the result, and returning a Bomb.
      +    * ``taint_atomic(func)``
       
      -* ``TaintError``
      +        Return a wrapper function around the callable ``func``.  The wrapper
      +        behaves `like a built-in operation`_ with respect to untainting the
      +        arguments, tainting the result, and returning a Bomb.
       
      -    Exception.  On purpose, it provides no attribute or error message.
      +    * ``TaintError``
       
      -* ``_taint_debug(level)``
      +        Exception.  On purpose, it provides no attribute or error message.
       
      -    Set the debugging level to ``level`` (0=off).  At level 1 or above,
      -    all Taint Bombs print a diagnostic message to stderr when they are
      -    created.
      +    * ``_taint_debug(level)``
       
      -* ``_taint_look(obj)``
      +        Set the debugging level to ``level`` (0=off).  At level 1 or above,
      +        all Taint Bombs print a diagnostic message to stderr when they are
      +        created.
       
      -    For debugging purposes: prints (to stderr) the type and address of
      -    the object in a Tainted Box, or prints the exception if ``obj`` is
      -    a Taint Bomb.
      +    * ``_taint_look(obj)``
      +
      +        For debugging purposes: prints (to stderr) the type and address of
      +        the object in a Tainted Box, or prints the exception if ``obj`` is
      +        a Taint Bomb.
       
       
       .. _dump:
      @@ -485,7 +483,7 @@
       ----------------------------------------------------
       
       Suppose we want to have a list which stores all operations performed on
      -it for later analysis.  We can use the small `tputil`_ module to help
      +it for later analysis.  We can use the small `lib_pypy/tputil.py`_ module to help
       with transparently proxying builtin instances::
       
          from tputil import make_proxy
      @@ -534,10 +532,10 @@
       
       .. _tputil: 
       
      -tputil help module 
      +tputil helper module 
       ----------------------------
       
      -The `tputil.py`_ module provides: 
      +The `lib_pypy/tputil.py`_ module provides: 
       
       * ``make_proxy(controller, type, obj)``: function which 
         creates a transparent proxy controlled by the given 
      @@ -595,8 +593,8 @@
       to application level code. 
       
       Transparent proxies are implemented on top of the `standard object
      -space`_, in `proxy_helpers.py`_, `proxyobject.py`_ and
      -`transparent.py`_.  To use them you will need to pass a
      +space`_, in `pypy/objspace/std/proxy_helpers.py`_, `pypy/objspace/std/proxyobject.py`_ and
      +`pypy/objspace/std/transparent.py`_.  To use them you will need to pass a
       `--objspace-std-withtproxy`_ option to ``py.py`` or
       ``translate.py``.  This registers implementations named
       ``W_TransparentXxx`` - which usually correspond to an
      @@ -607,12 +605,8 @@
       lists, dicts, exceptions, tracebacks and frames.
       
       .. _`standard object space`: objspace.html#the-standard-object-space
      -.. _`proxy_helpers.py`: ../../../../pypy/objspace/std/proxy_helpers.py
      -.. _`proxyobject.py`: ../../../../pypy/objspace/std/proxyobject.py
      -.. _`transparent.py`: ../../../../pypy/objspace/std/transparent.py
      -.. _`tputil.py`: ../../lib_pypy/tputil.py
       
       .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy
                  EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf
       
      -.. include:: _ref.rst
      +.. include:: _ref.txt
      
      diff --git a/pypy/doc/config/objspace.allworkingmodules.rst b/pypy/doc/config/objspace.allworkingmodules.txt
      copy from pypy/doc/config/objspace.allworkingmodules.rst
      copy to pypy/doc/config/objspace.allworkingmodules.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.fcntl.rst b/pypy/doc/config/objspace.usemodules.fcntl.txt
      copy from pypy/doc/config/objspace.usemodules.fcntl.rst
      copy to pypy/doc/config/objspace.usemodules.fcntl.txt
      
      diff --git a/pypy/doc/config/objspace.rst b/pypy/doc/config/objspace.txt
      copy from pypy/doc/config/objspace.rst
      copy to pypy/doc/config/objspace.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._weakref.rst b/pypy/doc/config/objspace.usemodules._weakref.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._weakref.rst
      +++ /dev/null
      @@ -1,6 +0,0 @@
      -Use the '_weakref' module, necessary for the standard lib 'weakref' module.
      -PyPy's weakref implementation is not completely stable yet. The first
      -difference to CPython is that weak references only go away after the next
      -garbage collection, not immediately. The other problem seems to be that under
      -certain circumstances (that we have not determined) weak references keep the
      -object alive.
      
      diff --git a/pypy/doc/config/objspace.usemodules.array.rst b/pypy/doc/config/objspace.usemodules.array.txt
      copy from pypy/doc/config/objspace.usemodules.array.rst
      copy to pypy/doc/config/objspace.usemodules.array.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.mallocs.rst b/pypy/doc/config/translation.backendopt.mallocs.txt
      copy from pypy/doc/config/translation.backendopt.mallocs.rst
      copy to pypy/doc/config/translation.backendopt.mallocs.txt
      
      diff --git a/pypy/doc/config/objspace.std.prebuiltintto.rst b/pypy/doc/config/objspace.std.prebuiltintto.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.prebuiltintto.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -See :config:`objspace.std.withprebuiltint`.
      
      diff --git a/pypy/doc/discussion/paper-wishlist.rst b/pypy/doc/discussion/paper-wishlist.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/paper-wishlist.rst
      +++ /dev/null
      @@ -1,27 +0,0 @@
      -Things we would like to write papers about
      -==========================================
      -
      -- object space architecture + reflective space
      -- stackless transformation
      -- composable coroutines
      -- jit:
      -  - overview paper
      -  - putting our jit into the context of classical partial evaluation
      -  - a jit technical paper too, probably
      -
      -- sandboxing
      -
      -Things about which writing a paper would be nice, which need more work first
      -============================================================================
      -
      -- taint object space
      -- logic object space
      -
      -- jit
      -
      -  - with some more work: how to deal in a JIT backend with less-that-
      -      full-function compilation unit
      -
      -  - work in progress (Anto?): our JIT on the JVM
      -  - (later) removing the overhead of features not used, e.g. thunk space or
      -      another special space
      
      diff --git a/pypy/doc/config/translation.vanilla.rst b/pypy/doc/config/translation.vanilla.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.vanilla.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Try to make the resulting compiled program as portable (=movable to another
      -machine) as possible. Which is not much.
      
      diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
      --- a/pypy/doc/interpreter-optimizations.rst
      +++ b/pypy/doc/interpreter-optimizations.rst
      @@ -79,7 +79,7 @@
       
           $ bin/py.py --objspace-std-withrope
           faking 
      -    PyPy 0.99.0 in StdObjSpace on top of Python 2.4.4c1 (startuptime: 17.24 secs)
      +    PyPy 1.5.0-alpha0 in StdObjSpace on top of Python 2.7.1+ (startuptime: 11.38 secs)
           >>>> import sys
           >>>> sys.maxint
           2147483647
      @@ -90,7 +90,8 @@
       
       You can enable this feature with the :config:`objspace.std.withrope` option.
       
      -.. _`"Ropes: An alternative to Strings."`: http://www.cs.ubc.ca/local/reading/proceedings/spe91-95/spe/vol25/issue12/spe986.pdf
      +.. _`"Ropes: An alternative to Strings."`: http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.14.9450&rep=rep1&type=pdf
      +
       
       Integer Optimizations
       ---------------------
      @@ -134,7 +135,6 @@
       implementations for various purposes (see below).
       
       This is now the default implementation of dictionaries in the Python interpreter.
      -option.
       
       Sharing Dicts
       +++++++++++++
      @@ -205,28 +205,11 @@
       User Class Optimizations
       ------------------------
       
      -Shadow Tracking
      -+++++++++++++++
      -
      -Shadow tracking is a general optimization that speeds up method calls for user
      -classes (that don't have special meta-class). For this a special dict
      -representation is used together with multidicts. This dict representation is
      -used only for instance dictionaries. The instance dictionary tracks whether an
      -instance attribute shadows an attribute of its class. This makes method calls
      -slightly faster in the following way: When calling a method the first thing that
      -is checked is the class dictionary to find descriptors. Normally, when a method
      -is found, the instance dictionary is then checked for instance attributes
      -shadowing the class attribute. If we know that there is no shadowing (since
      -instance dict tells us that) we can save this lookup on the instance dictionary.
      -
      -*This was deprecated and is no longer available.*
      -
       
       Method Caching
       ++++++++++++++
       
      -Shadow tracking is also an important building block for the method caching
      -optimization. A method cache is introduced where the result of a method lookup
      +A method cache is introduced where the result of a method lookup
       is stored (which involves potentially many lookups in the base classes of a
       class). Entries in the method cache are stored using a hash computed from
       the name being looked up, the call site (i.e. the bytecode object and
      @@ -344,14 +327,12 @@
       improving results by anything from 15-40 per cent.
       
       Another optimization, or rather set of optimizations, that has a uniformly good
      -effect is the set of three 'method optimizations', i.e. shadow tracking, the
      +effect are the two 'method optimizations', i.e. the
       method cache and the LOOKUP_METHOD and CALL_METHOD opcodes.  On a heavily
       object-oriented benchmark (richards) they combine to give a speed-up of nearly
       50%, and even on the extremely un-object-oriented pystone benchmark, the
       improvement is over 20%.
       
      -.. waffles about ropes
      -
       When building pypy, all generally useful optimizations are turned on by default
       unless you explicitly lower the translation optimization level with the
       ``--opt`` option.
      
      diff --git a/pypy/doc/config/objspace.usemodules._multiprocessing.rst b/pypy/doc/config/objspace.usemodules._multiprocessing.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._multiprocessing.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the '_multiprocessing' module.
      -Used by the 'multiprocessing' standard lib module. This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules.oracle.rst b/pypy/doc/config/objspace.usemodules.oracle.txt
      copy from pypy/doc/config/objspace.usemodules.oracle.rst
      copy to pypy/doc/config/objspace.usemodules.oracle.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.errno.rst b/pypy/doc/config/objspace.usemodules.errno.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.errno.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'errno' module. 
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules.posix.rst b/pypy/doc/config/objspace.usemodules.posix.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.posix.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Use the essential 'posix' module.
      -This module is essential, included by default and cannot be removed (even when
      -specified explicitly, the option gets overridden later).
      
      diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.rst b/pypy/doc/config/objspace.std.getattributeshortcut.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.getattributeshortcut.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Performance only: track types that override __getattribute__.
      
      diff --git a/pypy/doc/config/objspace.usemodules.cpyext.rst b/pypy/doc/config/objspace.usemodules.cpyext.txt
      copy from pypy/doc/config/objspace.usemodules.cpyext.rst
      copy to pypy/doc/config/objspace.usemodules.cpyext.txt
      
      diff --git a/pypy/doc/statistic/style.css b/pypy/doc/statistic/style.css
      deleted file mode 100644
      --- a/pypy/doc/statistic/style.css
      +++ /dev/null
      @@ -1,1083 +0,0 @@
      -body,body.editor,body.body {
      -    font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif;
      -    background: White;
      -    color: Black;
      -}
      -
      -a, a.reference {
      -	text-decoration: none; 
      -}
      -a[href]:hover { text-decoration: underline; }
      -
      -img {
      -    border: none;
      -	vertical-align: middle;
      -}
      -
      -p, div.text {
      -    text-align: left;
      -    line-height: 1.5em;
      -    margin: 0.5em 0em 0em 0em;
      -}
      -
      -
      -
      -p a:active {
      -	color: Red;
      -    background-color: transparent;
      -}
      -
      -p img {
      -    border: 0;
      -    margin: 0;
      -}
      -
      -img.inlinephoto {
      -    padding: 0;
      -    padding-right: 1em;
      -    padding-top: 0.7em;
      -    float: left;
      -}
      -
      -hr {
      -    clear: both;
      -    height: 1px;
      -    color: #8CACBB;
      -    background-color: transparent;
      -}
      -
      -
      -ul { 
      -    line-height: 1.5em;
      -    /*list-style-image: url("bullet.gif"); */
      -    margin-left: 1.5em;
      -    padding:0;
      -}
      -
      -ol {
      -    line-height: 1.5em;
      -    margin-left: 1.5em;
      -    padding:0;
      -}
      -
      -ul a, ol a {
      -    text-decoration: underline;
      -}
      -
      -dl {
      -}
      -
      -dt {
      -    font-weight: bold;    
      -}
      -
      -dd {
      -    line-height: 1.5em;
      -    margin-bottom: 1em;
      -}
      -
      -blockquote {
      -    font-family: Times, "Times New Roman", serif;
      -    font-style: italic;
      -    font-size: 120%;
      -}
      -
      -code {
      -    color: Black;
      -    /*background-color: #dee7ec;*/
      -    background-color: #cccccc;
      -}
      -
      -pre {
      -    padding: 1em;
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: #dee7ec;
      -    background-color: #cccccc;
      -    overflow: auto;
      -}
      -
      -
      -.netscape4 {
      -    display: none;
      -}
      -
      -/* main page styles */
      -
      -/*a[href]:hover { color: black; text-decoration: underline; }
      -a[href]:link { color: black; text-decoration: underline; }
      -a[href] { color: black; text-decoration: underline; }
      -*/
      -
      -span.menu_selected {
      -	color: black;
      -  	font: 120% Verdana, Helvetica, Arial, sans-serif;
      -	text-decoration: none;
      -    padding-right: 0.3em;
      -    background-color: #cccccc;
      -}
      -
      -
      -a.menu {
      -  	/*color: #3ba6ec; */
      -  	font: 120% Verdana, Helvetica, Arial, sans-serif;
      -	text-decoration: none;
      -    padding-right: 0.3em;
      -}
      -
      -a.menu[href]:visited, a.menu[href]:link{
      -  	/*color: #3ba6ec; */
      -  	font: 120% Verdana, Helvetica, Arial, sans-serif;
      -	text-decoration: none;
      -}
      -
      -a.menu[href]:hover {
      -  	/*color: black;*/
      -}
      -
      -div.project_title{
      -  /*border-spacing: 20px;*/
      -  font: 160% Verdana, Helvetica, Arial, sans-serif;
      -  color: #3ba6ec; 
      -  vertical-align: center;
      -  padding-bottom: 0.3em;
      -}
      -
      -a.wikicurrent {
      -  font: 100% Verdana, Helvetica, Arial, sans-serif;
      -  color: #3ba6ec; 
      -  vertical-align: middle;
      -}
      -
      -
      -table.body {
      -  border: 0;
      -  /*padding: 0;
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  */
      -}
      -
      -td.page-header-left {
      -  padding: 5px;
      -  /*border-bottom: 1px solid #444444;*/
      -}
      -
      -td.page-header-top {
      -  padding: 0;
      -    
      -  /*border-bottom: 1px solid #444444;*/
      -}
      -
      -td.sidebar {
      -  padding: 1 0 0 1;
      -}
      -
      -td.sidebar p.classblock {
      -  padding: 0 5 0 5;
      -  margin: 1 1 1 1;
      -  border: 1px solid #444444;
      -  background-color: #eeeeee;
      -}
      -
      -td.sidebar p.userblock {
      -  padding: 0 5 0 5;
      -  margin: 1 1 1 1;
      -  border: 1px solid #444444;
      -  background-color: #eeeeff;
      -}
      -
      -td.content {
      -  padding: 1 5 1 5;
      -  vertical-align: top;
      -  width: 100%;
      -}
      -
      -p.ok-message {
      -  background-color: #22bb22;
      -  padding: 5 5 5 5;
      -  color: white;
      -  font-weight: bold;
      -}
      -p.error-message {
      -  background-color: #bb2222;
      -  padding: 5 5 5 5;
      -  color: white;
      -  font-weight: bold;
      -}
      -
      -p:first-child { 
      -  margin: 0 ;
      -  padding: 0;
      -}
      -
      -/* style for forms */
      -table.form {
      -  padding: 2;
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -}
      -
      -table.form th {
      -  color: #333388;
      -  text-align: right;
      -  vertical-align: top;
      -  font-weight: normal;
      -}
      -table.form th.header {
      -  font-weight: bold;
      -  background-color: #eeeeff;
      -  text-align: left;
      -}
      -
      -table.form th.required {
      -  font-weight: bold;
      -}
      -
      -table.form td {
      -  color: #333333;
      -  empty-cells: show;
      -  vertical-align: top;
      -}
      -
      -table.form td.optional {
      -  font-weight: bold;
      -  font-style: italic;
      -}
      -
      -table.form td.html {
      -  color: #777777;
      -}
      -
      -/* style for lists */
      -table.list {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  vertical-align: top;
      -  padding-top: 0;
      -  width: 100%;
      -}
      -
      -table.list th {
      -  padding: 0 4 0 4;
      -  color: #404070;
      -  background-color: #eeeeff;
      -  border-right: 1px solid #404070;
      -  border-top: 1px solid #404070;
      -  border-bottom: 1px solid #404070;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -table.list th a[href]:hover { color: #404070 }
      -table.list th a[href]:link { color: #404070 }
      -table.list th a[href] { color: #404070 }
      -table.list th.group {
      -  background-color: #f4f4ff;
      -  text-align: center;
      -  font-size: 120%;
      -}
      -
      -table.list td {
      -  padding: 0 4 0 4;
      -  border: 0 2 0 2;
      -  border-right: 1px solid #404070;
      -  color: #404070;
      -  background-color: white;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -
      -table.list tr.normal td {
      -  background-color: white;
      -  white-space: nowrap;
      -}
      -
      -table.list tr.alt td {
      -  background-color: #efefef;
      -  white-space: nowrap;
      -}
      -
      -table.list td:first-child {
      -  border-left: 1px solid #404070;
      -  border-right: 1px solid #404070;
      -}
      -
      -table.list th:first-child {
      -  border-left: 1px solid #404070;
      -  border-right: 1px solid #404070;
      -}
      -
      -table.list tr.navigation th {
      -  text-align: right;
      -}
      -table.list tr.navigation th:first-child {
      -  border-right: none;
      -  text-align: left;
      -}
      -
      -
      -/* style for message displays */
      -table.messages {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.messages th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.messages th {
      -  font-weight: bold;
      -  color: black;
      -  text-align: left;
      -  border-bottom: 1px solid #afafaf;
      -}
      -
      -table.messages td {
      -  font-family: monospace;
      -  background-color: #efefef;
      -  border-bottom: 1px solid #afafaf;
      -  color: black;
      -  empty-cells: show;
      -  border-right: 1px solid #afafaf;
      -  vertical-align: top;
      -  padding: 2 5 2 5;
      -}
      -
      -table.messages td:first-child {
      -  border-left: 1px solid #afafaf;
      -  border-right: 1px solid #afafaf;
      -}
      -
      -/* style for file displays */
      -table.files {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.files th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.files th {
      -  border-bottom: 1px solid #afafaf;
      -  font-weight: bold;
      -  text-align: left;
      -}
      -
      -table.files td {
      -  font-family: monospace;
      -  empty-cells: show;
      -}
      -
      -/* style for history displays */
      -table.history {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.history th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -  font-size: 100%;
      -}
      -
      -table.history th {
      -  border-bottom: 1px solid #afafaf;
      -  font-weight: bold;
      -  text-align: left;
      -  font-size: 90%;
      -}
      -
      -table.history td {
      -  font-size: 90%;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -
      -
      -/* style for class list */
      -table.classlist {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.classlist th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.classlist th {
      -  font-weight: bold;
      -  text-align: left;
      -}
      -
      -
      -/* style for class help display */
      -table.classhelp {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.classhelp th {
      -  font-weight: bold;
      -  text-align: left;
      -  color: #707040;
      -}
      -
      -table.classhelp td {
      -  padding: 2 2 2 2;
      -  border: 1px solid black;
      -  text-align: left;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -
      -
      -/* style for "other" displays */
      -table.otherinfo {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.otherinfo th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.otherinfo th {
      -  border-bottom: 1px solid #afafaf;
      -  font-weight: bold;
      -  text-align: left;
      -}
      -
      -input {
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: white;
      -    vertical-align: middle;
      -    margin-bottom: 1px; /* IE bug fix */
      -    padding: 0.1em;
      -}
      -
      -select {
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: white;
      -    vertical-align: middle;
      -    margin-bottom: 1px; /* IE bug fix */
      -    padding: 0.1em;
      -}
      -
      -
      -a.nonexistent {
      -    color: #FF2222;
      -}
      -a.nonexistent:visited {
      -    color: #FF2222;
      -}
      -a.external {
      -    color: #AA6600;
      -}
      -
      -/*
      -dl,ul,ol {
      -    margin-top: 1pt;
      -}
      -tt,pre {
      -    font-family: Lucida Console,Courier New,Courier,monotype;
      -    font-size: 12pt;
      -}
      -pre.code {
      -    margin-top: 8pt;
      -    margin-bottom: 8pt;
      -    background-color: #FFFFEE;
      -    white-space:pre;
      -    border-style:solid;
      -    border-width:1pt;
      -    border-color:#999999;
      -    color:#111111;
      -    padding:5px;
      -    width:100%;
      -}
      -*/
      -div.diffold {
      -    background-color: #FFFF80;
      -    border-style:none;
      -    border-width:thin;
      -    width:100%;
      -}
      -div.diffnew {
      -    background-color: #80FF80;
      -    border-style:none;
      -    border-width:thin;
      -    width:100%;
      -}
      -div.message {
      -    margin-top: 6pt;
      -    background-color: #E8FFE8;
      -    border-style:solid;
      -    border-width:1pt;
      -    border-color:#999999;
      -    color:#440000;
      -    padding:5px;
      -    width:100%;
      -}
      -strong.highlight {
      -    background-color: #FFBBBB;
      -/* as usual, NetScape fucks up with innocent CSS
      -    border-color: #FFAAAA;
      -    border-style: solid;
      -    border-width: 1pt;
      -*/
      -}
      -
      -table.navibar {
      -    background-color: #C8C8C8;
      -    border-spacing: 3px;
      -}
      -td.navibar {
      -    background-color: #E8E8E8;
      -    vertical-align: top;
      -    text-align: right;
      -    padding: 0px;
      -}
      -
      -div.pagename {
      -    font-size: 140%;
      -    color: blue;
      -    text-align: center;
      -    font-weight: bold;
      -    background-color: white;
      -    padding: 0 ;
      -}
      -
      -a.wikiaction, input.wikiaction {
      -    color: black; 
      -    text-decoration: None;
      -    text-align: center;
      -    color: black;
      -    /*border: 1px solid #3ba6ec; */
      -    margin: 4px;
      -    padding: 5;
      -    padding-bottom: 0;
      -    white-space: nowrap;
      -}
      -
      -a.wikiaction[href]:hover { 
      -	color: black; 
      -	text-decoration: none; 
      -	/*background-color: #dddddd; */
      -}
      -
      -span.wikiuserpref {
      -    padding-top: 1em;
      -    font-size: 120%;
      -}
      -
      -div.wikitrail {
      -    vertical-align: bottom;
      -    /*font-size: -1;*/
      -    padding-top: 1em;
      -    display: none;
      -}
      -
      -div.wikiaction {
      -    vertical-align: middle;
      -    /*border-bottom: 1px solid #8cacbb;*/
      -    padding-bottom:1em;
      -    text-align: left;
      -    width: 100%;
      -}
      -
      -div.wikieditmenu {
      -    text-align: right;
      -}
      -
      -form.wikiedit {
      -    border: 1px solid #8cacbb;
      -    background-color: #f0f0f0;
      -    background-color: #fabf00;
      -    padding: 1em;
      -    padding-right: 0em;
      -}
      -
      -div.legenditem {
      -    padding-top: 0.5em;
      -    padding-left: 0.3em;
      -}
      -
      -span.wikitoken {
      -   background-color: #eeeeee;
      -}
      -    
      -
      -div#contentspace h1:first-child, div.heading:first-child { 
      -  padding-top: 0;
      -  margin-top: 0;
      -}
      -div#contentspace h2:first-child { 
      -  padding-top: 0;
      -  margin-top: 0;
      -}
      -
      -/* heading and paragraph text */
      -
      -div.heading, h1 {
      -    font-family: Verdana, Helvetica, Arial, sans-serif;
      -    background-color: #58b3ef;
      -    background-color: #FFFFFF; 
      -    /*color: #4893cf;*/
      -    color: black;
      -    padding-top: 1.0em;
      -    padding-bottom:0.2em;
      -    text-align: left;
      -    margin-top: 0em; 
      -    /*margin-bottom:8pt;*/
      -    font-weight: bold;
      -    font-size: 115%;
      -    border-bottom: 1px solid #8CACBB;
      -}
      -
      -
      -h1, h2, h3, h4, h5, h6 {
      -    color: Black;
      -    clear: left;
      -    font: 100% Verdana, Helvetica, Arial, sans-serif;
      -    margin: 0;
      -    padding-left: 0em;
      -    padding-top: 1em;
      -    padding-bottom: 0.2em;
      -    /*border-bottom: 1px solid #8CACBB;*/
      -}
      -/* h1,h2 { padding-top: 0; }*/
      -
      -
      -h1 { font-size: 145%; }
      -h2 { font-size: 135%; }
      -h3 { font-size: 125%; }
      -h4 { font-size: 120%; }
      -h5 { font-size: 110%; }
      -h6 { font-size: 80%; }
      -
      -h1 a { text-decoration: None;}
      -
      -div.exception {
      -  background-color: #bb2222;
      -  padding: 5 5 5 5;
      -  color: white;
      -  font-weight: bold;
      -}
      -pre.exception {
      -    font-size: 110%;
      -    padding: 1em;
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: #dee7ec;
      -    background-color: #cccccc;
      -}
      -
      -/* defines for navgiation bar (documentation) */
      -
      -
      -div.direntry {
      -    padding-top: 0.3em;
      -    padding-bottom: 0.3em;
      -    margin-right: 1em;
      -    font-weight: bold;
      -    background-color: #dee7ec;
      -    font-size: 110%;
      -}
      -
      -div.fileentry {
      -    font-family: Verdana, Helvetica, Arial, sans-serif;
      -    padding-bottom: 0.3em;
      -    white-space: nowrap;
      -    line-height: 150%;
      -}
      -
      -a.fileentry {
      -    white-space: nowrap;
      -}
      -
      -
      -span.left {
      -    text-align: left;
      -}
      -span.right {
      -    text-align: right;
      -}
      -
      -div.navbar {
      -  /*margin: 0;*/
      -  font-size: 80% /*smaller*/;
      -  font-weight: bold;
      -  text-align: left;
      -  /* position: fixed; */
      -  top: 100pt;
      -  left: 0pt; /*  auto; */
      -  width: 120pt;
      -  /* right: auto;
      -  right: 0pt;  2em; */
      -}
      -
      -
      -div.history a {
      -    /* font-size: 70%; */
      -}
      -
      -div.wikiactiontitle { 
      -  font-weight: bold;
      -}
      -
      -/*  REST  defines */
      -
      -div.document {
      -    margin: 0;
      -}
      -
      -h1.title {
      -    margin: 0;
      -}
      -
      -td.toplist {
      -    vertical-align: top;
      -}
      -
      -img#pyimg {
      -    position: absolute;
      -    top: 4px;
      -    left: 4px;
      -}
      -
      -img#extraimg {
      -    position: absolute;
      -    right: 14px; 
      -    top: 4px;
      -}
      -    
      -div#navspace {
      -    position: absolute;
      -    top: 130px;
      -    left: 11px;
      -    font-size: 100%;
      -    width: 150px;
      -    overflow: hidden; /* scroll;  */
      -}
      -
      -div#metaspace {
      -    position: absolute;
      -    top: 40px;
      -    left: 170px;
      -}
      -
      -div#errorline {
      -    position: relative;
      -    top: 5px; 
      -    float: right; 
      -}
      -
      -div#contentspace {
      -    position: absolute;
      -  	/* font: 120% "Times New Roman", serif;*/
      -    font: 110% Verdana, Helvetica, Arial, sans-serif;
      -    top: 130px;
      -    left: 170px;
      -    margin-right: 5px;
      -}
      -
      -div#menubar {
      -/*    width: 400px; */
      -    float: left;
      -}
      -
      -/* for the documentation page */
      -div#docinfoline {
      -  position: relative;
      -  top: 5px; 
      -  left: 0px;
      -
      -  /*background-color: #dee7ec; */
      -  padding: 5pt; 
      -  padding-bottom: 1em; 
      -  color: black;
      -  /*border-width: 1pt;
      -  border-style: solid;*/
      -
      -}
      -
      -div#docnavlist {
      -  /*background-color: #dee7ec; */
      -  padding: 5pt; 
      -  padding-bottom: 2em; 
      -  color: black;
      -  border-width: 1pt;
      -  /*border-style: solid;*/
      -}
      -
      -
      -/* text markup */
      -
      -div.listtitle {
      -    color: Black;
      -    clear: left;
      -    font: 120% Verdana, Helvetica, Arial, sans-serif;
      -    margin: 0;
      -    padding-left: 0em;
      -    padding-top: 0em;
      -    padding-bottom: 0.2em;
      -    margin-right: 0.5em;
      -    border-bottom: 1px solid #8CACBB;
      -}
      -
      -div.actionbox h3 { 
      -  padding-top: 0;
      -  padding-right: 0.5em;
      -  padding-left: 0.5em;
      -  background-color: #fabf00;
      -  text-align: center;
      -  border: 1px solid black; /* 8cacbb; */
      -}
      -
      -div.actionbox a { 
      -  display: block;
      -  padding-bottom: 0.5em;
      -  padding-top: 0.5em;
      -  margin-left: 0.5em;
      -}
      -
      -div.actionbox a.history { 
      -  display: block;
      -  padding-bottom: 0.5em;
      -  padding-top: 0.5em;
      -  margin-left: 0.5em;
      -  font-size: 90%; 
      -}
      -
      -div.actionbox { 
      -  margin-bottom: 2em;
      -  padding-bottom: 1em;
      -  overflow: hidden; /* scroll;  */
      -}
      -
      -/* taken from docutils (oh dear, a bit senseless) */
      -ol.simple, ul.simple {
      -  margin-bottom: 1em }
      -
      -ol.arabic {
      -  list-style: decimal }
      -
      -ol.loweralpha {
      -  list-style: lower-alpha }
      -
      -ol.upperalpha {
      -  list-style: upper-alpha }
      -
      -ol.lowerroman {
      -  list-style: lower-roman }
      -
      -ol.upperroman {
      -  list-style: upper-roman }
      -
      -
      -/*
      -:Author: David Goodger
      -:Contact: goodger at users.sourceforge.net
      -:date: $Date: 2003/01/22 22:26:48 $
      -:version: $Revision: 1.29 $
      -:copyright: This stylesheet has been placed in the public domain.
      -
      -Default cascading style sheet for the HTML output of Docutils.
      -*/
      -/*
      -.first {
      -  margin-top: 0 }
      -
      -.last {
      -  margin-bottom: 0 }
      -
      -a.toc-backref {
      -  text-decoration: none ;
      -  color: black }
      -
      -dd {
      -  margin-bottom: 0.5em }
      -
      -div.abstract {
      -  margin: 2em 5em }
      -
      -div.abstract p.topic-title {
      -  font-weight: bold ;
      -  text-align: center }
      -
      -div.attention, div.caution, div.danger, div.error, div.hint,
      -div.important, div.note, div.tip, div.warning {
      -  margin: 2em ;
      -  border: medium outset ;
      -  padding: 1em }
      -
      -div.attention p.admonition-title, div.caution p.admonition-title,
      -div.danger p.admonition-title, div.error p.admonition-title,
      -div.warning p.admonition-title {
      -  color: red ;
      -  font-weight: bold ;
      -  font-family: sans-serif }
      -
      -div.hint p.admonition-title, div.important p.admonition-title,
      -div.note p.admonition-title, div.tip p.admonition-title {
      -  font-weight: bold ;
      -  font-family: sans-serif }
      -
      -div.dedication {
      -  margin: 2em 5em ;
      -  text-align: center ;
      -  font-style: italic }
      -
      -div.dedication p.topic-title {
      -  font-weight: bold ;
      -  font-style: normal }
      -
      -div.figure {
      -  margin-left: 2em }
      -
      -div.footer, div.header {
      -  font-size: smaller }
      -
      -div.system-messages {
      -  margin: 5em }
      -
      -div.system-messages h1 {
      -  color: red }
      -
      -div.system-message {
      -  border: medium outset ;
      -  padding: 1em }
      -
      -div.system-message p.system-message-title {
      -  color: red ;
      -  font-weight: bold }
      -
      -div.topic {
      -  margin: 2em }
      -
      -h1.title {
      -  text-align: center }
      -
      -h2.subtitle {
      -  text-align: center }
      -
      -hr {
      -  width: 75% }
      -
      -p.caption {
      -  font-style: italic }
      -
      -p.credits {
      -  font-style: italic ;
      -  font-size: smaller }
      -
      -p.label {
      -  white-space: nowrap }
      -
      -p.topic-title {
      -  font-weight: bold }
      -
      -pre.address {
      -  margin-bottom: 0 ;
      -  margin-top: 0 ;
      -  font-family: serif ;
      -  font-size: 100% }
      -
      -pre.line-block {
      -  font-family: serif ;
      -  font-size: 100% }
      -
      -pre.literal-block, pre.doctest-block {
      -  margin-left: 2em ;
      -  margin-right: 2em ;
      -  background-color: #eeeeee }
      -
      -span.classifier {
      -  font-family: sans-serif ;
      -  font-style: oblique }
      -
      -span.classifier-delimiter {
      -  font-family: sans-serif ;
      -  font-weight: bold }
      -
      -span.interpreted {
      -  font-family: sans-serif }
      -
      -span.option {
      -  white-space: nowrap }
      -
      -span.option-argument {
      -  font-style: italic }
      -
      -span.pre {
      -  white-space: pre }
      -
      -span.problematic {
      -  color: red }
      -
      -table {
      -  margin-top: 0.5em ;
      -  margin-bottom: 0.5em }
      -
      -table.citation {
      -  border-left: solid thin gray ;
      -  padding-left: 0.5ex }
      -
      -table.docinfo {
      -  margin: 2em 4em }
      -
      -table.footnote {
      -  border-left: solid thin black ;
      -  padding-left: 0.5ex }
      -
      -td, th {
      -  padding-left: 0.5em ;
      -  padding-right: 0.5em ;
      -  vertical-align: top }
      -
      -th.docinfo-name, th.field-name {
      -  font-weight: bold ;
      -  text-align: left ;
      -  white-space: nowrap }
      -
      -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
      -  font-size: 100% }
      -
      -tt {
      -  background-color: #eeeeee }
      -
      -ul.auto-toc {
      -  list-style-type: none }
      -*/
      -
      -div.section {
      -  margin-top: 1.0em ;
      -}    
      
      diff --git a/pypy/doc/config/translation.platform.rst b/pypy/doc/config/translation.platform.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.platform.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -select the target platform, in case of cross-compilation
      
      diff --git a/pypy/doc/config/translation.instrumentctl.rst b/pypy/doc/config/translation.instrumentctl.txt
      copy from pypy/doc/config/translation.instrumentctl.rst
      copy to pypy/doc/config/translation.instrumentctl.txt
      
      
      diff --git a/pypy/doc/config/translation.fork_before.rst b/pypy/doc/config/translation.fork_before.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.fork_before.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -This is an option mostly useful when working on the PyPy toolchain. If you use
      -it, translate.py will fork before the specified phase. If the translation
      -crashes after that fork, you can fix the bug in the toolchain, and continue
      -translation at the fork-point.
      
      diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.rst b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
      copy from pypy/doc/config/objspace.std.withmethodcachecounter.rst
      copy to pypy/doc/config/objspace.std.withmethodcachecounter.txt
      
      diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
      --- a/pypy/doc/windows.rst
      +++ b/pypy/doc/windows.rst
      @@ -13,10 +13,21 @@
       Translating PyPy with Visual Studio
       -----------------------------------
       
      -We routinely test the translation toolchain using Visual Studio .NET
      +We routinely test the `RPython translation toolchain`_ using Visual Studio .NET
       2005, Professional Edition, and Visual Studio .NET 2008, Express
       Edition.  Other configurations may work as well.
       
      +The translation scripts will set up the appropriate environment variables
      +for the compiler.  They will attempt to locate the same compiler version that
      +was used to build the Python interpreter doing the
      +translation.  Failing that, they will pick the most recent Visual Studio
      +compiler they can find.  In addition, the target architecture
      +(32 bits, 64 bits) is automatically selected.  A 32 bit build can only be built
      +using a 32 bit Python and vice versa.
      +
      +**Note:** PyPy is currently not supported for 64 bit Windows, and translation
      +will be aborted in this case.
      +
       The compiler is all you need to build pypy-c, but it will miss some
       modules that relies on third-party libraries.  See below how to get
       and build them.
      @@ -111,3 +122,4 @@
           cp .libs/libffi-5.dll 
       
       .. _`libffi source files`: http://sourceware.org/libffi/
      +.. _`RPython translation toolchain`: translation.html
      
      diff --git a/pypy/doc/dev_method.rst b/pypy/doc/dev_method.rst
      --- a/pypy/doc/dev_method.rst
      +++ b/pypy/doc/dev_method.rst
      @@ -20,7 +20,7 @@
       Main tools for achieving this is:
       
         * py.test - automated testing
      -  * Subversion - version control
      +  * Mercurial - version control
         * Transparent communication and documentation (mailinglists, IRC, tutorials
           etc etc) 
       
      @@ -237,124 +237,3 @@
       interested in using sprints as away of making contact with active developers
       (Python/compiler design etc)!
       
      -If you have questions about our sprints and EU-funding - please send an email
      -to pypy-funding at codespeak.net, our mailinglist for project coordination.
      -
      -Previous sprints?
      -+++++++++++++++++
      -
      -The PyPy team has been sprinting on the following occasions::
      -
      -    * Hildesheim                      Feb     2003
      -    * Gothenburg                      May     2003
      -    * Europython/Louvain-La-Neuve     June    2003
      -    * Berlin                          Sept    2003
      -    * Amsterdam                       Dec     2003
      -    * Europython/Gothenburg           June    2004
      -    * Vilnius                         Nov     2004
      -    * Leysin                          Jan     2005
      -    * PyCon/Washington                March   2005     
      -    * Europython/Gothenburg           June    2005
      -    * Hildesheim                      July    2005
      -    * Heidelberg                      Aug     2005
      -    * Paris                           Oct     2005
      -    * Gothenburg                      Dec     2005
      -    * Mallorca                        Jan     2006
      -    * PyCon/Dallas                    Feb     2006
      -    * Louvain-La-Neuve                March   2006
      -    * Leysin                          April   2006
      -    * Tokyo                           April   2006
      -    * Düsseldorf                      June    2006
      -    * Europython/Geneva               July    2006
      -    * Limerick                        Aug     2006
      -    * Düsseldorf                      Oct     2006
      -    * Leysin                          Jan     2007
      -    * Hildesheim                      Feb     2007
      -    
      -People who have participated and contributed during our sprints and thus
      -contributing to PyPy (if we have missed someone here - please contact us 
      -so we can correct it):
      -
      -    Armin Rigo
      -    Holger Krekel
      -    Samuele Pedroni
      -    Christian Tismer
      -    Laura Creighton
      -    Jacob Hallén
      -    Michael Hudson
      -    Richard Emslie
      -    Anders Chrigström
      -    Alex Martelli
      -    Ludovic Aubry
      -    Adrien DiMascio
      -    Nicholas Chauvat
      -    Niklaus Haldimann
      -    Anders Lehmann
      -    Carl Friedrich Bolz
      -    Eric Van Riet Paap
      -    Stephan Diel
      -    Dinu Gherman
      -    Jens-Uwe Mager
      -    Marcus Denker
      -    Bert Freudenberg
      -    Gunther Jantzen
      -    Henrion Benjamin
      -    Godefroid Chapelle
      -    Anna Ravenscroft
      -    Tomek Meka
      -    Jonathan David Riehl
      -    Patrick Maupain
      -    Etienne Posthumus
      -    Nicola Paolucci
      -    Albertas Agejevas
      -    Marius Gedminas
      -    Jesus Cea Avion
      -    Olivier Dormond
      -    Jacek Generowicz
      -    Brian Dorsey
      -    Guido van Rossum
      -    Bob Ippolito
      -    Alan McIntyre
      -    Lutz Paelike
      -    Michael Chermside
      -    Beatrice Düring
      -    Boris Feigin
      -    Amaury Forgeot d'Arc 
      -    Andrew Thompson      
      -    Valentino Volonghi   
      -    Aurelien Campeas
      -    Stephan Busemann
      -    Johan Hahn
      -    Gerald Klix
      -    Gene Oden
      -    Josh Gilbert
      -    Geroge Paci
      -    Martin Blais
      -    Stuart Williams
      -    Jiwon Seo
      -    Michael Twomey 
      -    Wanja Saatkamp
      -    Alexandre Fayolle
      -    Raphaël Collet
      -    Grégoire Dooms
      -    Sanghyeon Seo
      -    Yutaka Niibe
      -    Yusei Tahara
      -    George Toshida
      -    Koichi Sasada
      -    Guido Wesdorp        
      -    Maciej Fijalkowski   
      -    Antonio Cuni          
      -    Lawrence Oluyede    
      -    Fabrizio Milo        
      -    Alexander Schremmer  
      -    David Douard       
      -    Michele Frettoli     
      -    Simon Burton         
      -    Aaron Bingham        
      -    Pieter Zieschang     
      -    Sad Rejeb 
      -    Brian Sutherland
      -    Georg Brandl
      -
      -
      
      diff --git a/pypy/doc/config/translation.fork_before.rst b/pypy/doc/config/translation.fork_before.txt
      copy from pypy/doc/config/translation.fork_before.rst
      copy to pypy/doc/config/translation.fork_before.txt
      
      diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst b/pypy/doc/config/translation.builtins_can_raise_exceptions.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Internal option.
      -
      -.. internal
      
      diff --git a/pypy/doc/style.css b/pypy/doc/style.css
      deleted file mode 100644
      --- a/pypy/doc/style.css
      +++ /dev/null
      @@ -1,1091 +0,0 @@
      -body,body.editor,body.body {
      -    font: 90% "Times New Roman", Arial, Verdana, Helvetica, serif;
      -    background: White;
      -    color: Black;
      -}
      -
      -a, a.reference {
      -	text-decoration: none; 
      -}
      -a[href]:hover { text-decoration: underline; }
      -
      -img {
      -    border: none;
      -	vertical-align: middle;
      -}
      -
      -p, div.text {
      -    text-align: left;
      -    line-height: 1.5em;
      -    margin: 0.5em 0em 0em 0em;
      -}
      -
      -
      -
      -p a:active {
      -	color: Red;
      -    background-color: transparent;
      -}
      -
      -p img {
      -    border: 0;
      -    margin: 0;
      -}
      -
      -img.inlinephoto {
      -    padding: 0;
      -    padding-right: 1em;
      -    padding-top: 0.7em;
      -    float: left;
      -}
      -
      -hr {
      -    clear: both;
      -    height: 1px;
      -    color: #8CACBB;
      -    background-color: transparent;
      -}
      -
      -
      -ul { 
      -    line-height: 1.5em;
      -    /*list-style-image: url("bullet.gif"); */
      -    margin-left: 1.5em;
      -    padding:0;
      -}
      -
      -ol {
      -    line-height: 1.5em;
      -    margin-left: 1.5em;
      -    padding:0;
      -}
      -
      -ul a, ol a {
      -    text-decoration: underline;
      -}
      -
      -dl {
      -}
      -
      -dt {
      -    font-weight: bold;    
      -}
      -
      -dd {
      -    line-height: 1.5em;
      -    margin-bottom: 1em;
      -}
      -
      -blockquote {
      -    font-family: Times, "Times New Roman", serif;
      -    font-style: italic;
      -    font-size: 120%;
      -}
      -
      -code {
      -    color: Black;
      -    /*background-color: #dee7ec;*/
      -    background-color: #cccccc;
      -}
      -
      -pre {
      -    padding: 1em;
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: #dee7ec;
      -    background-color: #cccccc;
      -    overflow: auto;
      -}
      -
      -
      -.netscape4 {
      -    display: none;
      -}
      -
      -/* main page styles */
      -
      -/*a[href]:hover { color: black; text-decoration: underline; }
      -a[href]:link { color: black; text-decoration: underline; }
      -a[href] { color: black; text-decoration: underline; }
      -*/
      -
      -span.menu_selected {
      -	color: black;
      -  	font: 120% Verdana, Helvetica, Arial, sans-serif;
      -	text-decoration: none;
      -    padding-right: 0.3em;
      -    background-color: #cccccc;
      -}
      -
      -
      -a.menu {
      -  	/*color: #3ba6ec; */
      -  	font: 120% Verdana, Helvetica, Arial, sans-serif;
      -	text-decoration: none;
      -    padding-right: 0.3em;
      -}
      -
      -a.menu[href]:visited, a.menu[href]:link{
      -  	/*color: #3ba6ec; */
      -  	font: 120% Verdana, Helvetica, Arial, sans-serif;
      -	text-decoration: none;
      -}
      -
      -a.menu[href]:hover {
      -  	/*color: black;*/
      -}
      -
      -div.project_title{
      -  /*border-spacing: 20px;*/
      -  font: 160% Verdana, Helvetica, Arial, sans-serif;
      -  color: #3ba6ec; 
      -  vertical-align: center;
      -  padding-bottom: 0.3em;
      -}
      -
      -a.wikicurrent {
      -  font: 100% Verdana, Helvetica, Arial, sans-serif;
      -  color: #3ba6ec; 
      -  vertical-align: middle;
      -}
      -
      -
      -table.body {
      -  border: 0;
      -  /*padding: 0;
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  */
      -}
      -
      -td.page-header-left {
      -  padding: 5px;
      -  /*border-bottom: 1px solid #444444;*/
      -}
      -
      -td.page-header-top {
      -  padding: 0;
      -    
      -  /*border-bottom: 1px solid #444444;*/
      -}
      -
      -td.sidebar {
      -  padding: 1 0 0 1;
      -}
      -
      -td.sidebar p.classblock {
      -  padding: 0 5 0 5;
      -  margin: 1 1 1 1;
      -  border: 1px solid #444444;
      -  background-color: #eeeeee;
      -}
      -
      -td.sidebar p.userblock {
      -  padding: 0 5 0 5;
      -  margin: 1 1 1 1;
      -  border: 1px solid #444444;
      -  background-color: #eeeeff;
      -}
      -
      -td.content {
      -  padding: 1 5 1 5;
      -  vertical-align: top;
      -  width: 100%;
      -}
      -
      -p.ok-message {
      -  background-color: #22bb22;
      -  padding: 5 5 5 5;
      -  color: white;
      -  font-weight: bold;
      -}
      -p.error-message {
      -  background-color: #bb2222;
      -  padding: 5 5 5 5;
      -  color: white;
      -  font-weight: bold;
      -}
      -
      -p:first-child { 
      -  margin: 0 ;
      -  padding: 0;
      -}
      -
      -/* style for forms */
      -table.form {
      -  padding: 2;
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -}
      -
      -table.form th {
      -  color: #333388;
      -  text-align: right;
      -  vertical-align: top;
      -  font-weight: normal;
      -}
      -table.form th.header {
      -  font-weight: bold;
      -  background-color: #eeeeff;
      -  text-align: left;
      -}
      -
      -table.form th.required {
      -  font-weight: bold;
      -}
      -
      -table.form td {
      -  color: #333333;
      -  empty-cells: show;
      -  vertical-align: top;
      -}
      -
      -table.form td.optional {
      -  font-weight: bold;
      -  font-style: italic;
      -}
      -
      -table.form td.html {
      -  color: #777777;
      -}
      -
      -/* style for lists */
      -table.list {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  vertical-align: top;
      -  padding-top: 0;
      -  width: 100%;
      -}
      -
      -table.list th {
      -  padding: 0 4 0 4;
      -  color: #404070;
      -  background-color: #eeeeff;
      -  border-right: 1px solid #404070;
      -  border-top: 1px solid #404070;
      -  border-bottom: 1px solid #404070;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -table.list th a[href]:hover { color: #404070 }
      -table.list th a[href]:link { color: #404070 }
      -table.list th a[href] { color: #404070 }
      -table.list th.group {
      -  background-color: #f4f4ff;
      -  text-align: center;
      -  font-size: 120%;
      -}
      -
      -table.list td {
      -  padding: 0 4 0 4;
      -  border: 0 2 0 2;
      -  border-right: 1px solid #404070;
      -  color: #404070;
      -  background-color: white;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -
      -table.list tr.normal td {
      -  background-color: white;
      -  white-space: nowrap;
      -}
      -
      -table.list tr.alt td {
      -  background-color: #efefef;
      -  white-space: nowrap;
      -}
      -
      -table.list td:first-child {
      -  border-left: 1px solid #404070;
      -  border-right: 1px solid #404070;
      -}
      -
      -table.list th:first-child {
      -  border-left: 1px solid #404070;
      -  border-right: 1px solid #404070;
      -}
      -
      -table.list tr.navigation th {
      -  text-align: right;
      -}
      -table.list tr.navigation th:first-child {
      -  border-right: none;
      -  text-align: left;
      -}
      -
      -
      -/* style for message displays */
      -table.messages {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.messages th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.messages th {
      -  font-weight: bold;
      -  color: black;
      -  text-align: left;
      -  border-bottom: 1px solid #afafaf;
      -}
      -
      -table.messages td {
      -  font-family: monospace;
      -  background-color: #efefef;
      -  border-bottom: 1px solid #afafaf;
      -  color: black;
      -  empty-cells: show;
      -  border-right: 1px solid #afafaf;
      -  vertical-align: top;
      -  padding: 2 5 2 5;
      -}
      -
      -table.messages td:first-child {
      -  border-left: 1px solid #afafaf;
      -  border-right: 1px solid #afafaf;
      -}
      -
      -/* style for file displays */
      -table.files {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.files th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.files th {
      -  border-bottom: 1px solid #afafaf;
      -  font-weight: bold;
      -  text-align: left;
      -}
      -
      -table.files td {
      -  font-family: monospace;
      -  empty-cells: show;
      -}
      -
      -/* style for history displays */
      -table.history {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.history th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -  font-size: 100%;
      -}
      -
      -table.history th {
      -  border-bottom: 1px solid #afafaf;
      -  font-weight: bold;
      -  text-align: left;
      -  font-size: 90%;
      -}
      -
      -table.history td {
      -  font-size: 90%;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -
      -
      -/* style for class list */
      -table.classlist {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.classlist th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.classlist th {
      -  font-weight: bold;
      -  text-align: left;
      -}
      -
      -
      -/* style for class help display */
      -table.classhelp {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.classhelp th {
      -  font-weight: bold;
      -  text-align: left;
      -  color: #707040;
      -}
      -
      -table.classhelp td {
      -  padding: 2 2 2 2;
      -  border: 1px solid black;
      -  text-align: left;
      -  vertical-align: top;
      -  empty-cells: show;
      -}
      -
      -
      -/* style for "other" displays */
      -table.otherinfo {
      -  border-spacing: 0px;
      -  border-collapse: separate;
      -  width: 100%;
      -}
      -
      -table.otherinfo th.header{
      -  padding-top: 10px;
      -  border-bottom: 1px solid gray;
      -  font-weight: bold;
      -  background-color: white;
      -  color: #707040;
      -}
      -
      -table.otherinfo th {
      -  border-bottom: 1px solid #afafaf;
      -  font-weight: bold;
      -  text-align: left;
      -}
      -
      -input {
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: white;
      -    vertical-align: middle;
      -    margin-bottom: 1px; /* IE bug fix */
      -    padding: 0.1em;
      -}
      -
      -select {
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: white;
      -    vertical-align: middle;
      -    margin-bottom: 1px; /* IE bug fix */
      -    padding: 0.1em;
      -}
      -
      -
      -a.nonexistent {
      -    color: #FF2222;
      -}
      -a.nonexistent:visited {
      -    color: #FF2222;
      -}
      -a.external {
      -    color: #AA6600;
      -}
      -
      -/*
      -dl,ul,ol {
      -    margin-top: 1pt;
      -}
      -tt,pre {
      -    font-family: Lucida Console,Courier New,Courier,monotype;
      -    font-size: 12pt;
      -}
      -pre.code {
      -    margin-top: 8pt;
      -    margin-bottom: 8pt;
      -    background-color: #FFFFEE;
      -    white-space:pre;
      -    border-style:solid;
      -    border-width:1pt;
      -    border-color:#999999;
      -    color:#111111;
      -    padding:5px;
      -    width:100%;
      -}
      -*/
      -div.diffold {
      -    background-color: #FFFF80;
      -    border-style:none;
      -    border-width:thin;
      -    width:100%;
      -}
      -div.diffnew {
      -    background-color: #80FF80;
      -    border-style:none;
      -    border-width:thin;
      -    width:100%;
      -}
      -div.message {
      -    margin-top: 6pt;
      -    background-color: #E8FFE8;
      -    border-style:solid;
      -    border-width:1pt;
      -    border-color:#999999;
      -    color:#440000;
      -    padding:5px;
      -    width:100%;
      -}
      -strong.highlight {
      -    background-color: #FFBBBB;
      -/* as usual, NetScape fucks up with innocent CSS
      -    border-color: #FFAAAA;
      -    border-style: solid;
      -    border-width: 1pt;
      -*/
      -}
      -
      -table.navibar {
      -    background-color: #C8C8C8;
      -    border-spacing: 3px;
      -}
      -td.navibar {
      -    background-color: #E8E8E8;
      -    vertical-align: top;
      -    text-align: right;
      -    padding: 0px;
      -}
      -
      -div.pagename {
      -    font-size: 140%;
      -    color: blue;
      -    text-align: center;
      -    font-weight: bold;
      -    background-color: white;
      -    padding: 0 ;
      -}
      -
      -a.wikiaction, input.wikiaction {
      -    color: black; 
      -    text-decoration: None;
      -    text-align: center;
      -    color: black;
      -    /*border: 1px solid #3ba6ec; */
      -    margin: 4px;
      -    padding: 5;
      -    padding-bottom: 0;
      -    white-space: nowrap;
      -}
      -
      -a.wikiaction[href]:hover { 
      -	color: black; 
      -	text-decoration: none; 
      -	/*background-color: #dddddd; */
      -}
      -
      -span.wikiuserpref {
      -    padding-top: 1em;
      -    font-size: 120%;
      -}
      -
      -div.wikitrail {
      -    vertical-align: bottom;
      -    /*font-size: -1;*/
      -    padding-top: 1em;
      -    display: none;
      -}
      -
      -div.wikiaction {
      -    vertical-align: middle;
      -    /*border-bottom: 1px solid #8cacbb;*/
      -    padding-bottom:1em;
      -    text-align: left;
      -    width: 100%;
      -}
      -
      -div.wikieditmenu {
      -    text-align: right;
      -}
      -
      -form.wikiedit {
      -    border: 1px solid #8cacbb;
      -    background-color: #f0f0f0;
      -    background-color: #fabf00;
      -    padding: 1em;
      -    padding-right: 0em;
      -}
      -
      -div.legenditem {
      -    padding-top: 0.5em;
      -    padding-left: 0.3em;
      -}
      -
      -span.wikitoken {
      -   background-color: #eeeeee;
      -}
      -    
      -
      -div#contentspace h1:first-child, div.heading:first-child { 
      -  padding-top: 0;
      -  margin-top: 0;
      -}
      -div#contentspace h2:first-child { 
      -  padding-top: 0;
      -  margin-top: 0;
      -}
      -
      -/* heading and paragraph text */
      -
      -div.heading, h1 {
      -    font-family: Verdana, Helvetica, Arial, sans-serif;
      -    background-color: #58b3ef;
      -    background-color: #FFFFFF; 
      -    /*color: #4893cf;*/
      -    color: black;
      -    padding-top: 1.0em;
      -    padding-bottom:0.2em;
      -    text-align: left;
      -    margin-top: 0em; 
      -    /*margin-bottom:8pt;*/
      -    font-weight: bold;
      -    font-size: 115%;
      -    border-bottom: 1px solid #8CACBB;
      -}
      -
      -
      -h1, h2, h3, h4, h5, h6 {
      -    color: Black;
      -    clear: left;
      -    font: 100% Verdana, Helvetica, Arial, sans-serif;
      -    margin: 0;
      -    padding-left: 0em;
      -    padding-top: 1em;
      -    padding-bottom: 0.2em;
      -    /*border-bottom: 1px solid #8CACBB;*/
      -}
      -/* h1,h2 { padding-top: 0; }*/
      -
      -
      -h1 { font-size: 145%; }
      -h2 { font-size: 135%; }
      -h3 { font-size: 125%; }
      -h4 { font-size: 120%; }
      -h5 { font-size: 110%; }
      -h6 { font-size: 80%; }
      -
      -h1 a { text-decoration: None;}
      -
      -div.exception {
      -  background-color: #bb2222;
      -  padding: 5 5 5 5;
      -  color: white;
      -  font-weight: bold;
      -}
      -pre.exception {
      -    font-size: 110%;
      -    padding: 1em;
      -    border: 1px solid #8cacbb;
      -    color: Black;
      -    background-color: #dee7ec;
      -    background-color: #cccccc;
      -}
      -
      -/* defines for navgiation bar (documentation) */
      -
      -
      -div.direntry {
      -    padding-top: 0.3em;
      -    padding-bottom: 0.3em;
      -    margin-right: 1em;
      -    font-weight: bold;
      -    background-color: #dee7ec;
      -    font-size: 110%;
      -}
      -
      -div.fileentry {
      -    font-family: Verdana, Helvetica, Arial, sans-serif;
      -    padding-bottom: 0.3em;
      -    white-space: nowrap;
      -    line-height: 150%;
      -}
      -
      -a.fileentry {
      -    white-space: nowrap;
      -}
      -
      -
      -span.left {
      -    text-align: left;
      -}
      -span.right {
      -    text-align: right;
      -}
      -
      -div.navbar {
      -  /*margin: 0;*/
      -  font-size: 80% /*smaller*/;
      -  font-weight: bold;
      -  text-align: left;
      -  /* position: fixed; */
      -  top: 100pt;
      -  left: 0pt; /*  auto; */
      -  width: 120pt;
      -  /* right: auto;
      -  right: 0pt;  2em; */
      -}
      -
      -
      -div.history a {
      -    /* font-size: 70%; */
      -}
      -
      -div.wikiactiontitle { 
      -  font-weight: bold;
      -}
      -
      -/*  REST  defines */
      -
      -div.document {
      -    margin: 0;
      -}
      -
      -h1.title {
      -    margin: 0;
      -}
      -
      -td.toplist {
      -    vertical-align: top;
      -}
      -
      -img#pyimg {
      -    position: absolute;
      -    top: 0px;
      -    left: 20px;
      -    margin: 20px;
      -}
      -
      -img#extraimg {
      -    position: absolute;
      -    right: 14px; 
      -    top: 4px;
      -}
      -    
      -div#navspace {
      -    position: absolute;
      -    top: 130px;
      -    left: 11px;
      -    font-size: 100%;
      -    width: 150px;
      -    overflow: hidden; /* scroll;  */
      -}
      -
      -div#metaspace {
      -    position: absolute;
      -    top: 40px;
      -    left: 210px;
      -}
      -
      -div#errorline {
      -    position: relative;
      -    top: 5px; 
      -    float: right; 
      -}
      -
      -div#contentspace {
      -    position: absolute;
      -  	/* font: 120% "Times New Roman", serif;*/
      -    font: 110% Verdana, Helvetica, Arial, sans-serif;
      -    top: 140px;
      -    left: 130px;
      -    margin-right: 140px;
      -}
      -
      -div#menubar {
      -/*    width: 400px; */
      -    float: left;
      -}
      -
      -/* for the documentation page */
      -div#docinfoline {
      -  position: relative;
      -  top: 5px; 
      -  left: 0px;
      -
      -  /*background-color: #dee7ec; */
      -  padding: 5pt; 
      -  padding-bottom: 1em; 
      -  color: black;
      -  /*border-width: 1pt;
      -  border-style: solid;*/
      -
      -}
      -
      -div#docnavlist {
      -  /*background-color: #dee7ec; */
      -  padding: 5pt; 
      -  padding-bottom: 2em; 
      -  color: black;
      -  border-width: 1pt;
      -  /*border-style: solid;*/
      -}
      -
      -
      -/* text markup */
      -
      -div.listtitle {
      -    color: Black;
      -    clear: left;
      -    font: 120% Verdana, Helvetica, Arial, sans-serif;
      -    margin: 0;
      -    padding-left: 0em;
      -    padding-top: 0em;
      -    padding-bottom: 0.2em;
      -    margin-right: 0.5em;
      -    border-bottom: 1px solid #8CACBB;
      -}
      -
      -div.actionbox h3 { 
      -  padding-top: 0;
      -  padding-right: 0.5em;
      -  padding-left: 0.5em;
      -  background-color: #fabf00;
      -  text-align: center;
      -  border: 1px solid black; /* 8cacbb; */
      -}
      -
      -div.actionbox a { 
      -  display: block;
      -  padding-bottom: 0.5em;
      -  padding-top: 0.5em;
      -  margin-left: 0.5em;
      -}
      -
      -div.actionbox a.history { 
      -  display: block;
      -  padding-bottom: 0.5em;
      -  padding-top: 0.5em;
      -  margin-left: 0.5em;
      -  font-size: 90%; 
      -}
      -
      -div.actionbox { 
      -  margin-bottom: 2em;
      -  padding-bottom: 1em;
      -  overflow: hidden; /* scroll;  */
      -}
      -
      -/* taken from docutils (oh dear, a bit senseless) */
      -ol.simple, ul.simple {
      -  margin-bottom: 1em }
      -
      -ol.arabic {
      -  list-style: decimal }
      -
      -ol.loweralpha {
      -  list-style: lower-alpha }
      -
      -ol.upperalpha {
      -  list-style: upper-alpha }
      -
      -ol.lowerroman {
      -  list-style: lower-roman }
      -
      -ol.upperroman {
      -  list-style: upper-roman }
      -
      -
      -/*
      -:Author: David Goodger
      -:Contact: goodger at users.sourceforge.net
      -:date: $Date: 2003/01/22 22:26:48 $
      -:version: $Revision: 1.29 $
      -:copyright: This stylesheet has been placed in the public domain.
      -
      -Default cascading style sheet for the HTML output of Docutils.
      -*/
      -/*
      -.first {
      -  margin-top: 0 }
      -
      -.last {
      -  margin-bottom: 0 }
      -
      -a.toc-backref {
      -  text-decoration: none ;
      -  color: black }
      -
      -dd {
      -  margin-bottom: 0.5em }
      -
      -div.abstract {
      -  margin: 2em 5em }
      -
      -div.abstract p.topic-title {
      -  font-weight: bold ;
      -  text-align: center }
      -
      -div.attention, div.caution, div.danger, div.error, div.hint,
      -div.important, div.note, div.tip, div.warning {
      -  margin: 2em ;
      -  border: medium outset ;
      -  padding: 1em }
      -
      -div.attention p.admonition-title, div.caution p.admonition-title,
      -div.danger p.admonition-title, div.error p.admonition-title,
      -div.warning p.admonition-title {
      -  color: red ;
      -  font-weight: bold ;
      -  font-family: sans-serif }
      -
      -div.hint p.admonition-title, div.important p.admonition-title,
      -div.note p.admonition-title, div.tip p.admonition-title {
      -  font-weight: bold ;
      -  font-family: sans-serif }
      -
      -div.dedication {
      -  margin: 2em 5em ;
      -  text-align: center ;
      -  font-style: italic }
      -
      -div.dedication p.topic-title {
      -  font-weight: bold ;
      -  font-style: normal }
      -
      -div.figure {
      -  margin-left: 2em }
      -
      -div.footer, div.header {
      -  font-size: smaller }
      -
      -div.system-messages {
      -  margin: 5em }
      -
      -div.system-messages h1 {
      -  color: red }
      -
      -div.system-message {
      -  border: medium outset ;
      -  padding: 1em }
      -
      -div.system-message p.system-message-title {
      -  color: red ;
      -  font-weight: bold }
      -
      -div.topic {
      -  margin: 2em }
      -
      -h1.title {
      -  text-align: center }
      -
      -h2.subtitle {
      -  text-align: center }
      -
      -hr {
      -  width: 75% }
      -
      -p.caption {
      -  font-style: italic }
      -
      -p.credits {
      -  font-style: italic ;
      -  font-size: smaller }
      -
      -p.label {
      -  white-space: nowrap }
      -
      -p.topic-title {
      -  font-weight: bold }
      -
      -pre.address {
      -  margin-bottom: 0 ;
      -  margin-top: 0 ;
      -  font-family: serif ;
      -  font-size: 100% }
      -
      -pre.line-block {
      -  font-family: serif ;
      -  font-size: 100% }
      -
      -pre.literal-block, pre.doctest-block {
      -  margin-left: 2em ;
      -  margin-right: 2em ;
      -  background-color: #eeeeee }
      -
      -span.classifier {
      -  font-family: sans-serif ;
      -  font-style: oblique }
      -
      -span.classifier-delimiter {
      -  font-family: sans-serif ;
      -  font-weight: bold }
      -
      -span.interpreted {
      -  font-family: sans-serif }
      -
      -span.option {
      -  white-space: nowrap }
      -
      -span.option-argument {
      -  font-style: italic }
      -
      -span.pre {
      -  white-space: pre }
      -
      -span.problematic {
      -  color: red }
      -
      -table {
      -  margin-top: 0.5em ;
      -  margin-bottom: 0.5em }
      -
      -table.citation {
      -  border-left: solid thin gray ;
      -  padding-left: 0.5ex }
      -
      -table.docinfo {
      -  margin: 2em 4em }
      -
      -table.footnote {
      -  border-left: solid thin black ;
      -  padding-left: 0.5ex }
      -
      -td, th {
      -  padding-left: 0.5em ;
      -  padding-right: 0.5em ;
      -  vertical-align: top }
      -
      -th.docinfo-name, th.field-name {
      -  font-weight: bold ;
      -  text-align: left ;
      -  white-space: nowrap }
      -
      -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
      -  font-size: 100% }
      -
      -tt {
      -  background-color: #eeeeee }
      -
      -ul.auto-toc {
      -  list-style-type: none }
      -*/
      -
      -div.section {
      -  margin-top: 1.0em ;
      -}    
      -
      -div.abstract {
      -  margin: 2em 4em }
      -
      -div.abstract p.topic-title {
      -  font-weight: bold ;
      -  text-align: center }
      
      diff --git a/pypy/doc/config/translation.gcremovetypeptr.rst b/pypy/doc/config/translation.gcremovetypeptr.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.gcremovetypeptr.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -If set, save one word in every object.  Framework GC only.
      
      diff --git a/pypy/doc/config/objspace.usemodules._lsprof.rst b/pypy/doc/config/objspace.usemodules._lsprof.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._lsprof.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Use the '_lsprof' module. 
      
      diff --git a/pypy/doc/config/translation.jit_profiler.rst b/pypy/doc/config/translation.jit_profiler.txt
      copy from pypy/doc/config/translation.jit_profiler.rst
      copy to pypy/doc/config/translation.jit_profiler.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._sha.rst b/pypy/doc/config/objspace.usemodules._sha.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._sha.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -Use the built-in _'sha' module.
      -This module is expected to be working and is included by default.
      -There is also a pure Python version in lib_pypy which is used
      -if the built-in is disabled, but it is several orders of magnitude 
      -slower.
      
      diff --git a/pypy/doc/config/translation.shared.rst b/pypy/doc/config/translation.shared.txt
      copy from pypy/doc/config/translation.shared.rst
      copy to pypy/doc/config/translation.shared.txt
      
      diff --git a/pypy/doc/config/translation.force_make.rst b/pypy/doc/config/translation.force_make.txt
      copy from pypy/doc/config/translation.force_make.rst
      copy to pypy/doc/config/translation.force_make.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.rbench.rst b/pypy/doc/config/objspace.usemodules.rbench.txt
      copy from pypy/doc/config/objspace.usemodules.rbench.rst
      copy to pypy/doc/config/objspace.usemodules.rbench.txt
      
      diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py
      --- a/pypy/config/makerestdoc.py
      +++ b/pypy/config/makerestdoc.py
      @@ -28,9 +28,6 @@
               fullpath = get_fullpath(self, path)
               result = Rest(
                   Title(fullpath, abovechar="=", belowchar="="),
      -            Directive("contents"),
      -            Paragraph(Link("back to parent", path + ".html")),
      -            Title("Basic Option Information"),
                   ListItem(Strong("name:"), self._name),
                   ListItem(Strong("description:"), self.doc))
               if self.cmdline is not None:
      @@ -132,47 +129,27 @@
           def make_rest_doc(self, path=""):
               fullpath = get_fullpath(self, path)
               content = Rest(
      -            Title(fullpath, abovechar="=", belowchar="="),
      -            Directive("contents"))
      -        if path:
      -            content.add(
      -                Paragraph(Link("back to parent", path + ".html")))
      +            Title(fullpath, abovechar="=", belowchar="="))
      +        toctree = []
      +        for child in self._children:
      +            subpath = fullpath + "." + child._name
      +            toctree.append(subpath)
      +        content.add(Directive("toctree", *toctree, maxdepth=4))
               content.join(
      -            Title("Basic Option Information"),
                   ListItem(Strong("name:"), self._name),
      -            ListItem(Strong("description:"), self.doc),
      -            Title("Sub-Options"))
      +            ListItem(Strong("description:"), self.doc))
               stack = []
      -        prefix = fullpath
               curr = content
               config = Config(self)
      -        for ending in self.getpaths(include_groups=True):
      -            subpath = fullpath + "." + ending
      -            while not (subpath.startswith(prefix) and
      -                       subpath[len(prefix)] == "."):
      -                curr, prefix = stack.pop()
      -            print subpath, fullpath, ending, curr
      -            sub, step = config._cfgimpl_get_home_by_path(ending)
      -            doc = getattr(sub._cfgimpl_descr, step).doc
      -            if doc:
      -                new = curr.add(ListItem(Link(subpath + ":", subpath + ".html"),
      -                                        Em(doc)))
      -            else:
      -                new = curr.add(ListItem(Link(subpath + ":", subpath + ".html")))
      -            stack.append((curr, prefix))
      -            prefix = subpath
      -            curr = new
               return content
       
       
       def _get_section_header(cmdline, fullpath, subdescr):
           # XXX:  pypy specific hack
           txtfile = configdocdir.join(fullpath + ".txt")
      -    print txtfile,
           if not txtfile.check():
      -        print "not found"
      +        print txtfile, "not found"
               return ""
      -    print "found"
           content = txtfile.read()
           if ".. internal" in content:
               return "Internal Options"
      @@ -221,7 +198,7 @@
               from docutils import nodes
               from pypy.config.pypyoption import get_pypy_config
               from pypy.config.makerestdoc import get_cmdline
      -        txt = docdir.join("config", text + ".txt")
      +        txt = docdir.join("config", text + ".rst")
               html = docdir.join("config", text + ".html")
               assert txt.check()
               assert name == "config"
      @@ -247,9 +224,8 @@
                           shortest_long_option = cmd
                   text = shortest_long_option
               target = prefix + relative
      -        print text, target
               reference_node = nodes.reference(rawtext, text, name=text, refuri=target)
               return [reference_node], []
           config_role.content = True
           config_role.options = {}
      -    roles.register_canonical_role("config", config_role)
      +    return config_role
      
      diff --git a/pypy/doc/config/translation.instrumentctl.rst b/pypy/doc/config/translation.instrumentctl.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.instrumentctl.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Internal option.
      -
      -.. internal
      
      diff --git a/pypy/doc/discussion/cli-optimizations.rst b/pypy/doc/discussion/cli-optimizations.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/cli-optimizations.rst
      +++ /dev/null
      @@ -1,233 +0,0 @@
      -Possible optimizations for the CLI backend
      -==========================================
      -
      -Stack push/pop optimization
      ----------------------------
      -
      -The CLI's VM is a stack based machine: this fact doesn't play nicely
      -with the SSI form the flowgraphs are generated in. At the moment
      -gencli does a literal translation of the SSI statements, allocating a
      -new local variable for each variable of the flowgraph.
      -
      -For example, consider the following RPython code and the corresponding
      -flowgraph::
      -
      -  def bar(x, y):
      -      foo(x+y, x-y)
      -
      -
      -  inputargs: x_0 y_0
      -  v0 = int_add(x_0, y_0)
      -  v1 = int_sub(x_0, y_0)
      -  v2 = directcall((sm foo), v0, v1)
      -
      -This is the IL code generated by the CLI backend::
      -
      -  .locals init (int32 v0, int32 v1, int32 v2)
      -    
      -  block0:
      -    ldarg 'x_0'
      -    ldarg 'y_0'
      -    add 
      -    stloc 'v0'
      -    ldarg 'x_0'
      -    ldarg 'y_0'
      -    sub 
      -    stloc 'v1'
      -    ldloc 'v0'
      -    ldloc 'v1'
      -    call int32 foo(int32, int32)
      -    stloc 'v2'
      -
      -As you can see, the results of 'add' and 'sub' are stored in v0 and
      -v1, respectively, then v0 and v1 are reloaded onto stack. These
      -store/load is redundant, since the code would work nicely even without
      -them::
      -
      -  .locals init (int32 v2)
      -    
      -  block0:
      -    ldarg 'x_0'
      -    ldarg 'y_0'
      -    add 
      -    ldarg 'x_0'
      -    ldarg 'y_0'
      -    sub 
      -    call int32 foo(int32, int32)
      -    stloc 'v2'
      -
      -I've checked the native code generated by the Mono Jit on x86 and I've
      -seen that it does not optimize it. I haven't checked the native code
      -generated by Microsoft CLR, yet.
      -
      -Thus, we might consider to optimize it manually; it should not be so
      -difficult, but it is not trivial because we have to make sure that the
      -dropped locals are used only once.
      -
      -
      -Mapping RPython exceptions to native CLI exceptions
      ----------------------------------------------------
      -
      -Both RPython and CLI have its own set of exception classes: some of
      -these are pretty similar; e.g., we have OverflowError,
      -ZeroDivisionError and IndexError on the first side and
      -OverflowException, DivideByZeroException and IndexOutOfRangeException
      -on the other side.
      -
      -The first attempt was to map RPython classes to their corresponding
      -CLI ones: this worked for simple cases, but it would have triggered
      -subtle bugs in more complex ones, because the two exception
      -hierarchies don't completely overlap.
      -
      -For now I've chosen to build an RPython exception hierarchy
      -completely independent from the CLI one, but this means that we can't
      -rely on exceptions raised by standard operations. The currently
      -implemented solution is to do an exception translation on-the-fly; for
      -example, the 'ind_add_ovf' is translated into the following IL code::
      -
      -  .try 
      -  { 
      -      ldarg 'x_0'
      -      ldarg 'y_0'
      -      add.ovf 
      -      stloc 'v1'
      -      leave __check_block_2 
      -  } 
      -  catch [mscorlib]System.OverflowException 
      -  { 
      -      newobj instance void class exceptions.OverflowError::.ctor() 
      -      dup 
      -      ldsfld class Object_meta pypy.runtime.Constants::exceptions_OverflowError_meta 
      -      stfld class Object_meta Object::meta 
      -      throw 
      -  } 
      -
      -I.e., it catches the builtin OverflowException and raises a RPython
      -OverflowError.
      -
      -I haven't measured timings yet, but I guess that this machinery brings
      -to some performance penalties even in the non-overflow case; a
      -possible optimization is to do the on-the-fly translation only when it
      -is strictly necessary, i.e. only when the except clause catches an
      -exception class whose subclass hierarchy is compatible with the
      -builtin one. As an example, consider the following RPython code::
      -
      -  try:
      -    return mylist[0]
      -  except IndexError:
      -    return -1
      -
      -Given that IndexError has no subclasses, we can map it to
      -IndexOutOfBoundException and directly catch this one::
      -
      -  try
      -  {
      -    ldloc 'mylist'
      -    ldc.i4 0
      -    call int32 getitem(MyListType, int32)
      -    ...
      -  }
      -  catch [mscorlib]System.IndexOutOfBoundException
      -  {
      -    // return -1
      -    ...
      -  }
      -
      -By contrast we can't do so if the except clause catches classes that
      -don't directly map to any builtin class, such as LookupError::
      -
      -  try:
      -    return mylist[0]
      -  except LookupError:
      -    return -1
      -
      -Has to be translated in the old way::
      -
      -  .try 
      -  { 
      -    ldloc 'mylist'
      -    ldc.i4 0
      -
      -    .try 
      -    {
      -        call int32 getitem(MyListType, int32)
      -    }
      -    catch [mscorlib]System.IndexOutOfBoundException
      -    { 
      -        // translate IndexOutOfBoundException into IndexError
      -        newobj instance void class exceptions.IndexError::.ctor() 
      -        dup 
      -        ldsfld class Object_meta pypy.runtime.Constants::exceptions_IndexError_meta 
      -        stfld class Object_meta Object::meta 
      -        throw 
      -    }
      -    ...
      -  }
      -  .catch exceptions.LookupError
      -  {
      -    // return -1
      -    ...
      -  }
      -
      -
      -Specializing methods of List
      -----------------------------
      -
      -Most methods of RPython lists are implemented by ll_* helpers placed
      -in rpython/rlist.py. For some of those we have a direct correspondent
      -already implemented in .NET List<>; we could use the oopspec attribute
      -for doing an on-the-fly replacement of these low level helpers with
      -their builtin correspondent. As an example the 'append' method is
      -already mapped to pypylib.List.append. Thanks to Armin Rigo for the
      -idea of using oopspec.
      -
      -
      -Doing some caching on Dict
      ---------------------------
      -
      -The current implementations of ll_dict_getitem and ll_dict_get in
      -ootypesystem.rdict do two consecutive lookups (calling ll_contains and
      -ll_get) on the same key. We might cache the result of
      -pypylib.Dict.ll_contains so that the successive ll_get don't need a
      -lookup. Btw, we need some profiling before choosing the best way. Or
      -we could directly refactor ootypesystem.rdict for doing a single
      -lookup.
      -
      -XXX
      -I tried it on revision 32917 and performance are slower! I don't know
      -why, but pypy.net pystone.py is slower by 17%, and pypy.net
      -richards.py is slower by 71% (!!!). I don't know why, need to be
      -investigated further.
      -
      -
      -Optimize StaticMethod
      ----------------------
      -
      -::
      -
      -  2006-10-02, 13:41
      -
      -   antocuni: do you try to not wrap static methods that are just called and not passed around
      -   no
      -             I think I don't know how to detect them
      -   antocuni: you should try to render them just as static methods not as instances when possible
      -             you need to track what appears only in direct_calls vs other places
      -
      -
      -Optimize Unicode
      -----------------
      -
      -We should try to use native .NET unicode facilities instead of our
      -own. These should save both time (especially startup time) and memory.
      -
      -On 2006-10-02 I got these benchmarks:
      -
      -Pypy.NET             Startup time   Memory used
      -with unicodedata          ~12 sec     112508 Kb
      -without unicodedata        ~6 sec      79004 Kb
      -
      -The version without unicodedata is buggy, of course.
      -
      -Unfortunately it seems that .NET doesn't expose all the things we
      -need, so we will still need some data. For example there is no way to
      -get the unicode name of a char.
      
      diff --git a/pypy/doc/config/translation.backendopt.constfold.rst b/pypy/doc/config/translation.backendopt.constfold.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.constfold.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Do constant folding of operations and constant propagation on flowgraphs.
      
      diff --git a/pypy/doc/config/objspace.usemodules.pyexpat.rst b/pypy/doc/config/objspace.usemodules.pyexpat.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.pyexpat.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use (experimental) pyexpat module written in RPython, instead of CTypes
      -version which is used by default.
      
      diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst
      --- a/pypy/doc/glossary.rst
      +++ b/pypy/doc/glossary.rst
      @@ -1,3 +1,5 @@
      +.. include:: needswork.txt
      +
       .. _glossary:
       
       ********
      @@ -12,219 +14,145 @@
       
       .. glossary::
       
      -**abstract interpretation**
      -    The technique of interpreting the bytecode of a user program with
      -    an interpreter that handles abstract objects instead of concrete ones.
      -    It can be used to check the bytecode or see what it does, without
      -    actually executing it with concrete values.  See Theory_.
      +    annotator
      +        The component of the :term:`RPython toolchain` that performs a form
      +        of :term:`type inference` on the flow graph. See the `annotator pass`_
      +        in the documentation.
       
      -.. _annotator:
      +    application level
      +        applevel_ code is normal Python code running on top of the PyPy or
      +        :term:`CPython` interpreter (see :term:`interpreter level`)
       
      -**annotator**
      -    The component of the translator_\ 's toolchain_ that performs a form
      -    of `type inference`_ on the flow graph. See the `annotator pass`_
      -    in the documentation.
      +    backend
      +        Code generator that converts an `RPython
      +        `__ program to a `target
      +        language`_ using the :term:`RPython toolchain`. A backend uses either the
      +        :term:`lltypesystem` or the :term:`ootypesystem`.
       
      -.. _`application level`:
      +    compile-time
      +        In the context of the :term:`JIT`, compile time is when the JIT is
      +        generating machine code "just in time".
       
      -**application level**
      -    applevel_ code is normal Python code running on top of the PyPy or
      -    CPython_ interpreter (see `interpreter level`_)
      +    CPython
      +        The "default" implementation of Python, written in C and
      +        distributed by the PSF_ on http://www.python.org.
       
      -.. _backend:
      +    external function
      +        Functions that we don't want to implement in Python for various
      +        reasons (e.g. they need to make calls into the OS) and whose
      +        implementation will be provided by the backend.
       
      -**backend**
      -    Code generator that converts an `RPython
      -    `__ program to a `target
      -    language`_ using the PyPy toolchain_. A backend uses either the
      -    lltypesystem_ or the ootypesystem_.
      +    garbage collection framework
      +        Code that makes it possible to write `PyPy's garbage collectors`_
      +        in Python itself.
       
      -.. _`compile-time`:
      +    interpreter level
      +        Code running at this level is part of the implementation of the
      +        PyPy interpreter and cannot interact normally with :term:`application
      +        level` code; it typically provides implementation for an object
      +        space and its builtins.
       
      -**compile-time**
      -    In the context of the JIT_, compile time is when the JIT is
      -    generating machine code "just in time".
      +    jit
      +      `just in time compiler`_.
       
      -.. _CPython:
      +    llinterpreter
      +       Piece of code that is able to interpret flow graphs.  This is very
      +       useful for testing purposes, especially if you work on the :term:`RPython`
      +       Typer.
       
      -**CPython**
      -    The "default" implementation of Python, written in C and
      -    distributed by the PSF_ on http://www.python.org.
      +    lltypesystem
      +       A `C-like type model `__ that contains
      +       structs and pointers.  A :term:`backend` that uses this type system is also
      +       called a low-level backend.  The C backend uses this
      +       typesystem.
       
      -.. _`external function`:
      +    low-level helper
      +        A function that the :term:`RTyper` can use a call to as part of implementing
      +        some operation in terms of the target :term:`type system`.
       
      -**external function**
      -    Functions that we don't want to implement in Python for various
      -    reasons (e.g. they need to make calls into the OS) and whose
      -    implementation will be provided by the backend.
      +    mixed module
      +      a module that accesses PyPy's :term:`interpreter level`.  The name comes
      +      from the fact that the module's implementation can be a mixture of
      +      :term:`application level` and :term:`interpreter level` code.
       
      -.. _`garbage collection framework`:
      +    object space
      +       The `object space `__ (often abbreviated to
      +       "objspace") creates all objects and knows how to perform operations
      +       on the objects. You may think of an object space as being a library
      +       offering a fixed API, a set of operations, with implementations
      +       that a) correspond to the known semantics of Python objects, b)
      +       extend or twist these semantics, or c) serve whole-program analysis
      +       purposes.
       
      -**garbage collection framework**
      -    Code that makes it possible to write `PyPy's garbage collectors`_
      -    in Python itself.
      +    ootypesystem
      +       An `object oriented type model `__
      +       containing classes and instances.  A :term:`backend` that uses this type system
      +       is also called a high-level backend.  The JVM and CLI backends
      +       all use this typesystem.
       
      -.. _`interpreter level`:
      +    prebuilt constant
      +       In :term:`RPython` module globals are considered constants.  Moreover,
      +       global (i.e. prebuilt) lists and dictionaries are supposed to be
      +       immutable ("prebuilt constant" is sometimes abbreviated to "pbc").
       
      -**interpreter level**
      -    Code running at this level is part of the implementation of the
      -    PyPy interpreter and cannot interact normally with `application
      -    level`_ code; it typically provides implementation for an object
      -    space and its builtins.
      +    promotion
      +       :term:`JIT` terminology.  *promotion* is a way of "using" a :term:`run-time`
      +       value at :term:`compile-time`, essentially by deferring compilation
      +       until the run-time value is known. See if `the jit docs`_ help.
       
      -.. _`jit`:
      +    RPython
      +       `Restricted Python`_, a limited subset of the Python_ language.
      +       The limitations make :term:`type inference` possible.
      +       It is also the language that the PyPy interpreter itself is written
      +       in.
       
      -**jit**
      -  `just in time compiler`_.
      +    RPython toolchain
      +       The `annotator pass`_, `The RPython Typer`_, and various
      +       :term:`backend`\ s.
       
      -.. _llinterpreter:
      +    rtyper
      +       Based on the type annotations, the `RPython Typer`_ turns the flow
      +       graph into one that fits the model of the target platform/:term:`backend`
      +       using either the :term:`lltypesystem` or the :term:`ootypesystem`.
       
      -**llinterpreter**
      -   Piece of code that is able to interpret flow graphs.  This is very
      -   useful for testing purposes, especially if you work on the RPython_
      -   Typer.
      +    run-time
      +       In the context of the :term:`JIT`, run time is when the code the JIT has
      +       generated is executing.
       
      -.. _lltypesystem:
      +    specialization
      +       A way of controlling how a specific function is handled by the
      +       :term:`annotator`.  One specialization is to treat calls to a function
      +       with different argument types as if they were calls to different
      +       functions with identical source.
       
      -**lltypesystem**
      -   A `C-like type model `__ that contains
      -   structs and pointers.  A backend_ that uses this type system is also
      -   called a low-level backend.  The C backend uses this
      -   typesystem.
      +    stackless
      +        Technology that enables various forms of non conventional control
      +        flow, such as coroutines, greenlets and tasklets.  Inspired by
      +        Christian Tismer's `Stackless Python `__.
       
      -.. _`low-level helper`:
      +    standard interpreter
      +       It is the `subsystem implementing the Python language`_, composed
      +       of the bytecode interpreter and of the standard objectspace.
       
      -**low-level helper**
      -    A function that the RTyper_ can use a call to as part of implementing
      -    some operation in terms of the target `type system`_.
      +    transformation
      +       Code that modifies flowgraphs to weave in translation aspects
       
      -.. _`mixed module`:
      +    translation-time
      +       In the context of the :term:`JIT`, translation time is when the PyPy
      +       source is being analyzed and the JIT itself is being created.
       
      -**mixed module**
      -  a module that accesses PyPy's `interpreter level`_.  The name comes
      -  from the fact that the module's implementation can be a mixture of
      -  `application level`_ and `interpreter level`_ code.
      +    translator
      +      Tool_ based on the PyPy interpreter which can translate
      +      sufficiently static Python programs into low-level code.
       
      -.. _`object space`:
      +    type system
      +        The RTyper can target either the :term:`lltypesystem` or the :term:`ootypesystem`.
       
      -**multimethod**
      -   A callable object that invokes a different Python function based
      -   on the type of all its arguments (instead of just the class of the
      -   first argument, as with normal methods).  See Theory_.
      -
      -**object space**
      -   The `object space `__ (often abbreviated to
      -   "objspace") creates all objects and knows how to perform operations
      -   on the objects. You may think of an object space as being a library
      -   offering a fixed API, a set of operations, with implementations
      -   that a) correspond to the known semantics of Python objects, b)
      -   extend or twist these semantics, or c) serve whole-program analysis
      -   purposes.
      -
      -.. _ootypesystem:
      -
      -**ootypesystem**
      -   An `object oriented type model `__
      -   containing classes and instances.  A backend_ that uses this type system
      -   is also called a high-level backend.  The JVM and CLI backends 
      -   all use this typesystem.
      -
      -.. _`prebuilt constant`:
      -
      -**prebuilt constant**
      -   In RPython_ module globals are considered constants.  Moreover,
      -   global (i.e. prebuilt) lists and dictionaries are supposed to be
      -   immutable ("prebuilt constant" is sometimes abbreviated to "pbc").
      -
      -.. _`rpython`:
      -
      -.. _`promotion`:
      -
      -**promotion**
      -   JIT_ terminology.  *promotion* is a way of "using" a `run-time`_
      -   value at `compile-time`_, essentially by deferring compilation
      -   until the run-time value is known. See if `the jit docs`_ help.
      -
      -**rpython**
      -   `Restricted Python`_, a limited subset of the Python_ language.
      -   The limitations make `type inference`_ possible.
      -   It is also the language that the PyPy interpreter itself is written
      -   in.
      -
      -.. _`rtyper`:
      -
      -**rtyper**
      -   Based on the type annotations, the `RPython Typer`_ turns the flow
      -   graph into one that fits the model of the target platform/backend_
      -   using either the lltypesystem_ or the ootypesystem_.
      -
      -.. _`run-time`:
      -
      -**run-time**
      -   In the context of the JIT_, run time is when the code the JIT has
      -   generated is executing.
      -
      -.. _`specialization`:
      -
      -**specialization**
      -   A way of controlling how a specific function is handled by the
      -   annotator_.  One specialization is to treat calls to a function
      -   with different argument types as if they were calls to different
      -   functions with identical source.
      -
      -.. _`stackless`:
      -
      -**stackless**
      -    Technology that enables various forms of non conventional control
      -    flow, such as coroutines, greenlets and tasklets.  Inspired by
      -    Christian Tismer's `Stackless Python `__.
      -
      -.. _`standard interpreter`:
      -
      -**standard interpreter**
      -   It is the `subsystem implementing the Python language`_, composed
      -   of the bytecode interpreter and of the standard objectspace.
      -
      -.. _toolchain:
      -
      -**timeshifting**
      -   JIT_ terminology.  *timeshifting* is to do with moving from the
      -   world where there are only `run-time`_ operations to a world where
      -   there are both `run-time`_ and `compile-time`_ operations.
      -
      -**toolchain**
      -   The `annotator pass`_, `The RPython Typer`_, and various
      -   `backends`_.
      -
      -.. _`transformation`:
      -
      -**transformation**
      -   Code that modifies flowgraphs to weave in `translation-aspects`_
      -
      -.. _`translation-time`:
      -
      -**translation-time**
      -   In the context of the JIT_, translation time is when the PyPy
      -   source is being analyzed and the JIT itself is being created.
      -
      -.. _`translator`:
      -
      -**translator**
      -  Tool_ based on the PyPy interpreter which can translate
      -  sufficiently static Python programs into low-level code.
      -
      -.. _`type system`:
      -
      -**type system**
      -    The RTyper can target either the lltypesystem_ or the ootypesystem_.
      -
      -.. _`type inference`:
      -
      -**type inference**
      -   Deduces either partially or fully the type of expressions as
      -   described in this `type inference article on Wikipedia`_.
      -   PyPy's tool-chain own flavour of type inference is described
      -   in the `annotator pass`_ section.
      +    type inference
      +       Deduces either partially or fully the type of expressions as
      +       described in this `type inference article on Wikipedia`_.
      +       The :term:`RPython toolchain`'s flavour of type inference is described
      +       in the `annotator pass`_ section.
       
       .. _applevel: coding-guide.html#application-level
       .. _`target language`: getting-started-dev.html#trying-out-the-translator
      @@ -235,13 +163,11 @@
       .. _`The RPython Typer`: translation.html#the-rpython-typer
       .. _`backends`: getting-started-dev.html#trying-out-the-translator
       .. _Tool: getting-started-dev.html#trying-out-the-translator
      -.. _`translation-aspects`: translation-aspects.html
       .. _`PyPy's garbage collectors`: garbage_collection.html
       .. _`Restricted Python`: coding-guide.html#restricted-python
       .. _PSF: http://www.python.org/psf/
       .. _Python: http://www.python.org
       .. _`RPython Typer`: rtyper.html
       .. _`subsystem implementing the Python language`: architecture.html#standard-interpreter
      -.. _Theory: theory.html
       
      -.. include:: _ref.rst
      +.. include:: _ref.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.binascii.rst b/pypy/doc/config/objspace.usemodules.binascii.txt
      copy from pypy/doc/config/objspace.usemodules.binascii.rst
      copy to pypy/doc/config/objspace.usemodules.binascii.txt
      
      diff --git a/pypy/doc/config/translation.type_system.rst b/pypy/doc/config/translation.type_system.txt
      copy from pypy/doc/config/translation.type_system.rst
      copy to pypy/doc/config/translation.type_system.txt
      
      diff --git a/pypy/doc/config/objspace.logbytecodes.rst b/pypy/doc/config/objspace.logbytecodes.txt
      copy from pypy/doc/config/objspace.logbytecodes.rst
      copy to pypy/doc/config/objspace.logbytecodes.txt
      
      diff --git a/pypy/doc/config/objspace.std.withtypeversion.rst b/pypy/doc/config/objspace.std.withtypeversion.txt
      copy from pypy/doc/config/objspace.std.withtypeversion.rst
      copy to pypy/doc/config/objspace.std.withtypeversion.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._io.rst b/pypy/doc/config/objspace.usemodules._io.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._io.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the '_io module.
      -Used by the 'io' standard lib module. This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules._lsprof.rst b/pypy/doc/config/objspace.usemodules._lsprof.txt
      copy from pypy/doc/config/objspace.usemodules._lsprof.rst
      copy to pypy/doc/config/objspace.usemodules._lsprof.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.rst b/pypy/doc/config/translation.backendopt.remove_asserts.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.remove_asserts.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Remove raising of assertions from the flowgraphs, which might give small speedups.
      
      diff --git a/pypy/doc/config/objspace.translationmodules.rst b/pypy/doc/config/objspace.translationmodules.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.translationmodules.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -This option enables all modules which are needed to translate PyPy using PyPy.
      
      diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst
      new file mode 100644
      --- /dev/null
      +++ b/pypy/doc/release-1.5.0.rst
      @@ -0,0 +1,73 @@
      +======================
      +PyPy 1.5: Catching Up
      +======================
      +
      +We're pleased to announce the 1.5 release of PyPy. This release updates
      +PyPy with the features of CPython 2.7.1, including the standard library. Thus
      +all the features of `CPython 2.6`_ and `CPython 2.7`_ are now supported. It
      +also contains additional performance improvements. You can download it here:
      +
      +    http://pypy.org/download.html
      +
      +What is PyPy?
      +=============
      +
      +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
      +CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6.2`_ performance comparison)
      +due to its integrated tracing JIT compiler.
      +
      +This release includes the features of CPython 2.6 and 2.7. It also includes a
      +large number of small improvements to the tracing JIT compiler. It supports
      +Intel machines running Linux 32/64 or Mac OS X.  Windows is beta (it roughly
      +works but a lot of small issues have not been fixed so far).  Windows 64 is
      +not yet supported.
      +
      +Numerous speed achievements are described on `our blog`_. Normalized speed
      +charts comparing `pypy 1.5 and pypy 1.4`_ as well as `pypy 1.5 and cpython
      +2.6.2`_ are available on our benchmark website. The speed improvement over 1.4
      +seems to be around 25% on average.
      +
      +More highlights
      +===============
      +
      +- The largest change in PyPy's tracing JIT is adding support for `loop invariant
      +  code motion`_, which was mostly done by Håkan Ardö. This feature improves the
      +  performance of tight loops doing numerical calculations.
      +
      +- The CPython extension module API has been improved and now supports many more
      +  extensions. For information on which one are supported, please refer to our
      +  `compatibility wiki`_.
      +
      +- These changes make it possible to support `Tkinter and IDLE`_.
      +
      +- The `cProfile`_ profiler is now working with the JIT. However, it skews the
      +  performance in unstudied ways. Therefore it is not yet usable to analyze
      +  subtle performance problems (the same is true for CPython of course).
      +
      +- There is an `external fork`_ which includes an RPython version of the
      +  ``postgresql``.  However, there are no prebuilt binaries for this.
      +
      +- Our developer documentation was moved to Sphinx and cleaned up. It now lives
      +  on http://pypy.readthedocs.org
      +
      +- and many small things :-)
      +
      +
      +Cheers,
      +
      +Carl Friedrich Bolz, Laura Creighton, Antonio Cuni, Maciej Fijalkowski,
      +Amaury Forgeot d'Arc, Alex Gaynor, Armin Rigo and the PyPy team
      +
      +
      +.. _`CPython 2.6`: http://docs.python.org/dev/whatsnew/2.6.html
      +.. _`CPython 2.7`: http://docs.python.org/dev/whatsnew/2.7.html
      +
      +.. _`our blog`: http://morepypy.blogspot.com
      +.. _`pypy 1.5 and pypy 1.4`: http://bit.ly/joPhHo
      +.. _`pypy 1.5 and cpython 2.6.2`: http://bit.ly/mbVWwJ
      +
      +.. _`loop invariant code motion`: http://morepypy.blogspot.com/2011/01/loop-invariant-code-motion.html
      +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home
      +.. _`Tkinter and IDLE`: http://morepypy.blogspot.com/2011/04/using-tkinter-and-idle-with-pypy.html
      +.. _`cProfile`: http://docs.python.org/library/profile.html
      +.. _`external fork`: https://bitbucket.org/alex_gaynor/pypy-postgresql
      
      diff --git a/pypy/doc/config/objspace.usemodules.array.rst b/pypy/doc/config/objspace.usemodules.array.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.array.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Use interpreter-level version of array module (on by default).
      
      diff --git a/pypy/doc/config/objspace.usemodules._file.rst b/pypy/doc/config/objspace.usemodules._file.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._file.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -Use the '_file' module. It is an internal module that contains helper
      -functionality for the builtin ``file`` type.
      -
      -.. internal
      
      diff --git a/pypy/doc/config/objspace.usemodules.termios.rst b/pypy/doc/config/objspace.usemodules.termios.txt
      copy from pypy/doc/config/objspace.usemodules.termios.rst
      copy to pypy/doc/config/objspace.usemodules.termios.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.mallocs.rst b/pypy/doc/config/translation.backendopt.mallocs.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.mallocs.rst
      +++ /dev/null
      @@ -1,29 +0,0 @@
      -This optimization enables "malloc removal", which "explodes"
      -allocations of structures which do not escape from the function they
      -are allocated in into one or more additional local variables.
      -
      -An example.  Consider this rather unlikely seeming code::
      -
      -    class C:
      -        pass
      -    def f(y):
      -        c = C()
      -        c.x = y
      -        return c.x
      -
      -Malloc removal will spot that the ``C`` object can never leave ``f``
      -and replace the above with code like this::
      -
      -    def f(y):
      -        _c__x = y
      -        return _c__x
      -
      -It is rare for code to be directly written in a way that allows this
      -optimization to be useful, but inlining often results in opportunities
      -for its use (and indeed, this is one of the main reasons PyPy does its
      -own inlining rather than relying on the C compilers).
      -
      -For much more information about this and other optimizations you can
      -read section 4.1 of the technical report on "Massive Parallelism and
      -Translation Aspects" which you can find on the `Technical reports page
      -<../index-report.html>`__.
      
      diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
      --- a/pypy/doc/discussion/finalizer-order.rst
      +++ b/pypy/doc/discussion/finalizer-order.rst
      @@ -1,3 +1,6 @@
      +.. XXX armin, what do we do with this?
      +
      +
       Ordering finalizers in the SemiSpace GC
       =======================================
       
      
      diff --git a/pypy/doc/config/objspace.opcodes.rst b/pypy/doc/config/objspace.opcodes.txt
      copy from pypy/doc/config/objspace.opcodes.rst
      copy to pypy/doc/config/objspace.opcodes.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._locale.rst b/pypy/doc/config/objspace.usemodules._locale.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._locale.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Use the '_locale' module.
      -This module runs _locale written in RPython (instead of ctypes version).
      -It's not really finished yet; it's enabled by default on Windows.
      
      diff --git a/pypy/doc/config/makemodules.py b/pypy/doc/config/makemodules.py
      --- a/pypy/doc/config/makemodules.py
      +++ b/pypy/doc/config/makemodules.py
      @@ -7,12 +7,12 @@
       if __name__ == '__main__':
           c = config.Config(pypyoption.pypy_optiondescription).usemodules
           prefix = "objspace.usemodules"
      -    thisdir.join(prefix + ".txt").ensure()
      +    thisdir.join(prefix + ".rst").ensure()
           for p in c.getpaths(include_groups=True):
      -        basename = prefix + "." + p + ".txt"
      +        basename = prefix + "." + p + ".rst"
               f = thisdir.join(basename)
      -        if f.check() and f.size():
      -            continue
      +        #if f.check() and f.size():
      +        #    continue
               print "making docs for", p
               text = ["Use the '%s' module. " % (p, )]
               if p in pypyoption.essential_modules:
      
      diff --git a/pypy/doc/config/objspace.std.withtproxy.rst b/pypy/doc/config/objspace.std.withtproxy.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withtproxy.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Enable `transparent proxies`_.
      -
      -.. _`transparent proxies`: ../objspace-proxies.html#tproxy
      
      diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.rst b/pypy/doc/config/objspace.usemodules.cStringIO.txt
      copy from pypy/doc/config/objspace.usemodules.cStringIO.rst
      copy to pypy/doc/config/objspace.usemodules.cStringIO.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.thread.rst b/pypy/doc/config/objspace.usemodules.thread.txt
      copy from pypy/doc/config/objspace.usemodules.thread.rst
      copy to pypy/doc/config/objspace.usemodules.thread.txt
      
      diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.rst b/pypy/doc/config/objspace.std.logspaceoptypes.txt
      copy from pypy/doc/config/objspace.std.logspaceoptypes.rst
      copy to pypy/doc/config/objspace.std.logspaceoptypes.txt
      
      diff --git a/pypy/doc/config/translation.simplifying.rst b/pypy/doc/config/translation.simplifying.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.simplifying.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Internal option.
      -
      -.. internal
      
      diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt
      copy from pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst
      copy to pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.txt
      
      diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst
      --- a/pypy/doc/jit/pyjitpl5.rst
      +++ b/pypy/doc/jit/pyjitpl5.rst
      @@ -10,8 +10,8 @@
       
       The JIT's `theory`_ is great in principle, but the actual code is a different
       story. This section tries to give a high level overview of how PyPy's JIT is
      -implemented.  It's helpful to have an understanding of how the PyPy `translation
      -tool chain`_ works before digging into the sources.
      +implemented.  It's helpful to have an understanding of how the `RPython translation
      +toolchain`_ works before digging into the sources.
       
       Almost all JIT specific code is found in pypy/jit subdirectories.  Translation
       time code is in the codewriter directory.  The metainterp directory holds
      @@ -19,7 +19,7 @@
       the backend directory is responsible for generating machine code.
       
       .. _`theory`: overview.html
      -.. _`translation tool chain`: ../translation.html
      +.. _`RPython translation toolchain`: ../translation.html
       
       
       JIT hints
      @@ -160,8 +160,11 @@
       in the machine code.  Virtualizables, however, can escape from JIT controlled
       code.
       
      -Most of the JIT's optimizer is contained 2 files optimizefindnodes.py and
      -optimizeopt.py.
      +Other optimizations
      +*******************
      +
      +Most of the JIT's optimizer is contained in the subdirectory
      +``metainterp/optimizeopt/``.  Refer to it for more details.
       
       
       More resources
      
      diff --git a/pypy/doc/config/objspace.usemodules.cStringIO.rst b/pypy/doc/config/objspace.usemodules.cStringIO.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.cStringIO.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -Use the built-in cStringIO module.
      -
      -If not enabled, importing cStringIO gives you the app-level
      -implementation from the standard library StringIO module.
      
      diff --git a/pypy/doc/image/compat-matrix.sxc b/pypy/doc/image/compat-matrix.sxc
      index 8086ba6179a7bcd49f43067ae42a80f2a5d3cca3..ab8455241eda4ec63bf647905b63bf9849fb8675
      GIT binary patch
      [cut]
      diff --git a/pypy/doc/config/objspace.usemodules._stackless.rst b/pypy/doc/config/objspace.usemodules._stackless.txt
      copy from pypy/doc/config/objspace.usemodules._stackless.rst
      copy to pypy/doc/config/objspace.usemodules._stackless.txt
      
      diff --git a/pypy/doc/discussion/gc.rst b/pypy/doc/discussion/gc.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/gc.rst
      +++ /dev/null
      @@ -1,77 +0,0 @@
      -
      -*Note: these things are experimental and are being implemented on the
      -`io-improvements`_ branch*
      -
      -.. _`io-improvements`: http://codespeak.net/svn/pypy/branch/io-improvements
      -
      -=============
      -GC operations
      -=============
      -
      -This document tries to gather gc-related issues which are very recent
      -or in-development. Also, it tries to document needed gc refactorings
      -and expected performance of certain gc-related operations.
      -
      -Problem area
      -============
      -
      -Since some of our gcs are moving, we at some point decided to simplify
      -the issue of taking care of it by always copying the contents of
      -data that goes to C level. This yields a performance penalty, also
      -because some gcs do not move data around anyway.
      -
      -So we decided to introduce new operations which will simplify issues
      -regarding this.
      -
      -Pure gc operations
      -==================
      -
      -(All available from rlib.rgc)
      -
      -* can_move(p) - returns a flag telling whether pointer p will move.
      -  useful for example when you want to know whether memcopy is safe.
      -
      -* malloc_nonmovable(TP, n=None) - tries to allocate non-moving object.
      -  if it succeeds, it returns an object, otherwise (for whatever reasons)
      -  it returns a null pointer. Does not raise! (never)
      -
      -Usage patterns
      -==============
      -
      -Usually those functions are used via helpers located in rffi. For things like
      -os.write - first get_nonmovingbuffer(data) that will give you a pointer
      -suitable of passing to C and finally free_nonmovingbuffer.
      -
      -For os.read like usage - you first call alloc_buffer (that will allocate a
      -buffer of desired size passable to C) and afterwards create str_from_buffer,
      -finally calling keep_buffer_alive_until_here.
      -
      -String builder
      -==============
      -
      -In Python strings are immutable by design. In RPython this still holds true,
      -but since we cooperate with lower (C/POSIX) level, which has no notion of
      -strings, we use buffers. Typical use case is to use list of characters l and
      -then ''.join(l) in order to get a string. This requires a lot of unnecessary
      -copying, which yields performance penalty for such operations as string
      -formatting. Hence the idea of string builder. String builder would be an
      -object to which you can append strings or characters and afterwards build it
      -to a string. Ideally, this set of operations would not contain any copying
      -whatsoever.
      -
      -Low level gc operations for string builder
      -------------------------------------------
      -
      -* alloc_buffer(T, size) - allocates Array(nolength=True) with possibility
      -  of later becoming of shape T
      -
      -* realloc_buffer(buf, newsize) - tries to shrink or enlarge buffer buf. Returns
      -  new pointer (since it might involve copying)
      -
      -* build_buffer(T, buf) - creates a type T (previously passed to alloc_buffer)
      -  from buffer.
      -
      -Depending on a gc, those might be implemented dumb (realloc always copies)
      -or using C-level realloc. Might be implemented also in whatever clever way
      -comes to mind.
      -
      
      diff --git a/pypy/doc/config/translation.instrument.rst b/pypy/doc/config/translation.instrument.txt
      copy from pypy/doc/config/translation.instrument.rst
      copy to pypy/doc/config/translation.instrument.txt
      
      diff --git a/pypy/doc/config/translation.rweakref.rst b/pypy/doc/config/translation.rweakref.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.rweakref.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -This indicates if the backend and GC policy support RPython-level weakrefs.
      -Can be tested in an RPython program to select between two implementation
      -strategies.
      
      diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -Internal option. Switch to a different weight heuristic for inlining.
      -This is for profile-based inlining (:config:`translation.backendopt.profile_based_inline`).
      -
      -.. internal
      
      diff --git a/pypy/doc/config/translation.insist.rst b/pypy/doc/config/translation.insist.txt
      copy from pypy/doc/config/translation.insist.rst
      copy to pypy/doc/config/translation.insist.txt
      
      diff --git a/pypy/doc/config/objspace.std.withstrslice.rst b/pypy/doc/config/objspace.std.withstrslice.txt
      copy from pypy/doc/config/objspace.std.withstrslice.rst
      copy to pypy/doc/config/objspace.std.withstrslice.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._ssl.rst b/pypy/doc/config/objspace.usemodules._ssl.txt
      copy from pypy/doc/config/objspace.usemodules._ssl.rst
      copy to pypy/doc/config/objspace.usemodules._ssl.txt
      
      diff --git a/pypy/doc/config/translation.linkerflags.rst b/pypy/doc/config/translation.linkerflags.txt
      copy from pypy/doc/config/translation.linkerflags.rst
      copy to pypy/doc/config/translation.linkerflags.txt
      
      diff --git a/pypy/doc/config/translation.withsmallfuncsets.rst b/pypy/doc/config/translation.withsmallfuncsets.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.withsmallfuncsets.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Represent function sets smaller than this option's value as an integer instead
      -of a function pointer. A call is then done via a switch on that integer, which
      -allows inlining etc. Small numbers for this can speed up PyPy (try 5).
      
      diff --git a/pypy/doc/config/objspace.usemodules.math.rst b/pypy/doc/config/objspace.usemodules.math.txt
      copy from pypy/doc/config/objspace.usemodules.math.rst
      copy to pypy/doc/config/objspace.usemodules.math.txt
      
      diff --git a/pypy/doc/docindex.rst b/pypy/doc/docindex.rst
      deleted file mode 100644
      --- a/pypy/doc/docindex.rst
      +++ /dev/null
      @@ -1,314 +0,0 @@
      -=================================================
      -PyPy - a Python_ implementation written in Python 
      -=================================================
      -
      -.. _Python: http://www.python.org/doc/2.5.2/
      -
      -
      -.. contents:: :depth: 1
      -
      -
      -PyPy User Documentation
      -===============================================
      -
      -`getting started`_ provides hands-on instructions 
      -including a two-liner to run the PyPy Python interpreter 
      -on your system, examples on advanced features and 
      -entry points for using PyPy's translation tool chain. 
      -
      -`FAQ`_ contains some frequently asked questions.
      -
      -New features of PyPy's Python Interpreter and 
      -Translation Framework: 
      -
      -  * `Differences between PyPy and CPython`_
      -  * `What PyPy can do for your objects`_
      -  * `Stackless and coroutines`_
      -  * `JIT Generation in PyPy`_ 
      -  * `Sandboxing Python code`_
      -
      -Status_ of the project.
      -
      -
      -Project Documentation
      -=====================================
      -
      -PyPy was funded by the EU for several years. See the `web site of the EU
      -project`_ for more details.
      -
      -.. _`web site of the EU project`: http://pypy.org
      -
      -architecture_ gives a complete view of PyPy's basic design. 
      -
      -`coding guide`_ helps you to write code for PyPy (especially also describes
      -coding in RPython a bit). 
      -
      -`sprint reports`_ lists reports written at most of our sprints, from
      -2003 to the present.
      -
      -`papers, talks and related projects`_ lists presentations 
      -and related projects as well as our published papers.
      -
      -`ideas for PyPy related projects`_ which might be a good way to get
      -into PyPy.
      -
      -`PyPy video documentation`_ is a page linking to the videos (e.g. of talks and
      -introductions) that are available.
      -
      -`Technical reports`_ is a page that contains links to the
      -reports that we submitted to the European Union.
      -
      -`development methodology`_ describes our sprint-driven approach.
      -
      -`license`_ contains licensing details (basically a straight MIT-license). 
      -
      -`Glossary`_ of PyPy words to help you align your inner self with
      -the PyPy universe.
      -
      -
      -Status
      -===================================
      -
      -PyPy can be used to run Python programs on Linux, OS/X,
      -Windows, on top of .NET, and on top of Java.
      -To dig into PyPy it is recommended to try out the current
      -Subversion HEAD, which is always working or mostly working,
      -instead of the latest release, which is `1.2.0`__.
      -
      -.. __: release-1.2.0.html
      -
      -PyPy is mainly developed on Linux and Mac OS X.  Windows is supported,
      -but platform-specific bugs tend to take longer before we notice and fix
      -them.  Linux 64-bit machines are supported (though it may also take some
      -time before we notice and fix bugs).
      -
      -PyPy's own tests `summary`_, daily updated, run through BuildBot infrastructure.
      -You can also find CPython's compliance tests run with compiled ``pypy-c``
      -executables there.
      -
      -information dating from early 2007: 
      -
      -`PyPy LOC statistics`_ shows LOC statistics about PyPy.
      -
      -`PyPy statistics`_ is a page with various statistics about the PyPy project.
      -
      -`compatibility matrix`_ is a diagram that shows which of the various features
      -of the PyPy interpreter work together with which other features.
      -
      -
      -Source Code Documentation
      -===============================================
      -
      -`object spaces`_ discusses the object space interface 
      -and several implementations. 
      -
      -`bytecode interpreter`_ explains the basic mechanisms 
      -of the bytecode interpreter and virtual machine. 
      -
      -`interpreter optimizations`_ describes our various strategies for
      -improving the performance of our interpreter, including alternative
      -object implementations (for strings, dictionaries and lists) in the
      -standard object space.
      -
      -`translation`_ is a detailed overview of our translation process.  The
      -rtyper_ is the largest component of our translation process.
      -
      -`dynamic-language translation`_ is a paper that describes
      -the translation process, especially the flow object space
      -and the annotator in detail. (This document is one
      -of the `EU reports`_.)
      -
      -`low-level encapsulation`_ describes how our approach hides
      -away a lot of low level details. This document is also part
      -of the `EU reports`_.
      -
      -`translation aspects`_ describes how we weave different
      -properties into our interpreter during the translation
      -process. This document is also part of the `EU reports`_.
      -
      -`garbage collector`_ strategies that can be used by the virtual
      -machines produced by the translation process.
      -
      -`parser`_ contains (outdated, unfinished) documentation about
      -the parser.
      -
      -`rlib`_ describes some modules that can be used when implementing programs in
      -RPython.
      -
      -`configuration documentation`_ describes the various configuration options that
      -allow you to customize PyPy.
      -
      -`CLI backend`_ describes the details of the .NET backend.
      -
      -`JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler
      -from our Python interpreter.
      -
      -
      -
      -.. _`FAQ`: faq.html
      -.. _Glossary: glossary.html
      -.. _`PyPy video documentation`: video-index.html
      -.. _parser: parser.html
      -.. _`development methodology`: dev_method.html
      -.. _`sprint reports`: sprint-reports.html
      -.. _`papers, talks and related projects`: extradoc.html
      -.. _`license`: ../../LICENSE
      -.. _`PyPy LOC statistics`: http://codespeak.net/~hpk/pypy-stat/
      -.. _`PyPy statistics`: http://codespeak.net/pypy/trunk/pypy/doc/statistic
      -.. _`object spaces`: objspace.html 
      -.. _`interpreter optimizations`: interpreter-optimizations.html 
      -.. _`translation`: translation.html 
      -.. _`dynamic-language translation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      -.. _`low-level encapsulation`: low-level-encapsulation.html
      -.. _`translation aspects`: translation-aspects.html
      -.. _`configuration documentation`: config/
      -.. _`coding guide`: coding-guide.html 
      -.. _`architecture`: architecture.html 
      -.. _`getting started`: getting-started.html 
      -.. _`theory`: theory.html
      -.. _`bytecode interpreter`: interpreter.html 
      -.. _`EU reports`: index-report.html
      -.. _`Technical reports`: index-report.html
      -.. _`summary`: http://codespeak.net:8099/summary
      -.. _`ideas for PyPy related projects`: project-ideas.html
      -.. _`Nightly builds and benchmarks`: http://tuatara.cs.uni-duesseldorf.de/benchmark.html
      -.. _`directory reference`: 
      -.. _`rlib`: rlib.html
      -.. _`Sandboxing Python code`: sandbox.html
      -
      -PyPy directory cross-reference 
      -------------------------------
      -
      -Here is a fully referenced alphabetical two-level deep 
      -directory overview of PyPy: 
      -
      -============================   =========================================== 
      -Directory                      explanation/links
      -============================   =========================================== 
      -`annotation/`_                 `type inferencing code`_ for `RPython`_ programs 
      -
      -`bin/`_                        command-line scripts, mainly `py.py`_ and `translatorshell.py`_
      -
      -`config/`_                     handles the numerous options for building and running PyPy
      -
      -`doc/`_                        text versions of PyPy developer documentation
      -
      -`doc/config/`_                 documentation for the numerous translation options
      -
      -`doc/discussion/`_             drafts of ideas and documentation
      -
      -``doc/*/``                     other specific documentation topics or tools
      -
      -`interpreter/`_                `bytecode interpreter`_ and related objects
      -                               (frames, functions, modules,...) 
      -
      -`interpreter/pyparser/`_       interpreter-level Python source parser
      -
      -`interpreter/astcompiler/`_    interpreter-level bytecode compiler, via an AST
      -                               representation
      -
      -`module/`_                     contains `mixed modules`_ implementing core modules with 
      -                               both application and interpreter level code.
      -                               Not all are finished and working.  Use the ``--withmod-xxx``
      -                               or ``--allworkingmodules`` translation options.
      -
      -`objspace/`_                   `object space`_ implementations
      -
      -`objspace/trace.py`_           the `trace object space`_ monitoring bytecode and space operations
      -
      -`objspace/dump.py`_            the dump object space saves a large, searchable log file
      -                               with all operations
      -
      -`objspace/taint.py`_           the `taint object space`_, providing object tainting
      -
      -`objspace/thunk.py`_           the `thunk object space`_, providing unique object features 
      -
      -`objspace/flow/`_              the FlowObjSpace_ implementing `abstract interpretation`
      -
      -`objspace/std/`_               the StdObjSpace_ implementing CPython's objects and types
      -
      -`rlib/`_                       a `"standard library"`_ for RPython_ programs
      -
      -`rpython/`_                    the `RPython Typer`_ 
      -
      -`rpython/lltypesystem/`_       the `low-level type system`_ for C-like backends
      -
      -`rpython/ootypesystem/`_       the `object-oriented type system`_ for OO backends
      -
      -`rpython/memory/`_             the `garbage collector`_ construction framework
      -
      -`tool/`_                       various utilities and hacks used from various places 
      -
      -`tool/algo/`_                  general-purpose algorithmic and mathematic
      -                               tools
      -
      -`tool/pytest/`_                support code for our `testing methods`_
      -
      -`translator/`_                 translation_ backends and support code
      -
      -`translator/backendopt/`_      general optimizations that run before a backend generates code
      -
      -`translator/c/`_               the `GenC backend`_, producing C code from an
      -                               RPython program (generally via the rtyper_)
      -
      -`translator/cli/`_             the `CLI backend`_ for `.NET`_ (Microsoft CLR or Mono_)
      -
      -`translator/goal/`_            our `main PyPy-translation scripts`_ live here
      -
      -`translator/jvm/`_             the Java backend
      -
      -`translator/stackless/`_       the `Stackless Transform`_
      -
      -`translator/tool/`_            helper tools for translation, including the Pygame
      -                               `graph viewer`_
      -
      -``*/test/``                    many directories have a test subdirectory containing test 
      -                               modules (see `Testing in PyPy`_) 
      -
      -``_cache/``                    holds cache files from internally `translating application 
      -                               level to interpreterlevel`_ code.   
      -============================   =========================================== 
      -
      -.. _`bytecode interpreter`: interpreter.html
      -.. _`translating application level to interpreterlevel`: geninterp.html
      -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy 
      -.. _`mixed modules`: coding-guide.html#mixed-modules 
      -.. _`modules`: coding-guide.html#modules 
      -.. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf
      -.. _`object space`: objspace.html
      -.. _FlowObjSpace: objspace.html#the-flow-object-space 
      -.. _`trace object space`: objspace.html#the-trace-object-space 
      -.. _`taint object space`: objspace-proxies.html#taint
      -.. _`thunk object space`: objspace-proxies.html#thunk
      -.. _`transparent proxies`: objspace-proxies.html#tproxy
      -.. _`Differences between PyPy and CPython`: cpython_differences.html
      -.. _`What PyPy can do for your objects`: objspace-proxies.html
      -.. _`Stackless and coroutines`: stackless.html
      -.. _StdObjSpace: objspace.html#the-standard-object-space 
      -.. _`abstract interpretation`: theory.html#abstract-interpretation
      -.. _`rpython`: coding-guide.html#rpython 
      -.. _`type inferencing code`: translation.html#the-annotation-pass 
      -.. _`RPython Typer`: translation.html#rpython-typer 
      -.. _`testing methods`: coding-guide.html#testing-in-pypy
      -.. _`translation`: translation.html 
      -.. _`GenC backend`: translation.html#genc 
      -.. _`CLI backend`: cli-backend.html
      -.. _`py.py`: getting-started-python.html#the-py.py-interpreter
      -.. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator
      -.. _JIT: jit/index.html
      -.. _`JIT Generation in PyPy`: jit/index.html
      -.. _`just-in-time compiler generator`: jit/index.html
      -.. _rtyper: rtyper.html
      -.. _`low-level type system`: rtyper.html#low-level-type
      -.. _`object-oriented type system`: rtyper.html#oo-type
      -.. _`garbage collector`: garbage_collection.html
      -.. _`Stackless Transform`: translation.html#the-stackless-transform
      -.. _`main PyPy-translation scripts`: getting-started-python.html#translating-the-pypy-python-interpreter
      -.. _`.NET`: http://www.microsoft.com/net/
      -.. _Mono: http://www.mono-project.com/
      -.. _`"standard library"`: rlib.html
      -.. _`graph viewer`: getting-started-dev.html#try-out-the-translator
      -.. _`compatibility matrix`: image/compat-matrix.png
      -
      -.. include:: _ref.rst
      -
      
      diff --git a/pypy/doc/config/objspace.usemodules.errno.rst b/pypy/doc/config/objspace.usemodules.errno.txt
      copy from pypy/doc/config/objspace.usemodules.errno.rst
      copy to pypy/doc/config/objspace.usemodules.errno.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.itertools.rst b/pypy/doc/config/objspace.usemodules.itertools.txt
      copy from pypy/doc/config/objspace.usemodules.itertools.rst
      copy to pypy/doc/config/objspace.usemodules.itertools.txt
      
      diff --git a/pypy/doc/config/translation.cli.exception_transformer.rst b/pypy/doc/config/translation.cli.exception_transformer.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.cli.exception_transformer.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Use the exception transformer instead of the native .NET exceptions to
      -implement RPython exceptions. Enable this option only if you know what
      -you are doing.
      
      diff --git a/pypy/doc/config/objspace.usemodules.marshal.rst b/pypy/doc/config/objspace.usemodules.marshal.txt
      copy from pypy/doc/config/objspace.usemodules.marshal.rst
      copy to pypy/doc/config/objspace.usemodules.marshal.txt
      
      diff --git a/pypy/doc/config/objspace.std.withsmallint.rst b/pypy/doc/config/objspace.std.withsmallint.txt
      copy from pypy/doc/config/objspace.std.withsmallint.rst
      copy to pypy/doc/config/objspace.std.withsmallint.txt
      
      diff --git a/pypy/translator/cli/test/mylib.py b/pypy/translator/cli/test/mylib.py
      deleted file mode 100644
      --- a/pypy/translator/cli/test/mylib.py
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -from pypy.translator.cli.carbonpython import export
      -
      - at export(int, int)
      -def sum(a, b):
      -    return a+b
      
      diff --git a/pypy/doc/config/objspace.usemodules._sre.rst b/pypy/doc/config/objspace.usemodules._sre.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._sre.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the '_sre' module. 
      -This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/config/translation.backendopt.print_statistics.rst b/pypy/doc/config/translation.backendopt.print_statistics.txt
      copy from pypy/doc/config/translation.backendopt.print_statistics.rst
      copy to pypy/doc/config/translation.backendopt.print_statistics.txt
      
      diff --git a/pypy/doc/config/translation.taggedpointers.rst b/pypy/doc/config/translation.taggedpointers.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.taggedpointers.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Enable tagged pointers. This option is mostly useful for the Smalltalk and
      -Prolog interpreters. For the Python interpreter the option
      -:config:`objspace.std.withsmallint` should be used.
      
      diff --git a/pypy/doc/crufty.rst b/pypy/doc/crufty.rst
      deleted file mode 100644
      --- a/pypy/doc/crufty.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -.. warning::
      -
      -   This documentation may be out-of-date or obsolete (identified on 2011-03-14 at the PyCon US sprint)
      
      diff --git a/pypy/doc/config/objspace.usemodules.imp.rst b/pypy/doc/config/objspace.usemodules.imp.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.imp.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'imp' module.
      -This module is included by default.
      
      diff --git a/pypy/doc/config/objspace.usemodules.time.rst b/pypy/doc/config/objspace.usemodules.time.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.time.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -Use the 'time' module. 
      -
      -Obsolete; use :config:`objspace.usemodules.rctime` for our up-to-date version
      -of the application-level 'time' module.
      
      diff --git a/pypy/doc/config/objspace.std.withtproxy.rst b/pypy/doc/config/objspace.std.withtproxy.txt
      copy from pypy/doc/config/objspace.std.withtproxy.rst
      copy to pypy/doc/config/objspace.std.withtproxy.txt
      
      diff --git a/pypy/doc/config/translation.output.rst b/pypy/doc/config/translation.output.txt
      copy from pypy/doc/config/translation.output.rst
      copy to pypy/doc/config/translation.output.txt
      
      diff --git a/pypy/doc/image/parsing_example3.dot b/pypy/doc/image/parsing_example3.dot
      deleted file mode 100644
      --- a/pypy/doc/image/parsing_example3.dot
      +++ /dev/null
      @@ -1,13 +0,0 @@
      -digraph G{
      -"-1219325716" [label="n"];
      -"-1219325716" -> "-1219325844";
      -"-1219325844" [shape=box,label="__0_A\n'A'"];
      -"-1219325716" -> "-1219324372";
      -"-1219324372" [label="n"];
      -"-1219324372" -> "-1219325524";
      -"-1219325524" [shape=box,label="__0_A\n'A'"];
      -"-1219324372" -> "-1219324308";
      -"-1219324308" [label="n"];
      -"-1219324308" -> "-1219325492";
      -"-1219325492" [shape=box,label="__0_A\n'A'"];
      -}
      
      diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.rst b/pypy/doc/config/objspace.std.mutable_builtintypes.txt
      copy from pypy/doc/config/objspace.std.mutable_builtintypes.rst
      copy to pypy/doc/config/objspace.std.mutable_builtintypes.txt
      
      diff --git a/pypy/doc/discussion/distribution-implementation.rst b/pypy/doc/discussion/distribution-implementation.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/distribution-implementation.rst
      +++ /dev/null
      @@ -1,91 +0,0 @@
      -=====================================================
      -Random implementation details of distribution attempt
      -=====================================================
      -
      -.. contents::
      -.. sectnum::
      -
      -This document attempts to broaden this `dist thoughts`_.
      -
      -.. _`dist thoughts`: distribution-newattempt.html
      -
      -Basic implementation:
      ----------------------
      -
      -First we do split objects into value-only primitives (like int) and other.
      -Basically immutable builtin types which cannot contain user-level objects
      -(int, float, long, str, None, etc.) will be always transferred as value-only
      -objects (having no states etc.). The every other object (user created classes,
      -instances, modules, lists, tuples, etc. etc.) are always executed by reference.
      -(Of course if somebody wants to ie. copy the instance, he can marshal/pickle
      -this to string and send, but it's outside the scope of this attempt). Special
      -case might be immutable data structure (tuple, frozenset) containing simple
      -types (this becomes simple type).
      -
      -XXX: What to do with code types? Marshalling them and sending seems to have no
      -sense. Remote execution? Local execution with remote f_locals and f_globals?
      -
      -Every remote object has got special class W_RemoteXXX where XXX is interp-level
      -class implementing this object. W_RemoteXXX implements all the operations
      -by using special app-level code that sends method name and arguments over the wire
      -(arguments might be either simple objects which are simply send over the app-level
      -code or references to local objects).
      -
      -So the basic scheme would look like::
      -
      -    remote_ref = remote("Object reference")
      -    remote_ref.any_method()
      -
      -``remote_ref`` in above example looks like normal python object to user,
      -but is implemented differently (W_RemoteXXX), and uses app-level proxy
      -to forward each interp-level method call.
      -
      -Abstraction layers:
      --------------------
      -
      -In this section we define remote side as a side on which calls are
      -executed and local side is the one on which calls are run.
      -
      -* Looking from the local side, first thing that we see is object
      -  which looks like normal object (has got the same interp-level typedef)
      -  but has got different implementation. Basically this is the shallow copy
      -  of remote object (however you define shallow, it's up to the code which
      -  makes the copy. Basically the copy which can be marshalled or send over
      -  the wire or saved for future purpose). This is W_RemoteXXX where XXX is
      -  real object name. Some operations on that object requires accessing remote
      -  side of the object, some might not need such (for example remote int
      -  is totally the same int as local one, it could not even be implemented
      -  differently).
      -
      -* For every interp-level operation, which accesses internals that are not
      -  accessible at the local side, (basically all attribute accesses which
      -  are accessing things that are subclasses of W_Object) we provide special
      -  W_Remote version, which downloads necessary object when needed
      -  (if accessed). This is the same as normal W_RemoteXXX (we know the type!)
      -  but not needed yet.
      -
      -* From the remote point of view, every exported object which needs such
      -  has got a local appropriate storage W_LocalXXX where XXX is a type 
      -  by which it could be accessed from a wire.
      -
      -The real pain:
      ---------------
      -
      -For every attribute access when we get W_RemoteXXX, we need to check
      -the download flag - which sucks a bit. (And we have to support it somehow
      -in annotator, which sucks a lot). The (some) idea is to wrap all the methods
      -with additional checks, but that's both unclear and probably not necessary.
      -
      -XXX If we can easily change underlying implementation of an object, than
      -this might become way easier. Right now I'll try to have it working and
      -thing about RPython later.
      -
      -App-level remote tool:
      -----------------------
      -
      -For purpose of app-level tool which can transfer the data (well, socket might
      -be enough, but suppose I want to be more flexible), I would use `py.execnet`_,
      -probably using some of the Armin's hacks to rewrite it using greenlets instead
      -of threads.
      -
      -.. _`py.execnet`: http://codespeak.net/py/current/doc/execnet.html
      
      diff --git a/pypy/doc/config/translation.backendopt.print_statistics.rst b/pypy/doc/config/translation.backendopt.print_statistics.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.print_statistics.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Debugging option. Print statics about the forest of flowgraphs as they
      -go through the various backend optimizations.
      \ No newline at end of file
      
      diff --git a/pypy/doc/config/objspace.usemodules._locale.rst b/pypy/doc/config/objspace.usemodules._locale.txt
      copy from pypy/doc/config/objspace.usemodules._locale.rst
      copy to pypy/doc/config/objspace.usemodules._locale.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.rst b/pypy/doc/config/translation.backendopt.really_remove_asserts.rst
      deleted file mode 100644
      
      diff --git a/pypy/doc/config/objspace.usemodules._warnings.rst b/pypy/doc/config/objspace.usemodules._warnings.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._warnings.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Use the '_warning' module. This module is expected to be working and is included by default.
      
      diff --git a/pypy/doc/image/parsing_example8.dot b/pypy/doc/image/parsing_example8.dot
      deleted file mode 100644
      --- a/pypy/doc/image/parsing_example8.dot
      +++ /dev/null
      @@ -1,21 +0,0 @@
      -digraph G{
      -"-1213611892" [label="list"];
      -"-1213611892" -> "-1213608980";
      -"-1213608980" [shape=box,label="DECIMAL\n'1'"];
      -"-1213611892" -> "-1213623476";
      -"-1213623476" [label="list"];
      -"-1213623476" -> "-1213623380";
      -"-1213623380" [shape=box,label="DECIMAL\n'2'"];
      -"-1213623476" -> "-1213442868";
      -"-1213442868" [label="list"];
      -"-1213442868" -> "-1213441652";
      -"-1213441652" [shape=box,label="DECIMAL\n'3'"];
      -"-1213442868" -> "-1213441332";
      -"-1213441332" [label="list"];
      -"-1213441332" -> "-1213441620";
      -"-1213441620" [shape=box,label="DECIMAL\n'4'"];
      -"-1213441332" -> "-1213443060";
      -"-1213443060" [label="list"];
      -"-1213443060" -> "-1213442100";
      -"-1213442100" [shape=box,label="DECIMAL\n'5'"];
      -}
      \ No newline at end of file
      
      diff --git a/pypy/doc/discussion/GC-performance.rst b/pypy/doc/discussion/GC-performance.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/GC-performance.rst
      +++ /dev/null
      @@ -1,118 +0,0 @@
      -StartHeapsize# is the framework GC as of revision 31586 with initial
      -bytes_malloced_threshold of 2-512 MB
      -
      -NewHeuristics is the framework GC with a new heuristics for adjusting
      -the bytes_malloced_threshold
      -
      -::
      -
      - Pystone
      - StartHeapsize2:
      - This machine benchmarks at 5426.92 pystones/second
      - This machine benchmarks at 5193.91 pystones/second
      - This machine benchmarks at 5403.46 pystones/second
      - StartHeapsize8:
      - This machine benchmarks at 6075.33 pystones/second
      - This machine benchmarks at 6007.21 pystones/second
      - This machine benchmarks at 6122.45 pystones/second
      - StartHeapsize32:
      - This machine benchmarks at 6643.05 pystones/second
      - This machine benchmarks at 6590.51 pystones/second
      - This machine benchmarks at 6593.41 pystones/second
      - StartHeapsize128:
      - This machine benchmarks at 7065.47 pystones/second
      - This machine benchmarks at 7102.27 pystones/second
      - This machine benchmarks at 7082.15 pystones/second
      - StartHeapsize512:
      - This machine benchmarks at 7208.07 pystones/second
      - This machine benchmarks at 7197.7 pystones/second
      - This machine benchmarks at 7246.38 pystones/second
      - NewHeuristics:
      - This machine benchmarks at 6821.28 pystones/second
      - This machine benchmarks at 6858.71 pystones/second
      - This machine benchmarks at 6902.9 pystones/second
      -
      -
      - Richards
      - StartHeapSize2:
      - Average time per iteration: 5456.21 ms
      - Average time per iteration: 5529.31 ms
      - Average time per iteration: 5398.82 ms
      - StartHeapsize8:
      - Average time per iteration: 4775.43 ms
      - Average time per iteration: 4753.25 ms
      - Average time per iteration: 4781.37 ms
      - StartHeapsize32:
      - Average time per iteration: 4554.84 ms
      - Average time per iteration: 4501.86 ms
      - Average time per iteration: 4531.59 ms
      - StartHeapsize128:
      - Average time per iteration: 4329.42 ms
      - Average time per iteration: 4360.87 ms
      - Average time per iteration: 4392.81 ms
      - StartHeapsize512:
      - Average time per iteration: 4371.72 ms
      - Average time per iteration: 4399.70 ms
      - Average time per iteration: 4354.66 ms
      - NewHeuristics:
      - Average time per iteration: 4763.56 ms
      - Average time per iteration: 4803.49 ms
      - Average time per iteration: 4840.68 ms
      -
      -
      - translate rpystone
      -   time pypy-c translate --text --batch --backendopt --no-compile targetrpystonedalone.py
      - StartHeapSize2:
      - real    1m38.459s
      - user    1m35.582s
      - sys     0m0.440s
      - StartHeapsize8:
      - real    1m35.398s
      - user    1m33.878s
      - sys     0m0.376s
      - StartHeapsize32:
      - real    1m5.475s
      - user    1m5.108s
      - sys     0m0.180s
      - StartHeapsize128:
      - real    0m52.941s
      - user    0m52.395s
      - sys     0m0.328s
      - StartHeapsize512:
      - real    1m3.727s
      - user    0m50.031s
      - sys     0m1.240s
      - NewHeuristics:
      - real    0m53.449s
      - user    0m52.771s
      - sys     0m0.356s
      -
      -
      - docutils
      -   time pypy-c rst2html doc/coding-guide.txt
      - StartHeapSize2:
      - real    0m36.125s
      - user    0m35.562s
      - sys     0m0.088s
      - StartHeapsize8:
      - real    0m32.678s
      - user    0m31.106s
      - sys     0m0.084s
      - StartHeapsize32:
      - real    0m22.041s
      - user    0m21.085s
      - sys     0m0.132s
      - StartHeapsize128:
      - real    0m19.350s
      - user    0m18.653s
      - sys     0m0.324s
      - StartHeapsize512:
      - real    0m19.116s
      - user    0m17.517s
      - sys     0m0.620s
      - NewHeuristics:
      - real    0m20.990s
      - user    0m20.109s
      - sys     0m0.196s
      -
      -
      
      diff --git a/pypy/doc/config/objspace.usemodules.bz2.rst b/pypy/doc/config/objspace.usemodules.bz2.txt
      copy from pypy/doc/config/objspace.usemodules.bz2.rst
      copy to pypy/doc/config/objspace.usemodules.bz2.txt
      
      diff --git a/pypy/doc/config/objspace.std.withstrjoin.rst b/pypy/doc/config/objspace.std.withstrjoin.txt
      copy from pypy/doc/config/objspace.std.withstrjoin.rst
      copy to pypy/doc/config/objspace.std.withstrjoin.txt
      
      diff --git a/pypy/doc/discussion/emptying-the-malloc-zoo.rst b/pypy/doc/discussion/emptying-the-malloc-zoo.rst
      deleted file mode 100644
      --- a/pypy/doc/discussion/emptying-the-malloc-zoo.rst
      +++ /dev/null
      @@ -1,40 +0,0 @@
      -.. coding: utf-8
      -
      -Emptying the malloc zoo
      -=======================
      -
      -Around the end-of-the-EU-project time there were two major areas of
      -obscurity in the memory management area:
      -
      - 1. The confusing set of operations that the low-level backend are
      -    expected to implement.
      -
      - 2. The related, but slightly different, confusion of the various
      -    "flavours" of malloc: what's the difference between
      -    lltype.malloc(T, flavour='raw') and llmemory.raw_malloc(sizeof(T))?
      -
      -At the post-ep2007 sprint, Samuele and Michael attacked the first
      -problem a bit: making the Boehm GC transformer only require three
      -simple operations of the backend.  This could be extending still
      -further by having the gc transformer use rffi to insert calls to the
      -relevant Boehm functions^Wmacros, and then the backend wouldn't need
      -to know anything about Boehm at all (but... LLVM).
      -
      -A potential next step is to work out what we want the "llpython"
      -interface to memory management to be.
      -
      -There are various use cases:
      -
      -**lltype.malloc(T) – T is a fixed-size GC container**
      -
      -  This is the default case.  Non-pointers inside the allocated memory
      -  will not be zeroed.  The object will be managed by the GC, no
      -  deallocation required.
      -
      -**lltype.malloc(T, zero=True) – T is a GC container**
      -
      -  As above, but all fields will be cleared.
      -
      -**lltype.malloc(U, raw=True) – U is not a GC container**
      -
      -  Blah.
      
      diff --git a/pypy/doc/config/translation.debug.rst b/pypy/doc/config/translation.debug.txt
      copy from pypy/doc/config/translation.debug.rst
      copy to pypy/doc/config/translation.debug.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.token.rst b/pypy/doc/config/objspace.usemodules.token.txt
      copy from pypy/doc/config/objspace.usemodules.token.rst
      copy to pypy/doc/config/objspace.usemodules.token.txt
      
      diff --git a/pypy/doc/config/objspace.std.mutable_builtintypes.rst b/pypy/doc/config/objspace.std.mutable_builtintypes.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.mutable_builtintypes.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Allow modification of builtin types.  Disabled by default.
      
      diff --git a/pypy/doc/config/translation.vanilla.rst b/pypy/doc/config/translation.vanilla.txt
      copy from pypy/doc/config/translation.vanilla.rst
      copy to pypy/doc/config/translation.vanilla.txt
      
      diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.rst b/pypy/doc/config/objspace.std.withprebuiltchar.txt
      copy from pypy/doc/config/objspace.std.withprebuiltchar.rst
      copy to pypy/doc/config/objspace.std.withprebuiltchar.txt
      
      diff --git a/pypy/doc/config/translation.profopt.rst b/pypy/doc/config/translation.profopt.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.profopt.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -Use GCCs profile-guided optimizations. This option specifies the the
      -arguments with which to call pypy-c (and in general the translated
      -RPython program) to gather profile data. Example for pypy-c: "-c 'from
      -richards import main;main(); from test import pystone;
      -pystone.main()'"
      
      diff --git a/pypy/doc/config/objspace.usemodules.clr.rst b/pypy/doc/config/objspace.usemodules.clr.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.clr.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Use the 'clr' module. 
      
      diff --git a/pypy/doc/config/objspace.usemodules.crypt.rst b/pypy/doc/config/objspace.usemodules.crypt.txt
      copy from pypy/doc/config/objspace.usemodules.crypt.rst
      copy to pypy/doc/config/objspace.usemodules.crypt.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._ssl.rst b/pypy/doc/config/objspace.usemodules._ssl.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._ssl.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -Use the '_ssl' module, which implements SSL socket operations.
      
      diff --git a/pypy/doc/config/objspace.usemodules._socket.rst b/pypy/doc/config/objspace.usemodules._socket.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._socket.rst
      +++ /dev/null
      @@ -1,7 +0,0 @@
      -Use the '_socket' module. 
      -
      -This is our implementation of '_socket', the Python builtin module
      -exposing socket primitives, which is wrapped and used by the standard
      -library 'socket.py' module. It is based on `rffi`_.
      -
      -.. _`rffi`: ../rffi.html
      
      diff --git a/pypy/doc/config/translation.backendopt.inline.rst b/pypy/doc/config/translation.backendopt.inline.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.inline.rst
      +++ /dev/null
      @@ -1,10 +0,0 @@
      -Inline flowgraphs based on an heuristic, the default one considers
      -essentially the a weight for the flowgraph based on the number of
      -low-level operations in them (see
      -:config:`translation.backendopt.inline_threshold` ).
      -
      -Some amount of inlining in order to have RPython builtin type helpers
      -inlined is needed for malloc removal
      -(:config:`translation.backendopt.mallocs`) to be effective.
      -
      -This optimization is used by default.
      
      diff --git a/LICENSE b/LICENSE
      --- a/LICENSE
      +++ b/LICENSE
      @@ -37,78 +37,154 @@
           Armin Rigo
           Maciej Fijalkowski
           Carl Friedrich Bolz
      +    Amaury Forgeot d'Arc
      +    Antonio Cuni
           Samuele Pedroni
      -    Antonio Cuni
           Michael Hudson
      +    Holger Krekel
           Christian Tismer
      -    Holger Krekel
      +    Benjamin Peterson
           Eric van Riet Paap
      +    Anders Chrigström
      +    Håkan Ardö
           Richard Emslie
      -    Anders Chrigstrom
      -    Amaury Forgeot d Arc
      -    Aurelien Campeas
      +    Dan Villiom Podlaski Christiansen
      +    Alexander Schremmer
      +    Alex Gaynor
      +    David Schneider
      +    Aurelién Campeas
           Anders Lehmann
      +    Camillo Bruni
           Niklaus Haldimann
      +    Leonardo Santagada
      +    Toon Verwaest
           Seo Sanghyeon
      -    Leonardo Santagada
           Lawrence Oluyede
      +    Bartosz Skowron
           Jakub Gustak
           Guido Wesdorp
      -    Benjamin Peterson
      -    Alexander Schremmer
      +    Adrien Di Mascio
      +    Laura Creighton
      +    Ludovic Aubry
           Niko Matsakis
      -    Ludovic Aubry
      +    Daniel Roberts
      +    Jason Creighton
      +    Jacob Hallén
           Alex Martelli
      -    Toon Verwaest
      +    Anders Hammarquist
      +    Jan de Mooij
           Stephan Diehl
      -    Adrien Di Mascio
      +    Michael Foord
           Stefan Schwarzer
           Tomek Meka
           Patrick Maupin
      -    Jacob Hallen
      -    Laura Creighton
           Bob Ippolito
      -    Camillo Bruni
      -    Simon Burton
           Bruno Gola
           Alexandre Fayolle
           Marius Gedminas
      +    Simon Burton
      +    Jean-Paul Calderone
      +    John Witulski
      +    Wim Lavrijsen
      +    Andreas Stührk
      +    Jean-Philippe St. Pierre
           Guido van Rossum
      +    Pavel Vinogradov
           Valentino Volonghi
      +    Paul deGrandis
           Adrian Kuhn
      -    Paul deGrandis
      +    tav
      +    Georg Brandl
           Gerald Klix
           Wanja Saatkamp
      -    Anders Hammarquist
      +    Boris Feigin
           Oscar Nierstrasz
      +    Dario Bertini
      +    David Malcolm
           Eugene Oden
      +    Henry Mason
           Lukas Renggli
           Guenter Jantzen
      +    Ronny Pfannschmidt
      +    Bert Freudenberg
      +    Amit Regmi
      +    Ben Young
      +    Nicolas Chauvat
      +    Andrew Durdin
      +    Michael Schneider
      +    Nicholas Riley
      +    Rocco Moretti
      +    Gintautas Miliauskas
      +    Michael Twomey
      +    Igor Trindade Oliveira
      +    Lucian Branescu Mihaila
      +    Olivier Dormond
      +    Jared Grubb
      +    Karl Bartel
      +    Gabriel Lavoie
      +    Brian Dorsey
      +    Victor Stinner
      +    Stuart Williams
      +    Toby Watson
      +    Antoine Pitrou
      +    Justas Sadzevicius
      +    Neil Shepperd
      +    Mikael Schönenberg
      +    Gasper Zejn
      +    Jonathan David Riehl
      +    Elmo Mäntynen
      +    Anders Qvist
      +    Beatrice Düring
      +    Alexander Sedov
      +    Vincent Legoll
      +    Alan McIntyre
      +    Romain Guillebert
      +    Alex Perry
      +    Jens-Uwe Mager
      +    Dan Stromberg
      +    Lukas Diekmann
      +    Carl Meyer
      +    Pieter Zieschang
      +    Alejandro J. Cura
      +    Sylvain Thenault
      +    Travis Francis Athougies
      +    Henrik Vendelbo
      +    Lutz Paelike
      +    Jacob Oscarson
      +    Martin Blais
      +    Lucio Torre
      +    Lene Wagner
      +    Miguel de Val Borro
      +    Ignas Mikalajunas
      +    Artur Lisiecki
      +    Joshua Gilbert
      +    Godefroid Chappelle
      +    Yusei Tahara
      +    Christopher Armstrong
      +    Stephan Busemann
      +    Gustavo Niemeyer
      +    William Leslie
      +    Akira Li
      +    Kristján Valur Jonsson
      +    Bobby Impollonia
      +    Andrew Thompson
      +    Anders Sigfridsson
      +    Jacek Generowicz
      +    Dan Colish
      +    Sven Hager
      +    Zooko Wilcox-O Hearn
      +    Anders Hammarquist
           Dinu Gherman
      -    Bartosz Skowron
      -    Georg Brandl
      -    Ben Young
      -    Jean-Paul Calderone
      -    Nicolas Chauvat
      -    Rocco Moretti
      -    Michael Twomey
      -    boria
      -    Jared Grubb
      -    Olivier Dormond
      -    Stuart Williams
      -    Jens-Uwe Mager
      -    Justas Sadzevicius
      -    Mikael Schönenberg
      -    Brian Dorsey
      -    Jonathan David Riehl
      -    Beatrice During
      -    Elmo Mäntynen
      -    Andreas Friedge
      -    Alex Gaynor
      -    Anders Qvist
      -    Alan McIntyre
      -    Bert Freudenberg
      -    Tav
      +    Dan Colish
      +    Daniel Neuhäuser
      +    Michael Chermside
      +    Konrad Delong
      +    Anna Ravencroft
      +    Greg Price
      +    Armin Ronacher
      +    Jim Baker
      +    Philip Jenvey
      +    Rodrigo Araújo
       
           Heinrich-Heine University, Germany 
           Open End AB (formerly AB Strakt), Sweden
      
      diff --git a/pypy/doc/config/objspace.std.withropeunicode.rst b/pypy/doc/config/objspace.std.withropeunicode.txt
      copy from pypy/doc/config/objspace.std.withropeunicode.rst
      copy to pypy/doc/config/objspace.std.withropeunicode.txt
      
      diff --git a/pypy/doc/config/objspace.std.multimethods.rst b/pypy/doc/config/objspace.std.multimethods.txt
      copy from pypy/doc/config/objspace.std.multimethods.rst
      copy to pypy/doc/config/objspace.std.multimethods.txt
      
      diff --git a/pypy/doc/config/objspace.name.rst b/pypy/doc/config/objspace.name.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.name.rst
      +++ /dev/null
      @@ -1,16 +0,0 @@
      -Determine which `Object Space`_ to use. The `Standard Object Space`_ gives the
      -normal Python semantics, the others are `Object Space Proxies`_ giving
      -additional features (except the Flow Object Space which is not intended
      -for normal usage):
      -
      -  * thunk_: The thunk object space adds lazy evaluation to PyPy.
      -  * taint_: The taint object space adds soft security features.
      -  * dump_:  Using this object spaces results in the dumpimp of all operations
      -    to a log.
      -
      -.. _`Object Space`: ../objspace.html
      -.. _`Object Space Proxies`: ../objspace-proxies.html
      -.. _`Standard Object Space`: ../objspace.html#standard-object-space
      -.. _thunk: ../objspace-proxies.html#thunk
      -.. _taint: ../objspace-proxies.html#taint
      -.. _dump: ../objspace-proxies.html#dump
      
      diff --git a/pypy/doc/config/objspace.std.withsmalllong.rst b/pypy/doc/config/objspace.std.withsmalllong.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withsmalllong.rst
      +++ /dev/null
      @@ -1,5 +0,0 @@
      -Enable "small longs", an additional implementation of the Python
      -type "long", implemented with a C long long.  It is mostly useful
      -on 32-bit; on 64-bit, a C long long is the same as a C long, so
      -its usefulness is limited to Python objects of type "long" that
      -would anyway fit in an "int".
      
      diff --git a/pypy/doc/config/objspace.opcodes.rst b/pypy/doc/config/objspace.opcodes.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.opcodes.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -..  intentionally empty
      
      diff --git a/pypy/doc/config/objspace.usemodules.parser.rst b/pypy/doc/config/objspace.usemodules.parser.txt
      copy from pypy/doc/config/objspace.usemodules.parser.rst
      copy to pypy/doc/config/objspace.usemodules.parser.txt
      
      diff --git a/pypy/doc/config/objspace.std.withrope.rst b/pypy/doc/config/objspace.std.withrope.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withrope.rst
      +++ /dev/null
      @@ -1,7 +0,0 @@
      -Enable ropes to be the default string implementation.
      -
      -See the section in `Standard Interpreter Optimizations`_ for more details.
      -
      -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#ropes
      -
      -
      
      diff --git a/pypy/doc/config/objspace.usemodules.crypt.rst b/pypy/doc/config/objspace.usemodules.crypt.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.crypt.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the 'crypt' module. 
      -This module is expected to be fully working.
      
      diff --git a/pypy/doc/config/objspace.std.logspaceoptypes.rst b/pypy/doc/config/objspace.std.logspaceoptypes.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.logspaceoptypes.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -.. internal
      -
      -Wrap "simple" bytecode implementations like BINARY_ADD with code that collects
      -information about which types these bytecodes receive as arguments.
      
      diff --git a/pypy/doc/config/objspace.usemodules.rst b/pypy/doc/config/objspace.usemodules.txt
      copy from pypy/doc/config/objspace.usemodules.rst
      copy to pypy/doc/config/objspace.usemodules.txt
      
      diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal_heuristic.rst
      +++ /dev/null
      @@ -1,4 +0,0 @@
      -Internal option. Switch to a different weight heuristic for inlining.
      -This is for clever malloc removal (:config:`translation.backendopt.clever_malloc_removal`).
      -
      -.. internal
      
      diff --git a/pypy/doc/config/objspace.usemodules._demo.rst b/pypy/doc/config/objspace.usemodules._demo.txt
      copy from pypy/doc/config/objspace.usemodules._demo.rst
      copy to pypy/doc/config/objspace.usemodules._demo.txt
      
      diff --git a/pypy/doc/config/translation.noprofopt.rst b/pypy/doc/config/translation.noprofopt.txt
      copy from pypy/doc/config/translation.noprofopt.rst
      copy to pypy/doc/config/translation.noprofopt.txt
      
      diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
      --- a/pypy/doc/faq.rst
      +++ b/pypy/doc/faq.rst
      @@ -43,67 +43,57 @@
       complete and well tested, so if your project does not use many
       extension modules there is a good chance that it will work with PyPy.
       
      -We list the differences we know about in `cpython_differences`_.
      +We list the differences we know about in `cpython differences`_.
       
      -There is also an experimental support for CPython extension modules, so
      -they'll run without change (from current observation, rather with little
      -change) on trunk. It has been a part of 1.4 release, but support is still
      -in alpha phase.
      +--------------------------------------------
      +Do CPython Extension modules work with PyPy?
      +--------------------------------------------
      +
      +We have experimental support for CPython extension modules, so
      +they run with minor changes.  This has been a part of PyPy since
      +the 1.4 release, but support is still in beta phase.  CPython
      +extension modules in PyPy are often much slower than in CPython due to
      +the need to emulate refcounting.  It is often faster to take out your
      +CPython extension and replace it with a pure python version that the
      +JIT can see.
      +
      +We fully support ctypes-based extensions.
      +
      +For information on which third party extensions work (or do not work) 
      +with PyPy see the `compatibility wiki`_.
      +
       
       .. _`extension modules`: cpython_differences.html#extension-modules
      -.. _`cpython_differences`: cpython_differences.html
      +.. _`cpython differences`: cpython_differences.html
      +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home
       
      ---------------------------------
      -On what platforms does PyPy run?
      ---------------------------------
      +---------------------------------
      +On which platforms does PyPy run?
      +---------------------------------
       
       PyPy is regularly and extensively tested on Linux machines and on Mac
       OS X and mostly works under Windows too (but is tested there less
       extensively). PyPy needs a CPython running on the target platform to
       bootstrap, as cross compilation is not really meant to work yet.
      -At the moment you need CPython 2.4 (with ctypes) or CPython 2.5 or 2.6
      +At the moment you need CPython 2.5 - 2.7
       for the translation process. PyPy's JIT requires an x86 or x86_64 CPU.
       
      -
       ------------------------------------------------
       Which Python version (2.x?) does PyPy implement?
       ------------------------------------------------
       
      -PyPy currently aims to be fully compatible with Python 2.5. That means that
      -it contains the standard library of Python 2.5 and that it supports 2.5
      -features (such as the with statement).  
      +PyPy currently aims to be fully compatible with Python 2.7. That means that
      +it contains the standard library of Python 2.7 and that it supports 2.7
      +features (such as set comprehensions).  
       
       .. _threading:
       
       -------------------------------------------------
      -Do threads work?  What are the modules that work?
      +Does PyPy have a GIL?  Why?
       -------------------------------------------------
       
      -Operating system-level threads basically work. If you enable the ``thread``
      -module then PyPy will get support for GIL based threading.
      -Note that PyPy also fully supports `stackless-like
      -microthreads`_ (although both cannot be mixed yet).
      -
      -All pure-python modules should work, unless they rely on ugly
      -cpython implementation details, in which case it's their fault.
      -There is an increasing number of compatible CPython extensions working,
      -including things like wxPython or PIL. This is an ongoing development effort
      -to bring as many CPython extension modules working as possible.
      -
      -.. _`stackless-like microthreads`: stackless.html
      -
      -
      -------------------------------------
      -Can I use CPython extension modules?
      -------------------------------------
      -
      -Yes, but the feature is in alpha state and is available only on trunk
      -(not in the 1.2 release). However, we'll only ever support well-behaving
      -CPython extensions. Please consult PyPy developers on IRC or mailing list
      -for explanations if your favorite module works and how you can help to make
      -it happen in case it does not.
      -
      -We fully support ctypes-based extensions, however.
      +Yes, PyPy has a GIL.  Removing the GIL is very hard.  The first problem
      +is that our garbage collectors are not re-entrant.
       
       ------------------------------------------
       How do I write extension modules for PyPy?
      @@ -113,44 +103,27 @@
       
       .. __: extending.html
       
      -
      -.. _`slower than CPython`:
      -.. _`how fast is pypy`:
      -
       -----------------
       How fast is PyPy?
       -----------------
      +This really depends on your code.
      +For pure Python algorithmic code, it is very fast.  For more typical
      +Python programs we generally are 3 times the speed of CPython 2.6.
      +You might be interested in our `benchmarking site`_ and our 
      +`jit documentation`_.
       
      -.. _whysoslow:
      +.. _`benchmarking site`: http://speed.pypy.org
       
      -In three words, PyPy is "kind of fast".  In more than three
      -words, the answer to this question is hard to give as a single
      -number.  The fastest PyPy available so far is clearly PyPy
      -`with a JIT included`_, optimized and translated to C.  This
      -version of PyPy is "kind of fast" in the sense that there are
      -numerous examples of Python code that run *much faster* than
      -CPython, up to a large number of times faster.  And there are
      -also examples of code that are just as slow as without the
      -JIT.  A PyPy that does not include a JIT has performance that
      -is more predictable: it runs generally somewhere between 1 and
      -2 times slower than CPython, in the worst case up to 4 times
      -slower.
      -
      -Obtaining good measurements for the performance when run on
      -the CLI or JVM is difficult, but the JIT on the CLI `seems to
      -work nicely`__ too.
      -
      -.. __: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf
      -.. _`with a JIT included`: jit/index.html
      +.. _`jit documentation`: jit/index.html
       
       
       .. _`prolog and javascript`:
       
      -----------------------------------------------------------------
      -Can PyPy support interpreters for other languages beyond Python?
      -----------------------------------------------------------------
      +--------------------------------------------------------------------------
      +Can I use PyPy's translation toolchain for other languages besides Python?
      +--------------------------------------------------------------------------
       
      -The toolsuite that translates the PyPy interpreter is quite
      +Yes. The toolsuite that translates the PyPy interpreter is quite
       general and can be used to create optimized versions of interpreters
       for any language, not just Python.  Of course, these interpreters
       can make use of the same features that PyPy brings to Python:
      @@ -161,11 +134,12 @@
       Currently, we have preliminary versions of a JavaScript interpreter
       (Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_
       (Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_
      -(produced during a sprint).  `All of them`_ are unfinished at the moment.
      +(produced during a sprint).  On the `PyPy bitbucket page`_ there is also a
      +Scheme and an Io implementation; both of these are unfinished at the moment.
       
      -.. _`Prolog interpreter`: http://codespeak.net/svn/pypy/lang/prolog/
      +.. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/
       .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7
      -.. _`All of them`: http://codespeak.net/svn/pypy/lang/
      +.. _`PyPy bitbucket page`: https://bitbucket.org/pypy/
       
       
       Development
      @@ -175,36 +149,21 @@
       How do I get into PyPy development?  Can I come to sprints?
       -----------------------------------------------------------
       
      -Sure you can come to sprints! We always welcome newcomers and try to help them
      -get started in the project as much as possible (e.g. by providing tutorials and
      -pairing them with experienced PyPy developers). Newcomers should have some
      -Python experience and read some of the PyPy documentation before coming to a
      -sprint.
      +Certainly you can come to sprints! We always welcome newcomers and try
      +to help them as much as possible to get started with the project.  We
      +provide tutorials and pair them with experienced PyPy
      +developers. Newcomers should have some Python experience and read some
      +of the PyPy documentation before coming to a sprint.
       
      -Coming to a sprint is usually also the best way to get into PyPy development.
      -If you want to start on your own, take a look at the list of `project
      -suggestions`_. If you get stuck or need advice, `contact us`_. Usually IRC is
      +Coming to a sprint is usually the best way to get into PyPy development.
      +If you get stuck or need advice, `contact us`_. IRC is
       the most immediate way to get feedback (at least during some parts of the day;
      -many PyPy developers are in Europe) and the `mailing list`_ is better for long
      +most PyPy developers are in Europe) and the `mailing list`_ is better for long
       discussions.
       
      -.. _`project suggestions`: project-ideas.html
       .. _`contact us`: index.html
       .. _`mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev
       
      -----------------------------------------------------------------------
      -I am getting strange errors while playing with PyPy, what should I do?
      -----------------------------------------------------------------------
      -
      -It seems that a lot of strange, unexplainable problems can be magically
      -solved by removing all the \*.pyc files from the PyPy source tree
      -(the script `py.cleanup`_ from py/bin will do that for you).
      -Another thing you can do is removing the directory pypy/_cache
      -completely. If the error is persistent and still annoys you after this
      -treatment please send us a bug report (or even better, a fix :-)
      -
      -.. _`py.cleanup`: http://codespeak.net/py/current/doc/bin.html
      -
       -------------------------------------------------------------
       OSError: ... cannot restore segment prot after reloc... Help?
       -------------------------------------------------------------
      @@ -221,12 +180,12 @@
       Be sure to enable it again if you need it!
       
       
      -PyPy translation tool chain
      -===========================
      +The PyPy translation tool chain
      +===============================
       
      -----------------------------------------
      -Can PyPy compile normal Python programs?
      -----------------------------------------
      +---------------------------------------------
      +Can PyPy compile normal Python programs to C?
      +---------------------------------------------
       
       No, PyPy is not a Python compiler.
       
      @@ -234,36 +193,13 @@
       that a program will manipulate by doing a static analysis.  It should be
       clear if you are familiar with Python, but if in doubt see [BRETT]_.
       
      -What could be attempted is static "soft typing", where you would use a
      -whole bunch of heuristics to guess what types are probably going to show
      -up where.  In this way, you could compile the program into two copies of
      -itself: a "fast" version and a "slow" version.  The former would contain
      -many guards that allow it to fall back to the latter if needed.  That
      -would be a wholly different project than PyPy, though.  (As far as we
      -understand it, this is the approach that the LLVM__ group would like to
      -see LLVM used for, so if you feel like working very hard and attempting
      -something like this, check with them.)
      +If you want a fast Python program, please use our JIT_ instead.
       
      -.. __: http://llvm.org/
      -
      -What PyPy contains is, on the one hand, an non-soft static type
      -inferencer for RPython, which is a sublanguage that we defined just so
      -that it's possible and not too hard to do that; and on the other hand,
      -for the full Python language, we have an interpreter, and a JIT
      -generator which can produce a Just-In-Time Compiler from the
      -interpreter.  The resulting JIT works for the full Python language in a
      -way that doesn't need type inference at all.
      -
      -For more motivation and details about our approach see also [D05.1]_,
      -section 3.
      +.. _JIT: jit/index.html
       
       .. [BRETT] Brett Cannon,
                  Localized Type Inference of Atomic Types in Python,
      -           http://www.ocf.berkeley.edu/~bac/thesis.pdf
      -
      -.. [D05.1] Compiling Dynamic Language Implementations,
      -           Report from the PyPy project to the E.U.,
      -           http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      +           http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.90.3231
       
       .. _`PyPy's RPython`: 
       
      @@ -272,30 +208,26 @@
       ------------------------------
       
       RPython is a restricted subset of the Python language.   It is used for 
      -implementing dynamic language interpreters within the PyPy framework.  The
      -restrictions are to ensure that type inference (and so, ultimately, translation
      -to other languages) of RPython programs is possible. These restrictions only
      -apply after the full import happens, so at import time arbitrary Python code can
      -be executed. 
      +implementing dynamic language interpreters within the PyPy toolchain.  The
      +restrictions ensure that type inference (and so, ultimately, translation
      +to other languages) of RPython programs is possible. 
       
       The property of "being RPython" always applies to a full program, not to single
      -functions or modules (the translation tool chain does a full program analysis).
      -"Full program" in the context of "being RPython" is all the code reachable from
      -an "entry point" function. The translation toolchain follows all calls
      -recursively and discovers what belongs to the program and what not.
      +functions or modules (the translation toolchain does a full program analysis).
      +The translation toolchain follows all calls
      +recursively and discovers what belongs to the program and what does not.
       
      -The restrictions that apply to programs to be RPython mostly limit the ability
      -of mixing types in arbitrary ways. RPython does not allow the usage of two
      +RPython program restrictions mostly limit the ability
      +to mix types in arbitrary ways. RPython does not allow the binding of two
       different types in the same variable. In this respect (and in some others) it
      -feels a bit like Java. Other features not allowed in RPython are the usage of
      +feels a bit like Java. Other features not allowed in RPython are the use of
       special methods (``__xxx__``) except ``__init__`` and ``__del__``, and the
      -usage of reflection capabilities (e.g. ``__dict__``).
      +use of reflection capabilities (e.g. ``__dict__``).
       
      -Most existing standard library modules are not RPython, except for
      -some functions in ``os``, ``math`` and ``time`` that are natively
      -supported. In general it is quite unlikely that an existing Python
      -program is by chance RPython; it is most likely that it would have to be
      -heavily rewritten.
      +You cannot use most existing standard library modules from RPython.  The
      +exceptions are
      +some functions in ``os``, ``math`` and ``time`` that have native support.
      +
       To read more about the RPython limitations read the `RPython description`_.
       
       .. _`RPython description`: coding-guide.html#restricted-python
      @@ -312,29 +244,6 @@
       .. _`sandboxed Python Interpreter`: sandbox.html
       .. _`Zope's RestrictedPython`: http://pypi.python.org/pypi/RestrictedPython
       
      --------------------------------------------------------------------------
      -Can I use PyPy and RPython to compile smaller parts of my Python program?
      --------------------------------------------------------------------------
      -
      -No.  That would be possible, and we played with early attempts in that
      -direction, but there are many delicate issues: for example, how the
      -compiled and the non-compiled parts exchange data.  Supporting this in a
      -nice way would be a lot of work.
      -
      -PyPy is certainly a good starting point for someone that would like to
      -work in that direction.  Early attempts were dropped because they
      -conflicted with refactorings that we needed in order to progress on the
      -rest of PyPy; the currently active developers of PyPy have different
      -priorities.  If someone wants to start working in that direction I
      -imagine that he might get a (very little) bit of support from us,
      -though.
      -
      -Alternatively, it's possible to write a mixed-module, i.e. an extension
      -module for PyPy in RPython, which you can then import from your Python
      -program when it runs on top of PyPy.  This is similar to writing a C
      -extension module for CPython in term of investment of effort (without
      -all the INCREF/DECREF mess, though).
      -
       ------------------------------------------------------
       What's the ``"NOT_RPYTHON"`` I see in some docstrings?
       ------------------------------------------------------
      @@ -350,7 +259,7 @@
       -------------------------------------------------------------------
       
       It's not necessarily nonsense, but it's not really The PyPy Way.  It's
      -pretty hard, without some kind of type inference, to translate, say this
      +pretty hard, without some kind of type inference, to translate this
       Python::
       
           a + b
      @@ -369,16 +278,16 @@
       Do I have to rewrite my programs in RPython?
       --------------------------------------------
       
      -No.  PyPy always runs your code in its own interpreter, which is a
      -full and compliant Python 2.5 interpreter.  RPython_ is only the
      +No.  And you shouldn't try.  PyPy always runs your code in its own interpreter, which is a
      +full and compliant Python 2.7 interpreter.  RPython is only the
       language in which parts of PyPy itself are written and extension
      -modules for it.  The answer to whether something needs to be written as
      -an extension module, apart from the "gluing to external libraries" reason, will
      -change over time as speed for normal Python code improves.
      +modules for it.  Not only is it not necessary for you to rewrite your
      +code in RPython, it probably won't give you any speed improvements if you 
      +try.
       
      --------------------------
      -Which backends are there?
      --------------------------
      +---------------------------------------------------
      +Which backends are there for the RPython toolchain?
      +---------------------------------------------------
       
       Currently, there are backends for C_, the CLI_, and the JVM_.
       All of these can translate the entire PyPy interpreter.
      @@ -395,31 +304,22 @@
       
       See the `getting-started`_ guide.
       
      +.. _`getting-started`: getting-started-python.html
      +
       .. _`how do I compile my own interpreters`:
       
       -------------------------------------
       How do I compile my own interpreters?
       -------------------------------------
      +Begin by reading `Andrew Brown's tutorial`_ .
       
      -Start from the example of
      -`pypy/translator/goal/targetnopstandalone.py`_, which you compile by
      -typing::
      -
      -    python translate.py targetnopstandalone
      -
      -You can have a look at intermediate C source code, which is (at the
      -moment) put in ``/tmp/usession-*/testing_1/testing_1.c``.  Of course,
      -all the functions and stuff used directly and indirectly by your
      -``entry_point()`` function has to be RPython_.
      -
      -
      -.. _`RPython`: coding-guide.html#rpython
      -.. _`getting-started`: getting-started.html
      -
      -.. include:: _ref.rst
      +.. _`Andrew Brown's tutorial`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html
       
       ----------------------------------------------------------
       Why does PyPy draw a Mandelbrot fractal while translating?
       ----------------------------------------------------------
       
       Because it's fun.
      +
      +.. include:: _ref.txt
      +
      
      diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal_threshold.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Weight threshold used to decide whether to inline flowgraphs.  
      -This is for clever malloc removal (:config:`translation.backendopt.clever_malloc_removal`).
      
      diff --git a/pypy/doc/release-0.9.0.rst b/pypy/doc/release-0.9.0.rst
      --- a/pypy/doc/release-0.9.0.rst
      +++ b/pypy/doc/release-0.9.0.rst
      @@ -59,7 +59,7 @@
       **testing refinements**
           py.test, our testing tool, now has preliminary support for doctests.
           We now run all our tests every night, and you can see the summary at:
      -    http://snake.cs.uni-duesseldorf.de/pypytest/summary.html
      +    http://buildbot.pypy.org/summary
       
       What is PyPy (about)? 
       ------------------------------------------------
      
      diff --git a/pypy/doc/config/translation.gcrootfinder.rst b/pypy/doc/config/translation.gcrootfinder.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.gcrootfinder.rst
      +++ /dev/null
      @@ -1,15 +0,0 @@
      -Choose method how to find roots in the GC. Boehm and refcounting have their own
      -methods, this is mostly only interesting for framework GCs. For those you have
      -a choice of various alternatives:
      -
      - - use a shadow stack (XXX link to paper), e.g. explicitly maintaining a stack
      -   of roots
      -
      - - use stackless to find roots by unwinding the stack.  Requires
      -   :config:`translation.stackless`.  Note that this turned out to
      -   be slower than just using a shadow stack.
      -
      - - use GCC and i386 specific assembler hackery to find the roots on the stack.
      -   This is fastest but platform specific.
      -
      - - Use LLVM's GC facilities to find the roots.
      
      diff --git a/pypy/doc/release-1.4.0beta.rst b/pypy/doc/release-1.4.0beta.rst
      --- a/pypy/doc/release-1.4.0beta.rst
      +++ b/pypy/doc/release-1.4.0beta.rst
      @@ -33,4 +33,4 @@
       Cheers,
       The PyPy team
       
      -.. _`list of patches`: http://codespeak.net/svn/pypy/trunk/pypy/module/cpyext/patches/
      +.. _`list of patches`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/cpyext/patches/
      
      diff --git a/pypy/doc/config/translation.builtins_can_raise_exceptions.rst b/pypy/doc/config/translation.builtins_can_raise_exceptions.txt
      copy from pypy/doc/config/translation.builtins_can_raise_exceptions.rst
      copy to pypy/doc/config/translation.builtins_can_raise_exceptions.txt
      
      diff --git a/.hgignore b/.hgignore
      --- a/.hgignore
      +++ b/.hgignore
      @@ -17,6 +17,7 @@
       ^pypy/module/cpyext/test/.+\.manifest$
       ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$
       ^pypy/doc/.+\.html$
      +^pypy/doc/config/.+\.rst$
       ^pypy/doc/basicblock\.asc$
       ^pypy/doc/.+\.svninfo$
       ^pypy/translator/c/src/libffi_msvc/.+\.obj$
      
      diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst b/pypy/doc/config/translation.backendopt.raisingop2direct_call.txt
      copy from pypy/doc/config/translation.backendopt.raisingop2direct_call.rst
      copy to pypy/doc/config/translation.backendopt.raisingop2direct_call.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.rst b/pypy/doc/config/objspace.usemodules._minimal_curses.txt
      copy from pypy/doc/config/objspace.usemodules._minimal_curses.rst
      copy to pypy/doc/config/objspace.usemodules._minimal_curses.txt
      
      diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.rst b/pypy/doc/config/objspace.std.withdictmeasurement.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.std.withdictmeasurement.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Internal option.
      -
      -.. internal
      
      diff --git a/pypy/doc/config/translation.ootype.mangle.rst b/pypy/doc/config/translation.ootype.mangle.txt
      copy from pypy/doc/config/translation.ootype.mangle.rst
      copy to pypy/doc/config/translation.ootype.mangle.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.zipimport.rst b/pypy/doc/config/objspace.usemodules.zipimport.txt
      copy from pypy/doc/config/objspace.usemodules.zipimport.rst
      copy to pypy/doc/config/objspace.usemodules.zipimport.txt
      
      diff --git a/pypy/doc/config/translation.jit_ffi.rst b/pypy/doc/config/translation.jit_ffi.txt
      copy from pypy/doc/config/translation.jit_ffi.rst
      copy to pypy/doc/config/translation.jit_ffi.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules.itertools.rst b/pypy/doc/config/objspace.usemodules.itertools.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.itertools.rst
      +++ /dev/null
      @@ -1,2 +0,0 @@
      -Use the interp-level 'itertools' module.
      -If not included, a slower app-level version of itertools is used.
      
      diff --git a/pypy/doc/config/translation.list_comprehension_operations.rst b/pypy/doc/config/translation.list_comprehension_operations.txt
      copy from pypy/doc/config/translation.list_comprehension_operations.rst
      copy to pypy/doc/config/translation.list_comprehension_operations.txt
      
      diff --git a/pypy/doc/pypyconfig.py b/pypy/doc/pypyconfig.py
      new file mode 100644
      --- /dev/null
      +++ b/pypy/doc/pypyconfig.py
      @@ -0,0 +1,9 @@
      +
      +
      +def setup(app):
      +    import sys, os
      +    sys.path.insert(0, os.path.abspath("../../"))
      +    from pypy.config import makerestdoc
      +    import py
      +    role = makerestdoc.register_config_role(py.path.local())
      +    app.add_role("config", role)
      
      diff --git a/pypy/doc/config/objspace.usemodules.rst b/pypy/doc/config/objspace.usemodules.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -..  intentionally empty
      
      diff --git a/pypy/doc/config/objspace.usemodules._rawffi.rst b/pypy/doc/config/objspace.usemodules._rawffi.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._rawffi.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -An experimental module providing very low-level interface to
      -C-level libraries, for use when implementing ctypes, not
      -intended for a direct use at all.
      \ No newline at end of file
      
      diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst
      --- a/pypy/doc/architecture.rst
      +++ b/pypy/doc/architecture.rst
      @@ -16,22 +16,20 @@
        * a common translation and support framework for producing
          implementations of dynamic languages, emphasizing a clean
          separation between language specification and implementation
      -   aspects.
      +   aspects.  We call this the `RPython toolchain`_.
       
        * a compliant, flexible and fast implementation of the Python_ Language 
      -   using the above framework to enable new advanced features without having
      -   to encode low level details into it.
      +   which uses the above toolchain to enable new advanced high-level features 
      +   without having to encode the low-level details.
       
      -By separating concerns in this way, we intend for our implementation
      -of Python - and other dynamic languages - to become robust against almost 
      -all implementation decisions, including target platform, memory and 
      -threading models, optimizations applied, up to to the point of being able to
      -automatically *generate* Just-in-Time compilers for dynamic languages.
      -
      -Conversely, our implementation techniques, including the JIT compiler 
      -generator, should become robust against changes in the languages 
      -implemented. 
      -
      +By separating concerns in this way, our implementation
      +of Python - and other dynamic languages - is able to automatically
      +generate a Just-in-Time compiler for any dynamic language.  It also
      +allows a mix-and-match approach to implementation decisions, including
      +many that have historically been outside of a user's control, such as
      +target platform, memory and 
      +threading models, garbage collection strategies, and optimizations applied, 
      +including whether or not to have a JIT in the first place.
       
       High Level Goals
       =============================
      @@ -40,9 +38,10 @@
       -----------------------------------------------
       
       Traditionally, language interpreters are written in a target platform language
      -like C/Posix, Java or C#.  Each such implementation fundamentally provides 
      -a mapping from application source code to the target environment.  One of 
      -the goals of the "all-encompassing" environments, like the .NET framework
      +such as C/Posix, Java or C#.  Each implementation provides 
      +a fundamental mapping between application source code and the target 
      +environment.  One of 
      +the goals of the "all-encompassing" environments, such as the .NET framework
       and to some extent the Java virtual machine, is to provide standardized
       and higher level functionalities in order to support language implementers
       for writing language implementations. 
      @@ -50,7 +49,7 @@
       PyPy is experimenting with a more ambitious approach.  We are using a
       subset of the high-level language Python, called RPython_, in which we
       write languages as simple interpreters with few references to and
      -dependencies on lower level details.  Our translation framework then
      +dependencies on lower level details.  The `RPython toolchain`_
       produces a concrete virtual machine for the platform of our choice by
       inserting appropriate lower level aspects.  The result can be customized
       by selecting other feature and platform configurations.
      @@ -58,8 +57,8 @@
       Our goal is to provide a possible solution to the problem of language
       implementers: having to write ``l * o * p`` interpreters for ``l``
       dynamic languages and ``p`` platforms with ``o`` crucial design
      -decisions.  PyPy aims at having any one of these parameters changeable
      -independently from each other:
      +decisions.  PyPy aims at making it possible to change each of these
      +variables independently such that:
       
       * ``l``: the language that we analyze can be evolved or entirely replaced;
       
      @@ -121,8 +120,8 @@
       The Translation Framework
       -------------------------
       
      -The job of the translation tool chain is to translate RPython_ programs
      -into an efficient version of that program for one of various target
      +The job of the RPython toolchain is to translate RPython_ programs
      +into an efficient version of that program for one of the various target
       platforms, generally one that is considerably lower-level than Python.
       
       The approach we have taken is to reduce the level of abstraction of the
      @@ -133,7 +132,7 @@
       assume an object-oriented model with classes, instances and methods (as,
       for example, the Java and .NET virtual machines do).
       
      -The translation tool chain never sees the RPython source code or syntax
      +The RPython toolchain never sees the RPython source code or syntax
       trees, but rather starts with the *code objects* that define the
       behaviour of the function objects one gives it as input.  It can be
       considered as "freezing" a pre-imported RPython program into an
      @@ -161,7 +160,7 @@
         and compiled into an executable.
       
       This process is described in much more detail in the `document about
      -the translation process`_ and in the paper `Compiling dynamic language
      +the RPython toolchain`_ and in the paper `Compiling dynamic language
       implementations`_.
       
       .. _`control flow graph`: translation.html#the-flow-model
      @@ -169,10 +168,9 @@
       .. _Annotator: translation.html#the-annotation-pass
       .. _RTyper: rtyper.html#overview
       .. _`various transformations`: translation.html#the-optional-transformations
      -.. _`document about the translation process`: translation.html
      +.. _`document about the RPython toolchain`: translation.html
       .. _`garbage collector`: garbage_collection.html
      -
      -
      +.. _`RPython toolchain`: translation.html
       .. _`standard interpreter`: 
       .. _`python interpreter`: 
       
      @@ -233,17 +231,18 @@
        * `The translation document`_: a detailed description of our
          translation process.
       
      - * All our `Technical reports`_, including `Compiling dynamic language
      -   implementations`_.
      -
        * `JIT Generation in PyPy`_, describing how we produce a Just-in-time
          Compiler from an interpreter.
       
      -.. _`documentation index`: docindex.html
      + * A tutorial of how to use the `RPython toolchain`_ to `implement your own
      +   interpreter`_.
      +
      +.. _`documentation index`: index.html#project-documentation
       .. _`getting-started`: getting-started.html
      -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf
      +.. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf
       .. _`the translation document`: translation.html
      -.. _`Compiling dynamic language implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      +.. _`RPython toolchain`: translation.html
      +.. _`Compiling dynamic language implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
       .. _`Technical reports`: index-report.html
       
       .. _`getting started`: getting-started.html
      @@ -254,11 +253,12 @@
       
       .. _`RPython`: coding-guide.html#rpython
       
      -.. _Python: http://docs.python.org/ref
      +.. _Python: http://docs.python.org/reference/
       .. _Psyco: http://psyco.sourceforge.net
       .. _stackless: stackless.html
       .. _`generate Just-In-Time Compilers`: jit/index.html
       .. _`JIT Generation in PyPy`: jit/index.html
      +.. _`implement your own interpreter`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html
       
      -.. include:: _ref.rst
      +.. include:: _ref.txt
       
      
      diff --git a/pypy/doc/config/objspace.usemodules._demo.rst b/pypy/doc/config/objspace.usemodules._demo.rst
      deleted file mode 100644
      --- a/pypy/doc/config/objspace.usemodules._demo.rst
      +++ /dev/null
      @@ -1,3 +0,0 @@
      -Use the '_demo' module. 
      -
      -This is the demo module for mixed modules. Not enabled by default.
      
      diff --git a/pypy/doc/image/parsing_example6.dot b/pypy/doc/image/parsing_example6.dot
      deleted file mode 100644
      --- a/pypy/doc/image/parsing_example6.dot
      +++ /dev/null
      @@ -1,9 +0,0 @@
      -digraph G{
      -"-1213518708" [label="list"];
      -"-1213518708" -> "-1213518196";
      -"-1213518196" [shape=box,label="DECIMAL\n'1'"];
      -"-1213518708" -> "-1213518260";
      -"-1213518260" [label="list"];
      -"-1213518260" -> "-1213520308";
      -"-1213520308" [shape=box,label="DECIMAL\n'2'"];
      -}
      \ No newline at end of file
      
      diff --git a/pypy/doc/config/translation.backendopt.merge_if_blocks.rst b/pypy/doc/config/translation.backendopt.merge_if_blocks.txt
      copy from pypy/doc/config/translation.backendopt.merge_if_blocks.rst
      copy to pypy/doc/config/translation.backendopt.merge_if_blocks.txt
      
      diff --git a/pypy/doc/config/objspace.extmodules.rst b/pypy/doc/config/objspace.extmodules.txt
      copy from pypy/doc/config/objspace.extmodules.rst
      copy to pypy/doc/config/objspace.extmodules.txt
      
      diff --git a/pypy/doc/config/objspace.usemodules._rawffi.rst b/pypy/doc/config/objspace.usemodules._rawffi.txt
      copy from pypy/doc/config/objspace.usemodules._rawffi.rst
      copy to pypy/doc/config/objspace.usemodules._rawffi.txt
      
      diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
      --- a/pypy/doc/cpython_differences.rst
      +++ b/pypy/doc/cpython_differences.rst
      @@ -23,34 +23,47 @@
           _ast
           _bisect
           _codecs
      +    _collections
      +    `_ffi`_
      +    _hashlib
      +    _io
      +    _locale
           _lsprof
      +    _md5
           `_minimal_curses`_
      +    _multiprocessing
           _random
           `_rawffi`_
      -    _ssl
      +    _sha
           _socket
           _sre
      +    _ssl
      +    _warnings
           _weakref
      +    _winreg
           array
      +    binascii
           bz2
           cStringIO
      +    clr
      +    cmath
           `cpyext`_
           crypt
           errno
           exceptions
           fcntl
           gc
      +    imp
           itertools
           marshal
           math
      -    md5
           mmap
           operator
      +    oracle
           parser
           posix
           pyexpat
           select
      -    sha
           signal
           struct
           symbol
      @@ -77,8 +90,7 @@
       
       * Supported by being rewritten in pure Python (possibly using ``ctypes``):
         see the `lib_pypy/`_ directory.  Examples of modules that we
      -  support this way: ``ctypes``, ``cPickle``,
      -  ``cStringIO``, ``cmath``, ``dbm`` (?), ``datetime``, ``binascii``...  
      +  support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``...
         Note that some modules are both in there and in the list above;
         by default, the built-in module is used (but can be disabled
         at translation time).
      @@ -89,6 +101,7 @@
       
       .. the nonstandard modules are listed below...
       .. _`__pypy__`: __pypy__-module.html
      +.. _`_ffi`: ctypes-implementation.html
       .. _`_rawffi`: ctypes-implementation.html
       .. _`_minimal_curses`: config/objspace.usemodules._minimal_curses.html
       .. _`cpyext`: http://morepypy.blogspot.com/2010/04/using-cpython-extension-modules-with.html
      @@ -114,7 +127,7 @@
       adopted by Jython or IronPython (or any other port of Python to Java or
       .NET, like PyPy itself).
       
      -This affects the precise time at which __del__ methods are called, which
      +This affects the precise time at which ``__del__`` methods are called, which
       is not reliable in PyPy (nor Jython nor IronPython).  It also means that
       weak references may stay alive for a bit longer than expected.  This
       makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
      @@ -124,12 +137,12 @@
       ``ReferenceError`` at any place that uses them.
       
       There are a few extra implications for the difference in the GC.  Most
      -notably, if an object has a __del__, the __del__ is never called more
      -than once in PyPy; but CPython will call the same __del__ several times
      -if the object is resurrected and dies again.  The __del__ methods are
      +notably, if an object has a ``__del__``, the ``__del__`` is never called more
      +than once in PyPy; but CPython will call the same ``__del__`` several times
      +if the object is resurrected and dies again.  The ``__del__`` methods are
       called in "the right" order if they are on objects pointing to each
       other, as in CPython, but unlike CPython, if there is a dead cycle of
      -objects referencing each other, their __del__ methods are called anyway;
      +objects referencing each other, their ``__del__`` methods are called anyway;
       CPython would instead put them into the list ``garbage`` of the ``gc``
       module.  More information is available on the blog `[1]`__ `[2]`__.
       
      @@ -142,7 +155,7 @@
       and calling it a lot can lead to performance problem.
       
       Note that if you have a long chain of objects, each with a reference to
      -the next one, and each with a __del__, PyPy's GC will perform badly.  On
      +the next one, and each with a ``__del__``, PyPy's GC will perform badly.  On
       the bright side, in most other cases, benchmarks have shown that PyPy's
       GCs perform much better than CPython's.
       
      @@ -221,5 +234,9 @@
         it could be supported, but then it will likely work in many
  *more* cases on PyPy than on CPython 2.6/2.7.)
       
      +* the ``__builtins__`` name is always referencing the ``__builtin__`` module,
      +  never a dictionary as it sometimes is in CPython. Assigning to
      +  ``__builtins__`` has no effect.
       
      -.. include:: _ref.rst
      +.. include:: _ref.txt
      +
      
      diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst
      --- a/pypy/doc/translation.rst
      +++ b/pypy/doc/translation.rst
      @@ -1,18 +1,18 @@
      -=====================
      - PyPy - Translation
      -=====================
      +=============================
      + PyPy - The RPython Toolchain
      +=============================
       
       .. contents::
       
       
      -This document describes the tool chain that we have developed to analyze
      +This document describes the toolchain that we have developed to analyze
       and "compile" RPython_ programs (like PyPy itself) to various target
       platforms.
       
       .. _RPython: coding-guide.html#restricted-python
       
       It consists of three broad sections: a slightly simplified overview, a
      -brief introduction to each of the major components of our tool chain and
      +brief introduction to each of the major components of our toolchain and
       then a more comprehensive section describing how the pieces fit together.
       If you are reading this document for the first time, the Overview_ is
       likely to be most useful, if you are trying to refresh your PyPy memory
      @@ -21,7 +21,7 @@
       Overview
       ========
       
      -The job of translation tool chain is to translate RPython_ programs into an
      +The job of the translation toolchain is to translate RPython_ programs into an
       efficient version of that program for one of various target platforms,
       generally one that is considerably lower-level than Python.  It divides
       this task into several steps, and the purpose of this document is to
      @@ -29,11 +29,8 @@
       
       As of the 1.2 release, RPython_ programs can be translated into the following
       languages/platforms: C/POSIX, CLI/.NET
      -and Java/JVM (in addition, there's `a backend`_ that translates
      -`application-level`_ into `interpreter-level`_ code, but this is a special
      -case in several ways).
      +and Java/JVM.
       
      -.. _`a backend`: geninterp.html
       .. _`application-level`: coding-guide.html#application-level
       .. _`interpreter-level`: coding-guide.html#interpreter-level
       
      @@ -43,7 +40,7 @@
       
       .. _`initialization time`:
       
      -The translation tool chain never sees Python source code or syntax
      +The RPython translation toolchain never sees Python source code or syntax
       trees, but rather starts with the *code objects* that define the
       behaviour of the function objects one gives it as input.  The
       `bytecode evaluator`_ and the `Flow Object Space`_ work through these
      @@ -93,7 +90,7 @@
       (although these steps are not quite as distinct as you might think from
       this presentation).
       
      -There is an `interactive interface`_ called `translatorshell.py`_ to the
      +There is an `interactive interface`_ called `pypy/bin/translatorshell.py`_ to the
       translation process which allows you to interactively work through these
       stages.
       
      @@ -104,10 +101,9 @@
       
       .. _`PDF color version`: image/translation.pdf
       .. _`bytecode evaluator`: interpreter.html
      -.. _`abstract interpretation`: theory.html#abstract-interpretation
      +.. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation
       .. _`Flow Object Space`: objspace.html#the-flow-object-space
       .. _`interactive interface`: getting-started-dev.html#try-out-the-translator
      -.. _`translatorshell.py`: ../../../../pypy/bin/translatorshell.py
       
       .. _`flow model`:
       .. _`control flow graphs`: 
      @@ -120,7 +116,7 @@
       which are the basic data structures of the translation
       process.
       
      -All these types are defined in `pypy.objspace.flow.model`_ (which is a rather
      +All these types are defined in `pypy/objspace/flow/model.py`_ (which is a rather
       important module in the PyPy source base, to reinforce the point).
       
       The flow graph of a function is represented by the class ``FunctionGraph``.
      @@ -274,7 +270,6 @@
           should not attempt to actually mutate such Constants.
       
       .. _`document describing object spaces`: objspace.html
      -.. _`pypy.objspace.flow.model`: ../../../../pypy/objspace/flow/model.py
       
       
       .. _Annotator:
      @@ -298,7 +293,7 @@
       An "annotation" is an instance of a subclass of ``SomeObject``.  Each
       subclass that represents a specific family of objects.
       
      -Here is an overview (see ``pypy.annotation.model``):
+Here is an overview (see ``pypy/annotation/model.py``):
       
       * ``SomeObject`` is the base class.  An instance of ``SomeObject()``
         represents any Python object, and as such usually means that the input
      @@ -390,7 +385,7 @@
       The RPython Typer
       =================
       
      -http://codespeak.net/pypy/trunk/pypy/rpython/
      +https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/
       
       The RTyper is the first place where the choice of backend makes a
       difference; as outlined above we are assuming that ANSI C is the target.
      @@ -456,7 +451,7 @@
       `D07.1 Massive Parallelism and Translation Aspects`_ for further details.
       
       .. _`Technical report`: 
      -.. _`D07.1 Massive Parallelism and Translation Aspects`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
      +.. _`D07.1 Massive Parallelism and Translation Aspects`: https://bitbucket.org/pypy/extradoc/raw/ee3059291497/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf
       
       Backend Optimizations
       ---------------------
      @@ -630,17 +625,13 @@
       The C Back-End
       ==============
       
      -http://codespeak.net/pypy/trunk/pypy/translator/c/
      -
      -GenC is not really documented at the moment.  The basic principle of creating
      -code from flow graphs is similar to the `Python back-end`_.  See also
      -"Generating C code" in our `EU report about translation`_.
      +https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/
       
       GenC is usually the most actively maintained backend -- everyone working on
       PyPy has a C compiler, for one thing -- and is usually where new features are
       implemented first.
       
      -.. _`EU report about translation`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
      +.. _`EU report about translation`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf
       
       
       A Historical Note
      @@ -710,37 +701,22 @@
       GenJVM is almost entirely the work of Niko Matsakis, who worked on it
       also as part of the Summer of PyPy program.
       
      -.. _`Python again`:
      -.. _`Python back-end`:
      -
      -The Interpreter-Level backend
      ------------------------------
      -
      -http://codespeak.net/pypy/trunk/pypy/translator/geninterplevel.py
      -
      -Above, this backend was described as a "special case in several ways".  One of
      -these ways is that the job it does is specific to PyPy's standard interpreter,
      -and the other is that it does not even use the annotator -- it works directly
      -the graphs produced by the Flow Object Space.
      -
      -See `geninterp's documentation `__.
      -
       .. _extfunccalls:
       
       External Function Calls
       =======================
       
      -External function call approach is described in `rffi`_ documentation.
      +The external function call approach is described in `rffi`_ documentation.
       
       .. _`rffi`: rffi.html
       
       How It Fits Together
       ====================
       
      -As should be clear by now, the translation tool chain of PyPy is a flexible
      +As should be clear by now, the translation toolchain of PyPy is a flexible
       and complicated beast, formed from many separate components.
       
      -The following image summarizes the various parts of the tool chain as of the
      +The following image summarizes the various parts of the toolchain as of the
       0.9 release, with the default translation to C highlighted:
       
       .. image:: image/pypy-translation-0.9.png
      @@ -768,4 +744,4 @@
       collection of functions (which may refer to each other in a mutually
       recursive fashion) and annotate and rtype them all at once.
       
      -.. include:: _ref.rst
      +.. include:: _ref.txt
      
      diff --git a/pypy/doc/config/translation.ootype.rst b/pypy/doc/config/translation.ootype.rst
      deleted file mode 100644
      --- a/pypy/doc/config/translation.ootype.rst
      +++ /dev/null
      @@ -1,1 +0,0 @@
      -This group contains options specific for ootypesystem.
      
      diff --git a/pypy/doc/old_news.rst b/pypy/doc/old_news.rst
      deleted file mode 100644
      --- a/pypy/doc/old_news.rst
      +++ /dev/null
      @@ -1,306 +0,0 @@
      -The PyPy project aims at producing a flexible and fast Python_
      -implementation.  The guiding idea is to translate a Python-level
      -description of the Python language itself to lower level languages.
      -Rumors have it that the secret goal is being faster-than-C which is
      -nonsense, isn't it?  `more...`_
      -
      -.. _Python: http://www.python.org/doc/current/ref/ref.html
      -.. _`more...`: architecture.html#mission-statement 
      -
      -
      -Leysin Winter Sports Sprint, 12th - 19th January 2008
      -==================================================================
      -
      -.. raw:: html
      -
      -   
      - -The next PyPy sprint will be held in Leysin, Switzerland, for -the fifth time. The overall idea of the sprint is to continue -working on making PyPy ready for general use. - -.. raw:: html - -
      - -The proposed topics are: ctypes, JIT, testing, LLVM. This is -a fully public sprint, so newcomers and other topics are -welcome. And like previous winters, the main side goal is to -have fun in winter sports :-) See the `sprint announcement`__ -for details. - -.. raw:: html - -   -
      - -.. __: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2008/announcement.html - - -PyPy blog started -================= - -A few days ago some of the PyPy developers started a `PyPy Status Blog`_. Let's -see how this works out. *(November 13th, 2007)* - -.. _`PyPy Status Blog`: http://morepypy.blogspot.com - - -PyPy/Squeak Sprint in Bern finished -=================================== - -The Bern sprint, being the first Squeak-PyPy-collaboration-sprint is finished. -The week was very intense and productive, see `Bern Sprint Summary blog post`_ -for a list of things we accomplished. We covered most of what happened during -the sprint in quite some detail on the `PyPy Squeak blog`_. The sprint was -hosted by the Software Composition Group of the University of Bern from the -22nd to the 26th of October 2007. - -.. _`Bern sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/bern2007/announce.html -.. _`people that are known to come`: http://codespeak.net/pypy/extradoc/sprintinfo/bern2007/people.html -.. _`Bern Sprint Summary blog post`: http://pypysqueak.blogspot.com/2007/10/bern-sprint-finished-summary.html -.. _`PyPy Squeak blog`: http://pypysqueak.blogspot.com - - - -PyPy Sprint in Gothenburg: 19nd-25th November 2007 -================================================================== - - -The next post-EU-project PyPy sprint will be in Gothenburg, Sweden. It will -focus on cleaning up the PyPy codebase and making it ready for the next round -of improvements. It is a "public" sprint but it will probably be more suitable -for people already somewhat acquainted with PyPy. For more information see the -`Gothenburg sprint announcement`_ or a list of the `people that are known to -come to Gothenburg`_. - -.. _`Gothenburg sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2007/announce.html -.. 
_`people that are known to come to Gothenburg`: http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2007/people.html - - - - -PyPy Sprint at EuroPython, Vilnius is finished -================================================================== - -The sprint at the last EuroPython_ conference in Vilnius from the 9th to -the 11th of July, 2007 is finished. For more information -see the `Vilnius sprint announcement`_. - - -.. _EuroPython: http://europython.org -.. _`Vilnius sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2007/announcement.html - - -Review passed with flying colours -================================= - -On the 31st of May 2007 the PyPy project was reviewed by the EU -Commission in Brussels. Reviewers were Roel Wuyts, Unversité Libre de -Bruxelles and Aki Lumiaho, Ramboll, Finland. Present was also our -Project Officer, Charles McMillan. After 6 hours of presentations of -the various aspects of the project, it only took the reviewers a few -minutes to decide that the project was accepted, without any further -work being required. Professor Wuyts, who has dynamic programming -languages as his main field of research was very enthusiastic about -the entire project and the results with the Just In Time Compiler -Generator in particular. He offered his help in establishing -collaborations with the communities around Prolog, Smalltalk, Lisp and -other dynamic languages, as well as giving hints on how to get our -results most widely publicized. - -The preparations for the review left the team rather exhausted so -development progress will be rather slow until the sprint at -Europython in the second week of July. - -PyPy EU funding period over, Review ahead -=========================================================== - -The 28 month EU project period of PyPy is over and new things are to come! -On 11th May we `submitted last documents`_ to the European Union and are now -heading towards a 31st May Review Meeting in Bruxelles. 
The `PyPy EU Final -Activity Report`_ summarizes what we did and what we have in mind -on technical, scientific and community levels. It also contains reflections -and recommendations possibly interesting to other projects aiming at -EU funded Open Source research. *(12th May, 2007)* - -.. _`submitted last documents`: http://codespeak.net/pypy/dist/pypy/doc/index-report.html -.. _`PyPy EU Final Activity Report`: http://codespeak.net/pypy/extradoc/eu-report/PYPY-EU-Final-Activity-Report.pdf - -PyPy 1.0: JIT compiler generator, optimizations and more -================================================================== - -We are proud to release PyPy 1.0.0, our sixth public release (Download_). See -the `release announcement `__ to read about the -many new features in this release, especially the results of our -JIT generation technology. See also our detailed instructions on -how to `get started`_. *(March 27th, 2007)* - -.. _Download: getting-started.html#just-the-facts -.. _`get started`: getting-started.html - - - - -PyPy Trillke Sprints (25-28th Feb and 1-5th March 2007) finished -================================================================== - -Both of the sprints that mark the end of the EU period are over. There were very -good results, both on a `report level`_ as well as on a `technical level`_. -The sprint also had a good discussion about the future of PyPy after the EU -project ends, see the `mail Armin wrote`_ and `the meeting's minutes`_. You can -also look at the pictures that `Carl Friedrich`_ and that `Lene took`_ during -the sprint or read the `sprint announcement`_. *(March 10th, 2007)* - -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/announcement.html -.. _`report level`: http://codespeak.net/pipermail/pypy-dev/2007q1/003578.html -.. _`technical level`: http://codespeak.net/pipermail/pypy-dev/2007q1/003579.html -.. _`Carl Friedrich`: http://codespeak.net/~cfbolz/hildesheim3-sprint-pictures/ -.. 
_`Lene took`: http://codespeak.net/~lene/trillke-sprint-web/Page1.html -.. _`mail Armin wrote`: http://codespeak.net/pipermail/pypy-dev/2007q1/003577.html -.. _`the meeting's minutes`: http://codespeak.net/svn/pypy/extradoc/minute/post-eu-structure.txt - - - - -PyPy 0.99.0: optimizations, backends, new object spaces and more -================================================================== - -We are proud to release PyPy 0.99.0, our fifth public release. See -the `release announcement `__ to read about the -many new features in this release. See also our detailed instructions on -how to `get started`_. *(February 17th, 2007)* - -.. _`get started`: getting-started.html - - -py lib 0.9.0: py.test, distributed execution, greenlets and more -================================================================== - -Our development support and testing library was publically released, see the -`0.9 release announcement `__ -and its extensive `online documentation `__. -*(February 15th, 2007)* - - - -Leysin Winter Sports Sprint, 8th - 14th January 2007 -================================================================== - -.. raw:: html - -
      - -The PyPy Leysin sprint is over. We worked hard on various topics, including -preparing the upcoming py-lib and PyPy releases. For more details, see the -`Leysin sprint report`_, the `Leysin announcement`_ and the -`list of people present`_. - - -.. raw:: html - -
      - -.. _`Leysin announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/announcement.html -.. _`Leysin sprint report`: http://codespeak.net/pipermail/pypy-dev/2007q1/003481.html -.. _`list of people present`: http://codespeak.net/svn/pypy/extradoc/sprintinfo/leysin-winter-2007/people.txt - - -Massive Parallelism and Translation Aspects -======================================================== - -Our next big `EU report`_ about Stackless features, optimizations, and -memory management is finished. You can download it `as pdf`_. - -.. _`EU report`: index-report.html -.. _`as pdf`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf - - -Duesseldorf sprint #2, 30th October - 5th November over -================================================================== - -The Duesseldorf sprint is over. It was a very productive sprint with work done -in various areas. Read the `sprint report`_ for a detailed description of what -was achieved and the `full announcement`_ for various details. - -.. _`full announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/announce.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q4/003396.html - - - -Dynamic Languages Symposium (OOPSLA, 23rd October) -================================================================== - -We will present a paper at the `Dynamic Languages Symposium`_ describing -`PyPy's approach to virtual machine construction`_. The DLS is a -one-day forum within OOPSLA'06 (Portland, Oregon, USA). The paper is a -motivated overview of the annotation/rtyping translation tool-chain, -with experimental results. - -As usual, terminology with PyPy is delicate :-) Indeed, the title is -both correct and misleading - it does not describe "the" PyPy virtual -machine, since we have never hand-written one. This paper focuses on -how we are generating such VMs, not what they do. - -.. 
_`Dynamic Languages Symposium`: http://www.oopsla.org/2006/submission/tracks/dynamic_languages_symposium.html -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf - - - -Summer of PyPy: Calls for proposals open now! -================================================================== - -Happily, we are able to offer students mentoring and full sprint -participant's funding if we receive a proposal outlining an -interesting project related to PyPy and its development tools. This -follows up on the "Summer of Code" campaign from Google but is -completely independent from it and also works differently. -See the full call for details: - - http://codespeak.net/pypy/dist/pypy/doc/summer-of-pypy.html - - -Ireland sprint 21st-27th August -================================================================== - -The last PyPy sprint happened in the nice city of -Limerick in Ireland from 21st till 27th August. -The main focus of the sprint was on JIT compiler works, -various optimization works, porting extension modules, -infrastructure works like a build tool for PyPy and -extended (distributed) testing. -Read the full `announcement`_ for more details. - -.. _`announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ireland-2006/announce.html - -Release of PyPy video documentation -================================================================== - -The PyPy team is happy to announce that the first bunch of PyPy videos -can now be downloaded from: - -http://codespeak.net/pypy/dist/pypy/doc/video-index.html - -The videos introduce involved people and contain different talks, tutorials and -interviews and can be downloaded via bittorrent. **29th June 2006** - -PyPy 0.9.0 -================================================================== - -We are proud to release PyPy 0.9.0, our fourth public release. See -the `release announcement `__ to read about the -many new features in this release. 
- -PyPy and Summer of Code 2006 -================================================================== - -PyPy will again mentor students through Google's `Summer of Code`_ -campaign. Three students will kick-off their work on PyPy by -participating in the Duesseldorf sprint. They will be exploring a -back-end for Microsoft.NET, work on ways to build web applications -with Javascript code (in this case by translating RPython to -Javascript) and porting some CPython modules to use ctypes. Welcome to -the team! - -.. _`Summer of Code`: http://code.google.com/soc/psf/about.html - diff --git a/pypy/doc/discussion/distribution.rst b/pypy/doc/discussion/distribution.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution.rst +++ /dev/null @@ -1,34 +0,0 @@ -=================================================== -(Semi)-transparent distribution of RPython programs -=================================================== - -Some (rough) ideas how I see distribution ------------------------------------------ - -The main point about it, is to behave very much like JIT - not -to perform distribution on Python source code level, but instead -perform distribution of RPython source, and eventually perform -distribution of interpreter at the end. - -This attempt gives same advantages as off-line JIT (any RPython based -interpreter, etc.) and gives nice field to play with different -distribution heuristics. This also makes eventually nice possibility -of integrating JIT with distribution, thus allowing distribution -heuristics to have more information that they might have otherwise and -as well with specializing different nodes in performing different tasks. - -Flow graph level ----------------- - -Probably the best place to perform distribution attempt is to insert -special graph distributing operations into low-level graphs (either lltype -or ootype based), which will allow distribution heuristic to decide -on entrypoint to block/graph/some other structure??? 
what variables/functions -are accessed inside some part and if it's worth transferring it over wire. - -Backend level -------------- - -Backends will need explicit support for distribution of any kind. Basically -it should be possible for backend to remotely call block/graph/structure -in any manner (it should strongly depend on backend possibilities). diff --git a/pypy/doc/config/objspace.usemodules._hashlib.rst b/pypy/doc/config/objspace.usemodules._hashlib.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._hashlib.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_hashlib' module. -Used by the 'hashlib' standard lib module, and indirectly by the various cryptographic libs. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._socket.rst b/pypy/doc/config/objspace.usemodules._socket.txt copy from pypy/doc/config/objspace.usemodules._socket.rst copy to pypy/doc/config/objspace.usemodules._socket.txt diff --git a/pypy/doc/maemo.rst b/pypy/doc/maemo.rst deleted file mode 100644 --- a/pypy/doc/maemo.rst +++ /dev/null @@ -1,187 +0,0 @@ -How to run PyPy on top of maemo platform -======================================== - -This howto explains how to use Scratchbox_ to cross-compile PyPy's -Python Interpreter to an `Internet-Tablet-OS`_, more specifically -the Maemo_ platform. This howto should work well for getting -a usable Python Interpreter for Nokia's N810_ device. - -setup cross-compilation environment -------------------------------------- - -The main steps are to install scratchbox and the Maemo SDK. Please refer -to Nokia's `INSTALL.txt`_ for more detail. - -Adjust linux kernel settings -+++++++++++++++++++++++++++++++++ - -In order to install and run scratchbox you will need to adjust -your Linux kernel settings. Note that the VDSO setting may -crash your computer - if that is the case, try running without -this setting. 
You can try it like this:: - - $ echo 4096 | sudo tee /proc/sys/vm/mmap_min_addr - $ echo 0 | sudo tee /proc/sys/vm/vdso_enabled - -If that works fine for you (on some machines the vdso setting can freeze machines) -you can make the changes permanent by editing ``/etc/sysctl.conf`` to contain:: - - vm.vdso_enabled = 0 - vm.mmap_min_addr = 4096 - -install scratchbox packages -+++++++++++++++++++++++++++++++++ - -Download - - http://repository.maemo.org/stable/diablo/maemo-scratchbox-install_4.1.1.sh - -and run this script as root:: - - $ sh maemo-scratchbox-install_4.1.1.sh -s /scratchbox -u ACCOUNTNAME - -The script will automatically download Debian packages or tarballs -and pre-configure a scratchbox environment with so called "devkits" -and "toolchains" for performing cross-compilation. It's fine -and recommended to use your linux account name as a scratchbox -ACCOUNTNAME. - -It also sets up an "sbox" group on your system and makes you -a member - giving the right to login to a scratchbox environment. - -testing that scratchbox environment works -+++++++++++++++++++++++++++++++++++++++++++++++ - -Login freshly to your Linux account in order to activate -your membership in the "sbox" unix group and then type:: - - $ /scratchbox/login - -This should warn you with something like "sb-conf: no current -target" because we have not yet created a cross-compilation -target. - -Note that Scratchbox starts daemon services which -can be controlled via:: - - /scratchbox/sbin/sbox_ctl start|stop - - -Installing the Maemo SDK -+++++++++++++++++++++++++++++++ - -To mimic the specific N810_ environment we now install the Maemo-SDK. -This will create an target within our new scratchbox environment -that we then use to compile PyPy. - -Make sure that you are a member of the "sbox" group - this might -require logging out and in again. 
- -Then, download - - http://repository.maemo.org/stable/diablo/maemo-sdk-install_4.1.1.sh - -and execute it with user privileges:: - - $ sh maemo-sdk-install_4.1.1.sh - -When being asked select the default "Runtime + Dev" packages. You do not need -Closed source Nokia binaries for PyPy. This installation -script will download "rootstraps" and create so called -"targets" and preselect the "DIABLO_ARMEL" target for ARM -compilation. Within the targets a large number of packages -will be pre-installed resulting in a base scratchbox -environment that is usable for cross compilation of PyPy. - -Customizing the DIABLO_ARMEL target for PyPy -++++++++++++++++++++++++++++++++++++++++++++++++ - -As PyPy does not yet provide a debian package description -file for use on Maemo, we have to install some dependencies manually -into our Scratchbox target environment. - -1. Go into your scratchbox by executing ``/scratchbox/login`` - (this should bring you to a shell with the DIABLO_ARMEL target) - -2. Add these lines to ``/etc/apt/sources.list``:: - - deb http://repository.maemo.org/extras/ diablo free non-free - deb http://repository.maemo.org/extras-devel/ diablo free non-free - - NOTE: if you have an older version of Maemo on your device you - can try substitute "chinook" for "diablo" in the above lines - and/or update your firmware. You can probably see which version - you are using by looking at the other content of the ``sources.list``. - -3. Perform ``apt-get update``. - -4. Install some necessary packages:: - - apt-get install python2.5-dev libffi4-dev zlib1g-dev libbz2-dev libgc-dev libncurses5-dev - - The "libgc-dev" package is only needed if you want to use the Boehm - garbage collector. - -5. Leave the scratchbox shell again with ``exit``. - - -Translating PyPy for the Maemo platform ------------------------------------------- - -You at least need "gcc" and "libc-dev" packages on your host system -to compile pypy. 
The scratchbox and its DIABLO_ARMEL target contains -its own copies of GCC, various C libraries and header files -which pypy needs for successful cross-compilation. - -Now, on the host system, perform a subversion checkout of PyPy:: - - svn co https://codespeak.net/svn/pypy/trunk pypy-trunk - -Several svn revisions since the 60000's are known to work and -the last manually tested one is currently 65011. - -Change to the ``pypy-trunk/pypy/translator/goal`` directory and execute:: - - python translate.py --platform=maemo --opt=3 - -You need to run translate.py using Python 2.5. This will last some 30-60 -minutes on most machines. For compiling the C source code PyPy's tool chain -will use our scratchbox/Maemo cross-compilation environment. - -When this step succeeds, your ``goal`` directory will contain a binary called -``pypy-c`` which is executable on the Maemo device. To run this binary -on your device you need to also copy some support files. A good way to -perform copies to your device is to install OpenSSH on the -mobile device and use "scp" or rsync for transferring files. - -You can just copy your whole pypy-trunk directory over to your mobile -device - however, only these should be needed:: - - lib/pypy1.2/lib_pypy - lib/pypy1.2/lib-python - pypy/translator/goal/pypy-c - -It is necessary that the ``pypy-c`` can find a "lib-python" and "lib_pypy" directory -if you want to successfully startup the interpreter on the device. - -Start ``pypy-c`` on the device. If you see an error like "setupterm: could not find terminal" -you probably need to perform this install on the device:: - - apt-get install ncurses-base - -Eventually you should see something like:: - - Nokia-N810-51-3:~/pypy/trunk# ./pypy-c - Python Python 2.5.2 (pypy 1.0.0 build 59527) on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``E09 2K @CAA:85?'' - >>>> - - -.. _N810: http://en.wikipedia.org/wiki/Nokia_N810 -.. 
_`Internet-Tablet-OS`: http://en.wikipedia.org/wiki/Internet_Tablet_OS -.. _Maemo: http://www.maemo.org -.. _Scratchbox: http://www.scratchbox.org -.. _`INSTALL.txt`: http://tablets-dev.nokia.com/4.1/INSTALL.txt - - diff --git a/pypy/doc/config/translation.cc.rst b/pypy/doc/config/translation.cc.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cc.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify which C compiler to use. diff --git a/pypy/doc/config/objspace.lonepycfiles.rst b/pypy/doc/config/objspace.lonepycfiles.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.lonepycfiles.rst +++ /dev/null @@ -1,16 +0,0 @@ -If turned on, PyPy accepts to import a module ``x`` if it finds a -file ``x.pyc`` even if there is no file ``x.py``. - -This is the way that CPython behaves, but it is disabled by -default for PyPy because it is a common cause of issues: most -typically, the ``x.py`` file is removed (manually or by a -version control system) but the ``x`` module remains -accidentally importable because the ``x.pyc`` file stays -around. - -The usual reason for wanting this feature is to distribute -non-open-source Python programs by distributing ``pyc`` files -only, but this use case is not practical for PyPy at the -moment because multiple versions of PyPy compiled with various -optimizations might be unable to load each other's ``pyc`` -files. diff --git a/pypy/doc/config/objspace.std.withtypeversion.rst b/pypy/doc/config/objspace.std.withtypeversion.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.rst +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. 
internal diff --git a/pypy/doc/image/parsing_example1.dot b/pypy/doc/image/parsing_example1.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example1.dot +++ /dev/null @@ -1,27 +0,0 @@ -digraph G{ -"-1213931828" [label="additive"]; -"-1213931828" -> "-1213951956"; -"-1213951956" [label="multitive"]; -"-1213951956" -> "-1213949172"; -"-1213949172" [label="primary"]; -"-1213949172" -> "-1213949812"; -"-1213949812" [shape=box,label="DECIMAL\l'12'"]; -"-1213931828" -> "-1213935220"; -"-1213935220" [shape=box,label="__0_+\l'+'"]; -"-1213931828" -> "-1213951316"; -"-1213951316" [label="additive"]; -"-1213951316" -> "-1213948180"; -"-1213948180" [label="multitive"]; -"-1213948180" -> "-1213951380"; -"-1213951380" [label="primary"]; -"-1213951380" -> "-1213951508"; -"-1213951508" [shape=box,label="DECIMAL\l'4'"]; -"-1213948180" -> "-1213948788"; -"-1213948788" [shape=box,label="__1_*\l'*'"]; -"-1213948180" -> "-1213951060"; -"-1213951060" [label="multitive"]; -"-1213951060" -> "-1213948980"; -"-1213948980" [label="primary"]; -"-1213948980" -> "-1213950420"; -"-1213950420" [shape=box,label="DECIMAL\l'5'"]; -} diff --git a/pypy/doc/config/translation.thread.rst b/pypy/doc/config/translation.thread.txt copy from pypy/doc/config/translation.thread.rst copy to pypy/doc/config/translation.thread.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline_heuristic.txt diff --git a/pypy/doc/config/objspace.usemodules.fcntl.rst b/pypy/doc/config/objspace.usemodules.fcntl.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.fcntl.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'fcntl' module. -This module is expected to be fully working. 
diff --git a/pypy/doc/config/objspace.disable_call_speedhacks.rst b/pypy/doc/config/objspace.disable_call_speedhacks.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.disable_call_speedhacks.rst +++ /dev/null @@ -1,2 +0,0 @@ -disable the speed hacks that the interpreter normally does. Usually you don't -want to set this to False, but some object spaces require it. diff --git a/pypy/doc/config/objspace.usemodules.gc.rst b/pypy/doc/config/objspace.usemodules.gc.txt copy from pypy/doc/config/objspace.usemodules.gc.rst copy to pypy/doc/config/objspace.usemodules.gc.txt diff --git a/pypy/doc/config/objspace.std.withsmalllong.rst b/pypy/doc/config/objspace.std.withsmalllong.txt copy from pypy/doc/config/objspace.std.withsmalllong.rst copy to pypy/doc/config/objspace.std.withsmalllong.txt diff --git a/pypy/doc/config/objspace.nofaking.rst b/pypy/doc/config/objspace.nofaking.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.nofaking.rst +++ /dev/null @@ -1,7 +0,0 @@ -This options prevents the automagic borrowing of implementations of -modules and types not present in PyPy from CPython. - -As such, it is required when translating, as then there is no CPython -to borrow from. For running py.py it is useful for testing the -implementation of modules like "posix", but it makes everything even -slower than it is already. 
diff --git a/pypy/doc/config/translation.gctransformer.rst b/pypy/doc/config/translation.gctransformer.txt copy from pypy/doc/config/translation.gctransformer.rst copy to pypy/doc/config/translation.gctransformer.txt diff --git a/pypy/doc/config/translation.backend.rst b/pypy/doc/config/translation.backend.txt copy from pypy/doc/config/translation.backend.rst copy to pypy/doc/config/translation.backend.txt diff --git a/pypy/doc/config/translation.backendopt.really_remove_asserts.rst b/pypy/doc/config/translation.backendopt.really_remove_asserts.txt copy from pypy/doc/config/translation.backendopt.really_remove_asserts.rst copy to pypy/doc/config/translation.backendopt.really_remove_asserts.txt diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -37,27 +37,29 @@ .. _`thunk object space docs`: objspace-proxies.html#thunk .. _`interface section of the thunk object space docs`: objspace-proxies.html#thunk-interface -Taint Object Space Functionality -================================ +.. broken: -When the taint object space is used (choose with :config:`objspace.name`), -the following names are put into ``__pypy__``: + Taint Object Space Functionality + ================================ - - ``taint`` - - ``is_tainted`` - - ``untaint`` - - ``taint_atomic`` - - ``_taint_debug`` - - ``_taint_look`` - - ``TaintError`` + When the taint object space is used (choose with :config:`objspace.name`), + the following names are put into ``__pypy__``: -Those are all described in the `interface section of the taint object space -docs`_. + - ``taint`` + - ``is_tainted`` + - ``untaint`` + - ``taint_atomic`` + - ``_taint_debug`` + - ``_taint_look`` + - ``TaintError`` -For more detailed explanations and examples see the `taint object space docs`_. + Those are all described in the `interface section of the taint object space + docs`_. -.. _`taint object space docs`: objspace-proxies.html#taint -.. 
_`interface section of the taint object space docs`: objspace-proxies.html#taint-interface + For more detailed explanations and examples see the `taint object space docs`_. + + .. _`taint object space docs`: objspace-proxies.html#taint + .. _`interface section of the taint object space docs`: objspace-proxies.html#taint-interface Transparent Proxy Functionality =============================== diff --git a/pypy/doc/config/objspace.usemodules.exceptions.rst b/pypy/doc/config/objspace.usemodules.exceptions.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.exceptions.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'exceptions' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/config/objspace.std.withstrjoin.rst b/pypy/doc/config/objspace.std.withstrjoin.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrjoin.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable "string join" objects. - -See the page about `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#string-join-objects - - diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal.txt copy from pypy/doc/config/translation.backendopt.clever_malloc_removal.rst copy to pypy/doc/config/translation.backendopt.clever_malloc_removal.txt diff --git a/pypy/doc/config/objspace.usemodules.gc.rst b/pypy/doc/config/objspace.usemodules.gc.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.gc.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the 'gc' module. -This module is expected to be working and is included by default. -Note that since the gc module is highly implementation specific, it contains -only the ``collect`` function in PyPy, which forces a collection when compiled -with the framework or with Boehm. 
diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.rst b/pypy/doc/config/objspace.usemodules.micronumpy.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.micronumpy.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the micronumpy module. -This module provides a very basic numpy-like interface. Major use-case -is to show how jit scales for other code. diff --git a/pypy/doc/config/translation.log.rst b/pypy/doc/config/translation.log.rst deleted file mode 100644 --- a/pypy/doc/config/translation.log.rst +++ /dev/null @@ -1,5 +0,0 @@ -Include debug prints in the translation. - -These must be enabled by setting the PYPYLOG environment variable. -The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug.h. diff --git a/pypy/doc/discussion/oz-thread-api.rst b/pypy/doc/discussion/oz-thread-api.rst deleted file mode 100644 --- a/pypy/doc/discussion/oz-thread-api.rst +++ /dev/null @@ -1,49 +0,0 @@ -Some rough notes about the Oz threading model -============================================= - -(almost verbatim from CTM) - -Scheduling ----------- - -Fair scheduling through round-robin. - -With priority levels : three queues exist, which manage high, medium, -low priority threads. The time slice ratio for these is -100:10:1. Threads inherit the priority of their parent. - -Mozart uses an external timer approach to implement thread preemption. - -Thread ops ----------- - -All these ops are defined in a Thread namespace/module. - -this() -> current thread's name (*not* another thread's name) -state(t) -> return state of t in {runnable, blocked, terminated} -suspend(t) : suspend t -resume(t) : resume execution of t -preempt(t) : preempt t -terminate(t) : terminate t immediately -injectException(t, e) : raise exception e in t -setPriority(t, p) : set t's priority to p - -Interestingly, coroutines can be build upon this thread -API. Coroutines have two ops : spawn and resume. 
- -spawn(p) -> creates a coroutine with procedure p, returns pid -resume(c) : transfers control from current coroutine to c - -The implementation of these ops in terms of the threads API is as -follows : - -def spawn(p): - in_thread: - pid = Thread.this() - Thread.suspend(pid) - p() - -def resume(cid): - Thread.resume cid - Thread.suspend(Thread.this()) - diff --git a/pypy/doc/config/objspace.usemodules.rbench.rst b/pypy/doc/config/objspace.usemodules.rbench.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rbench.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the built-in 'rbench' module. -This module contains geninterpreted versions of pystone and richards, -so it is useful to measure the interpretation overhead of the various -pypy-\*. diff --git a/pypy/doc/config/objspace.usemodules.__builtin__.rst b/pypy/doc/config/objspace.usemodules.__builtin__.txt copy from pypy/doc/config/objspace.usemodules.__builtin__.rst copy to pypy/doc/config/objspace.usemodules.__builtin__.txt diff --git a/pypy/doc/config/objspace.std.withstrbuf.rst b/pypy/doc/config/objspace.std.withstrbuf.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrbuf.rst +++ /dev/null @@ -1,4 +0,0 @@ -Enable "string buffer" objects. - -Similar to "string join" objects, but using a StringBuilder to represent -a string built by repeated application of ``+=``. diff --git a/pypy/doc/config/translation.compilerflags.rst b/pypy/doc/config/translation.compilerflags.txt copy from pypy/doc/config/translation.compilerflags.rst copy to pypy/doc/config/translation.compilerflags.txt diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -5,41 +5,12 @@ .. doc-index: This needs merging somehow -.. svn-help.rst: Needs merging/replacing with hg stuff: - - .. 
toctree:: - buildtool.rst distribution.rst - externaltools.rst - - geninterp.rst - - objspace-proxies.rst - - old_news.rst - - project-ideas.rst - - rffi.rst - - sandbox.rst - - statistic/index.rst - - theory.rst - - translation-aspects.rst - - docindex.rst - - svn-help.rst - dot-net.rst - maemo.rst diff --git a/pypy/doc/config/objspace.usemodules.cmath.rst b/pypy/doc/config/objspace.usemodules.cmath.txt copy from pypy/doc/config/objspace.usemodules.cmath.rst copy to pypy/doc/config/objspace.usemodules.cmath.txt diff --git a/pypy/doc/config/objspace.usemodules._bisect.rst b/pypy/doc/config/objspace.usemodules._bisect.txt copy from pypy/doc/config/objspace.usemodules._bisect.rst copy to pypy/doc/config/objspace.usemodules._bisect.txt diff --git a/pypy/doc/config/translation.no__thread.rst b/pypy/doc/config/translation.no__thread.txt copy from pypy/doc/config/translation.no__thread.rst copy to pypy/doc/config/translation.no__thread.txt diff --git a/pypy/doc/config/translation.backendopt.inline.rst b/pypy/doc/config/translation.backendopt.inline.txt copy from pypy/doc/config/translation.backendopt.inline.rst copy to pypy/doc/config/translation.backendopt.inline.txt diff --git a/pypy/doc/config/objspace.usemodules._minimal_curses.rst b/pypy/doc/config/objspace.usemodules._minimal_curses.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._minimal_curses.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_curses' module. -This module is just a stub. It only implements a few functions. diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst +++ /dev/null @@ -1,12 +0,0 @@ -Introduce a new opcode called ``CALL_LIKELY_BUILTIN``. It is used when something -is called, that looks like a builtin function (but could in reality be shadowed -by a name in the module globals). 
For all module globals dictionaries it is -then tracked which builtin name is shadowed in this module. If the -``CALL_LIKELY_BUILTIN`` opcode is executed, it is checked whether the builtin is -shadowed. If not, the corresponding builtin is called. Otherwise the object that -is shadowing it is called instead. If no shadowing is happening, this saves two -dictionary lookups on calls to builtins. - -For more information, see the section in `Standard Interpreter Optimizations`_. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#call-likely-builtin diff --git a/pypy/doc/config/objspace.usemodules.symbol.rst b/pypy/doc/config/objspace.usemodules.symbol.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.symbol.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'symbol' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst --- a/pypy/doc/cli-backend.rst +++ b/pypy/doc/cli-backend.rst @@ -198,12 +198,12 @@ int_add STORE v2 -The code produced works correctly but has some inefficiency issue that +The code produced works correctly but has some inefficiency issues that can be addressed during the optimization phase. The CLI Virtual Machine is fairly expressive, so the conversion between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the correspondent +simple: many operations maps directly to the corresponding instruction, e.g int_add and sub. 
By contrast some instructions do not have a direct correspondent and @@ -223,7 +223,7 @@ Mapping exceptions ------------------ -Both RPython and CLI have its own set of exception classes: some of +Both RPython and CLI have their own set of exception classes: some of these are pretty similar; e.g., we have OverflowError, ZeroDivisionError and IndexError on the first side and OverflowException, DivideByZeroException and IndexOutOfRangeException @@ -435,14 +435,14 @@ To do so, you can install `Python for .NET`_. Unfortunately, it does not work out of the box under Linux. -To make it working, download and unpack the source package of Python +To make it work, download and unpack the source package of Python for .NET; the only version tested with PyPy is the 1.0-rc2, but it might work also with others. Then, you need to create a file named Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.4):: +following lines inside the file (assuming you are using Python 2.7):: - + The installation should be complete now. To run Python for .NET, diff --git a/pypy/doc/config/translation.backendopt.storesink.rst b/pypy/doc/config/translation.backendopt.storesink.txt copy from pypy/doc/config/translation.backendopt.storesink.rst copy to pypy/doc/config/translation.backendopt.storesink.txt diff --git a/pypy/doc/needswork.txt b/pypy/doc/needswork.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/needswork.txt @@ -0,0 +1,3 @@ +.. 
warning:: + + This documentation needs work (as discussed during the Gothenburg sprint in 2011) diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -10,10 +10,9 @@ ------------------------- The translator is a tool based on the PyPy interpreter which can translate -sufficiently static Python programs into low-level code (in particular it can -be used to translate the `full Python interpreter`_). To be able to use it -you need to (if you want to look at the flowgraphs, which you obviously -should): +sufficiently static RPython programs into low-level code (in particular it can +be used to translate the `full Python interpreter`_). To be able to experiment with it +you need to: * Download and install Pygame_. @@ -99,7 +98,7 @@ 9 To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK 2.0`_, while Linux and Mac users +users need the `.NET Framework SDK`_, while Linux and Mac users can use Mono_. To translate and run for the JVM you must have a JDK installed (at least version 5) and ``java``/``javac`` on your path. @@ -146,41 +145,39 @@ Where to start reading the sources ---------------------------------- -PyPy is made from parts that are relatively independent from each other. +PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher - in pyopcode.py_, frame and code objects in eval.py_ and pyframe.py_, - function objects and argument passing in function.py_ and argument.py_, - the object space interface definition in baseobjspace.py_, modules in - module.py_ and mixedmodule.py_. 
Core types supporting the bytecode - interpreter are defined in typedef.py_. + in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, + function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, + the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, - and input data files that allow it to parse both Python 2.3 and 2.4 - syntax. Once the input data has been processed, the parser can be + and grammar files that allow it to parse the syntax of various Python + versions. Once the grammar has been processed, the parser can be translated by the above machinery into efficient code. * `pypy/interpreter/astcompiler`_ contains the compiler. This contains a modified version of the compiler package from CPython - that fixes some bugs and is translatable. That the compiler and - parser are translatable is new in 0.8.0 and it makes using the - resulting binary interactively much more pleasant. + that fixes some bugs and is translatable. * `pypy/objspace/std`_ contains the `Standard object space`_. The main file - is objspace.py_. For each type, the files ``xxxtype.py`` and + is `pypy/objspace/std/objspace.py`_. For each type, the files ``xxxtype.py`` and ``xxxobject.py`` contain respectively the definition of the type and its (default) implementation. -* `pypy/objspace`_ contains a few other object spaces: the thunk_, - trace_ and flow_ object spaces. The latter is a relatively short piece +* `pypy/objspace`_ contains a few other object spaces: the `pypy/objspace/thunk.py`_, + `pypy/objspace/trace.py`_ and `pypy/objspace/flow`_ object spaces. 
The latter is a relatively short piece of code that builds the control flow graphs when the bytecode interpreter runs in it. * `pypy/translator`_ contains the code analysis and generation stuff. - Start reading from translator.py_, from which it should be easy to follow + Start reading from translator.py, from which it should be easy to follow the pieces of code involved in the various translation phases. * `pypy/annotation`_ contains the data model for the type annotation that @@ -190,24 +187,25 @@ * `pypy/rpython`_ contains the code of the RPython typer. The typer transforms annotated flow graphs in a way that makes them very similar to C code so that they can be easy translated. The graph transformations are controlled - by the stuff in `pypy/rpython/rtyper.py`_. The object model that is used can + by the code in `pypy/rpython/rtyper.py`_. The object model that is used can be found in `pypy/rpython/lltypesystem/lltype.py`_. For each RPython type there is a file rxxxx.py that contains the low level functions needed for this type. -* `pypy/rlib`_ contains the RPython standard library, things that you can +* `pypy/rlib`_ contains the `RPython standard library`_, things that you can use from rpython. +.. _`RPython standard library`: rlib.html + .. _optionaltool: Running PyPy's unit tests ------------------------- -PyPy development always was and is still thorougly test-driven. +PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use indepedently -from PyPy for other projects. +`_ and use for other projects. 
The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -263,7 +261,7 @@ If you start an untranslated Python interpreter via:: - python pypy-svn/pypy/bin/py.py + python pypy/bin/py.py If you press on the console you enter the interpreter-level console, a @@ -347,18 +345,6 @@ pygame: http://www.pygame.org/download.shtml -CTypes on Python 2.4 -++++++++++++++++++++++++++++ - -`ctypes`_ is included in CPython 2.5 and higher. CPython 2.4 users needs to -install it if they want to run low-level tests. See -the `download page of ctypes`_. - -.. _`download page of ctypes`: http://sourceforge.net/project/showfiles.php?group_id=71702 -.. _`ctypes`: http://starship.python.net/crew/theller/ctypes/ - -.. _`py.test`: - py.test and the py lib +++++++++++++++++++++++ @@ -367,7 +353,7 @@ We use the `py library`_ for filesystem path manipulations, terminal writing, logging and some other support functionality. -You don't neccessarily need to install these two libraries because +You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. Getting involved @@ -390,33 +376,18 @@ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ -.. _`.NET Framework SDK 2.0`: http://msdn.microsoft.com/netframework/downloads/updates/default.aspx +.. _`.NET Framework SDK`: http://msdn.microsoft.com/netframework/ .. _Mono: http://www.mono-project.com/Main_Page .. _`CLI backend`: cli-backend.html .. _clr: clr-module.html .. _`Dot Graphviz`: http://www.graphviz.org/ .. _Pygame: http://www.pygame.org/ -.. _pyopcode.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/pyopcode.py -.. _eval.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/eval.py -.. _pyframe.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/pyframe.py -.. _function.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/function.py -.. 
_argument.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/argument.py -.. _baseobjspace.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/baseobjspace.py -.. _module.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/module.py -.. _mixedmodule.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/mixedmodule.py -.. _typedef.py: http://codespeak.net/svn/pypy/trunk/pypy/interpreter/typedef.py .. _Standard object space: objspace.html#the-standard-object-space -.. _objspace.py: ../../../../pypy/objspace/std/objspace.py -.. _thunk: ../../../../pypy/objspace/thunk.py -.. _trace: ../../../../pypy/objspace/trace.py -.. _flow: ../../../../pypy/objspace/flow/ -.. _translator.py: ../../../../pypy/translator/translator.py .. _mailing lists: index.html -.. _documentation: docindex.html +.. _documentation: index.html#project-documentation .. _unit tests: coding-guide.html#test-design -.. _`directory reference`: docindex.html#directory-reference +.. _`directory reference`: index.html#pypy-directory-reference -.. include:: _ref.rst - +.. 
include:: _ref.txt diff --git a/pypy/doc/image/parsing_example9.dot b/pypy/doc/image/parsing_example9.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example9.dot +++ /dev/null @@ -1,13 +0,0 @@ -digraph G{ -"-1219430228" [label="list"]; -"-1219430228" -> "-1213608980"; -"-1213608980" [shape=box,label="DECIMAL\n'1'"]; -"-1219430228" -> "-1213623380"; -"-1213623380" [shape=box,label="DECIMAL\n'2'"]; -"-1219430228" -> "-1213441652"; -"-1213441652" [shape=box,label="DECIMAL\n'3'"]; -"-1219430228" -> "-1213441620"; -"-1213441620" [shape=box,label="DECIMAL\n'4'"]; -"-1219430228" -> "-1213442100"; -"-1213442100" [shape=box,label="DECIMAL\n'5'"]; -} \ No newline at end of file diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -14,7 +14,7 @@ PyPy's bytecode interpreter has a structure reminiscent of CPython's Virtual Machine: It processes code objects parsed and compiled from -Python source code. It is implemented in the `interpreter/`_ directory. +Python source code. It is implemented in the `pypy/interpreter/`_ directory. People familiar with the CPython implementation will easily recognize similar concepts there. The major differences are the overall usage of the `object space`_ indirection to perform operations on objects, and @@ -22,12 +22,13 @@ Code objects are a nicely preprocessed, structured representation of source code, and their main content is *bytecode*. We use the same -compact bytecode format as CPython 2.4. Our bytecode compiler is +compact bytecode format as CPython 2.7, with minor differences in the bytecode +set. Our bytecode compiler is implemented as a chain of flexible passes (tokenizer, lexer, parser, abstract syntax tree builder, bytecode generator). The latter passes are based on the ``compiler`` package from the standard library of CPython, with various improvements and bug fixes. 
The bytecode compiler -(living under `interpreter/astcompiler/`_) is now integrated and is +(living under `pypy/interpreter/astcompiler/`_) is now integrated and is translated with the rest of PyPy. Code objects contain @@ -37,7 +38,7 @@ calling its ``frame.eval()`` method. This main entry point initialize appropriate namespaces and then interprets each bytecode instruction. Python's standard library contains -the `lib-python/2.5.2/dis.py`_ module which allows to view +the `lib-python/2.7.0/dis.py`_ module which allows to view the Virtual's machine bytecode instructions:: >>> import dis @@ -145,21 +146,15 @@ file location can be constructed for tracebacks Moreover the Frame class itself has a number of methods which implement -the actual bytecodes found in a code object. In fact, PyPy already constructs -four specialized Frame class variants depending on the code object: +the actual bytecodes found in a code object. The methods of the ``PyFrame`` +class are added in various files: -- PyInterpFrame (in `pypy/interpreter/pyopcode.py`_) for - basic simple code objects (not involving generators or nested scopes) +- the class ``PyFrame`` is defined in `pypy/interpreter/pyframe.py`_. -- PyNestedScopeFrame (in `pypy/interpreter/nestedscope.py`_) - for code objects that reference nested scopes, inherits from PyInterpFrame +- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcode. -- PyGeneratorFrame (in `pypy/interpreter/generator.py`_) - for code objects that yield values to the caller, inherits from PyInterpFrame - -- PyNestedScopeGeneratorFrame for code objects that reference - nested scopes and yield values to the caller, inherits from both PyNestedScopeFrame - and PyGeneratorFrame +- nested scope support is added to the ``PyFrame`` class in + `pypy/interpreter/nestedscope.py`_. .. _Code: @@ -269,7 +264,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. 
_`__builtin__ module`: http://codespeak.net/svn/pypy/trunk/pypy/module/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: @@ -407,4 +402,4 @@ as a reference for the exact attributes of interpreter classes visible at application level. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/config/translation.cli.rst b/pypy/doc/config/translation.cli.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. intentionally empty diff --git a/pypy/doc/config/translation.backendopt.remove_asserts.rst b/pypy/doc/config/translation.backendopt.remove_asserts.txt copy from pypy/doc/config/translation.backendopt.remove_asserts.rst copy to pypy/doc/config/translation.backendopt.remove_asserts.txt diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -9,46 +9,117 @@ What is PyPy ? ============== -PyPy is an implementation of the Python_ programming language written in -Python itself, flexible and easy to experiment with. +In common parlance, PyPy has been used to mean two things. The first is the +`RPython translation toolchain`_, which is a framework for generating +dynamic programming language implementations. And the second is one +particular implementation that is so generated -- +an implementation of the Python_ programming language written in +Python itself. It is designed to be flexible and easy to experiment with. + +This double usage has proven to be confusing, and we are trying to move +away from using the word PyPy to mean both things. From now on we will +try to use PyPy to only mean the Python implementation, and say the +`RPython translation toolchain`_ when we mean the framework. Some older +documents, presentations, papers and videos will still have the old +usage. 
You are hereby warned. + We target a large variety of platforms, small and large, by providing a compiler toolsuite that can produce custom Python versions. Platform, memory and threading models, as well as the JIT compiler itself, are aspects of the translation process - as opposed to encoding low level details into the language implementation itself. `more...`_ - -.. _Python: http://docs.python.org/ref +.. _Python: http://docs.python.org/reference/ +.. _`RPython translation toolchain`: translation.html .. _`more...`: architecture.html Just the facts ============== +Download a pre-built PyPy +------------------------- + +The quickest way to start using PyPy is to download a prebuilt binary for your +OS and architecture. You can either use the `most recent release`_ or one of +our `development nightly build`_. Please note that the nightly builds are not +guaranteed to be as stable as official releases, use them at your own risk. + +.. _`most recent release`: http://pypy.org/download.html +.. _`development nightly build`: http://buildbot.pypy.org/nightly/trunk/ + +Installing PyPy +--------------- + +PyPy is ready to be executed as soon as you unpack the tarball or the zip +file, with no need install it in any specific location:: + + $ tar xf pypy-1.5-linux.tar.bz2 + + $ ./pypy-1.5-linux/bin/pypy + Python 2.7.1 (?, Apr 27 2011, 12:44:21) + [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + And now for something completely different: ``implementing LOGO in LOGO: + "turtles all the way down"'' + >>>> + +If you want to make PyPy available system-wide, you can put a symlink to the +``pypy`` executable in ``/usr/local/bin``. It is important to put a symlink +and not move the binary there, else PyPy would not be able to find its +library. 
+ +If you want to install 3rd party libraries, the most convenient way is to +install distribute_ and pip_: + + $ curl -O http://python-distribute.org/distribute_setup.py + + $ curl -O https://github.com/pypa/pip/raw/master/contrib/get-pip.py + + $ ./pypy-1.5-linux/bin/pypy distribute_setup.py + + $ ./pypy-1.5-linux/bin/pypy get-pip.py + + $ ./pypy-1.5-linux/bin/pip install pygments # for example + +3rd party libraries will be installed in ``pypy-1.5-linux/site-packages``, and +the scripts in ``pypy-1.5-linux/bin``. + +Installing using virtualenv +--------------------------- + +It is often convenient to run pypy inside a virtualenv. To do this +you need a recent version of virtualenv -- 1.6.1 or greater. You can +then install PyPy both from a precompiled tarball or from a mercurial +checkout:: + + # from a tarball + $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env + + # from the mercurial checkout + $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env + +Note that bin/python is now a symlink to bin/pypy. + +.. _`distribute`: http://www.python-distribute.org/ +.. _`pip`: http://pypi.python.org/pypi/pip + + Clone the repository -------------------- -Before you can play with PyPy, you will need to obtain a copy -of the sources. This can be done either by `downloading them -from the download page`_ or by checking them out from the -repository using mercurial. We suggest using mercurial if one -wants to access the current development. +If you prefer to `compile PyPy by yourself`_, or if you want to modify it, you +will need to obtain a copy of the sources. This can be done either by +`downloading them from the download page`_ or by checking them out from the +repository using mercurial. We suggest using mercurial if one wants to access +the current development. .. _`downloading them from the download page`: http://pypy.org/download.html -If you choose to use mercurial, -first make sure you have ``subversion`` installed. 
You must issue the following command on your command line, DOS box, or terminal:: hg clone http://bitbucket.org/pypy/pypy pypy -If you get an error like this:: - - abort: repository [svn]http://codespeak.net/svn/pypy/build/testrunner not found! - -it probably means that your mercurial version is too old. You need at least -Mercurial 1.6 to clone the PyPy repository. - This will clone the repository and place it into a directory named ``pypy``, and will get you the PyPy source in ``pypy/pypy`` and documentation files in ``pypy/pypy/doc``. @@ -64,39 +135,26 @@ where XXXXX is the revision id. + +.. _`compile PyPy by yourself`: getting-started-python.html .. _`our nightly tests:`: http://buildbot.pypy.org/summary?branch= -If you want to commit to our repository on bitbucket, you will have to -install subversion in addition to mercurial. - -Installing using virtualenv ---------------------------- - -It is often convenient to run pypy inside a virtualenv. To do this -you need a recent version of virtualenv -- 1.5 or greater. You can -then install PyPy both from a precompiled tarball or from a mercurial -checkout:: - - # from a tarball - $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env - - # from the mercurial checkout - $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env - -Note that bin/python is now a symlink to bin/pypy. - - Where to go from here ----------------------- +====================== After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ - - `Learning more about the translation toolchain and how to develop (with) PyPy`_ + - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ + - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ + - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html -.. 
_`Learning more about the translation toolchain and how to develop (with) PyPy`: getting-started-dev.html +.. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html +.. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html +.. _`Look at our benchmark results`: http://speed.pypy.org +.. _setuptools: http://pypi.python.org/pypi/setuptools Understanding PyPy's architecture --------------------------------- @@ -106,7 +164,7 @@ interesting information. Additionally, in true hacker spirit, you may just `start reading sources`_ . -.. _`documentation section`: docindex.html +.. _`documentation section`: index.html#project-documentation .. _`start reading sources`: getting-started-dev.html#start-reading-sources Filing bugs or feature requests @@ -121,4 +179,4 @@ .. _bug reports: https://codespeak.net/issue/pypy-dev/ -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/config/translation.cli.rst b/pypy/doc/config/translation.cli.txt copy from pypy/doc/config/translation.cli.rst copy to pypy/doc/config/translation.cli.txt diff --git a/pypy/doc/config/translation.backendopt.none.rst b/pypy/doc/config/translation.backendopt.none.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.none.rst +++ /dev/null @@ -1,1 +0,0 @@ -Do not run any backend optimizations. diff --git a/pypy/doc/config/objspace.std.optimized_comparison_op.rst b/pypy/doc/config/objspace.std.optimized_comparison_op.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_comparison_op.rst +++ /dev/null @@ -1,1 +0,0 @@ -Optimize the comparison of two integers a bit. 
diff --git a/pypy/doc/image/parsing_example5.dot b/pypy/doc/image/parsing_example5.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example5.dot +++ /dev/null @@ -1,21 +0,0 @@ -digraph G{ -"-1219949908" [label="n"]; -"-1219949908" -> "-1214026452"; -"-1214026452" [shape=box,label="__0_a\n'a'"]; -"-1219949908" -> "-1214028276"; -"-1214028276" [shape=box,label="__1_b\n'b'"]; -"-1219949908" -> "-1214027316"; -"-1214027316" [shape=box,label="__2_c\n'c'"]; -"-1219949908" -> "-1219949876"; -"-1219949876" [label="n"]; -"-1219949876" -> "-1214141364"; -"-1214141364" [shape=box,label="__0_a\n'a'"]; -"-1219949876" -> "-1214141748"; -"-1214141748" [shape=box,label="__1_b\n'b'"]; -"-1219949876" -> "-1214140756"; -"-1214140756" [shape=box,label="__2_c\n'c'"]; -"-1219949876" -> "-1219949748"; -"-1219949748" [label="m"]; -"-1219949748" -> "-1214414868"; -"-1214414868" [shape=box,label="__5_d\n'd'"]; -} \ No newline at end of file diff --git a/pypy/doc/config/objspace.usemodules._testing.rst b/pypy/doc/config/objspace.usemodules._testing.txt copy from pypy/doc/config/objspace.usemodules._testing.rst copy to pypy/doc/config/objspace.usemodules._testing.txt diff --git a/pypy/doc/rlib.rst b/pypy/doc/rlib.rst --- a/pypy/doc/rlib.rst +++ b/pypy/doc/rlib.rst @@ -14,13 +14,11 @@ to change at some point. Usually it is useful to look at the tests in `pypy/rlib/test`_ to get an impression of how to use a module. -.. _`pypy/rlib`: ../../../../pypy/rlib -.. _`pypy/rlib/test`: ../../../../pypy/rlib/test ``listsort`` ============ -The listsort_ module contains an implementation of the timsort sorting algorithm +The `pypy/rlib/listsort.py`_ module contains an implementation of the timsort sorting algorithm (the sort method of lists is not RPython). To use it, subclass from the ``listsort.TimSort`` class and override the ``lt`` method to change the comparison behaviour. 
The constructor of ``TimSort`` takes a list as an @@ -29,19 +27,16 @@ be sorted using the ``listsort`` module in one program, otherwise the annotator will be confused. -.. _listsort: ../../../../pypy/rlib/listsort.py - ``nonconst`` ============ -The nonconst_ module is useful mostly for tests. The `flow object space`_ and +The `pypy/rlib/nonconst.py`_ module is useful mostly for tests. The `flow object space`_ and the `annotator`_ do quite some constant folding, which is sometimes not desired in a test. To prevent constant folding on a certain value, use the ``NonConst`` class. The constructor of ``NonConst`` takes an arbitrary value. The instance of ``NonConst`` will behave during annotation like that value, but no constant folding will happen. -.. _nonconst: ../../../../pypy/rlib/nonconst.py .. _`flow object space`: objspace.html#the-flow-object-space .. _`annotator`: translation.html#the-annotation-pass @@ -49,7 +44,7 @@ ``objectmodel`` =============== -The objectmodel_ module is a mixed bag of various functionality. Some of the +The `pypy/rlib/objectmodel.py`_ module is a mixed bag of various functionality. Some of the more useful ones are: ``ComputedIntSymbolic``: @@ -95,24 +90,21 @@ won't be allocated but represented by *tagged pointers**, that is pointers that have the lowest bit set. -.. _objectmodel: ../../../../pypy/rlib/objectmodel.py - ``rarithmetic`` =============== -The rarithmetic_ module contains functionality to handle the small differences +The `pypy/rlib/rarithmetic.py`_ module contains functionality to handle the small differences in the behaviour of arithmetic code in regular Python and RPython code. Most of them are already described in the `coding guide`_ -.. _rarithmetic: ../../../../pypy/rlib/rarithmetic.py .. 
_`coding guide`: coding-guide.html ``rbigint`` =========== -The rbigint module contains a full RPython implementation of the Python ``long`` +The `pypy/rlib/rbigint.py`_ module contains a full RPython implementation of the Python ``long`` type (which itself is not supported in RPython). The ``rbigint`` class contains that implementation. To construct ``rbigint`` instances use the static methods ``fromint``, ``frombool``, ``fromfloat`` and ``fromdecimalstr``. To convert back @@ -122,36 +114,30 @@ these underscores left out for better readability (so ``a.add(b)`` can be used to add two rbigint instances). -.. _rbigint: ../../../../pypy/rlib/rbigint.py - ``rrandom`` =========== -The rrandom_ module contains an implementation of the mersenne twister random +The `pypy/rlib/rrandom.py`_ module contains an implementation of the mersenne twister random number generator. It contains one class ``Random`` which most importantly has a ``random`` method which returns a pseudo-random floating point number between 0.0 and 1.0. -.. _rrandom: ../../../../pypy/rlib/rrandom.py - ``rsocket`` =========== -The rsocket_ module contains an RPython implementation of the functionality of +The `pypy/rlib/rsocket.py`_ module contains an RPython implementation of the functionality of the socket standard library with a slightly different interface. The difficulty with the Python socket API is that addresses are not "well-typed" objects: depending on the address family they are tuples, or strings, and so on, which is not suitable for RPython. Instead, ``rsocket`` contains a hierarchy of Address classes, in a typical static-OO-programming style. -.. _rsocket: ../../../../pypy/rlib/rsocket.py - ``rstack`` ========== -The rstack_ module allows an RPython program to control its own execution stack. +The `pypy/rlib/rstack.py`_ module allows an RPython program to control its own execution stack. This is only useful if the program is translated using stackless. 
An old description of the exposed functions is below. @@ -210,32 +196,28 @@ f() -.. _rstack: ../../../../pypy/rlib/rstack.py - ``streamio`` ============ -The streamio_ contains an RPython stream I/O implementation (which was started +The `pypy/rlib/streamio.py`_ contains an RPython stream I/O implementation (which was started by Guido van Rossum as `sio.py`_ in the CPython sandbox as a prototype for the upcoming new file implementation in Python 3000). -.. _streamio: ../../../../pypy/rlib/streamio.py .. _`sio.py`: http://svn.python.org/view/sandbox/trunk/sio/sio.py ``unroll`` ========== -The unroll_ module most importantly contains the function ``unrolling_iterable`` +The `pypy/rlib/unroll.py`_ module most importantly contains the function ``unrolling_iterable`` which wraps an iterator. Looping over the iterator in RPython code will not produce a loop in the resulting flow graph but will unroll the loop instead. -.. _unroll: ../../../../pypy/rlib/unroll.py ``parsing`` =========== -The parsing_ module is a still in-development module to generate tokenizers and +The `pypy/rlib/parsing/`_ module is a still in-development module to generate tokenizers and parsers in RPython. It is still highly experimental and only really used by the `Prolog interpreter`_ (although in slightly non-standard ways). The easiest way to specify a tokenizer/grammar is to write it down using regular expressions and @@ -289,7 +271,7 @@ returns a object with a ``recognize(input)`` method that returns True or False depending on whether ``input`` matches the string or not. -.. _`re`: http://docs.python.org/lib/module-re.html +.. _`re`: http://docs.python.org/library/re.html EBNF ---- @@ -341,14 +323,42 @@ produces a syntax tree that follows the precedence of the operators. For example the expression ``12 + 4 * 5`` is parsed into the following tree: -.. graphviz:: image/parsing_example1.dot +.. 
graphviz:: + + digraph G{ + "-1213931828" [label="additive"]; + "-1213931828" -> "-1213951956"; + "-1213951956" [label="multitive"]; + "-1213951956" -> "-1213949172"; + "-1213949172" [label="primary"]; + "-1213949172" -> "-1213949812"; + "-1213949812" [shape=box,label="DECIMAL\l'12'"]; + "-1213931828" -> "-1213935220"; + "-1213935220" [shape=box,label="__0_+\l'+'"]; + "-1213931828" -> "-1213951316"; + "-1213951316" [label="additive"]; + "-1213951316" -> "-1213948180"; + "-1213948180" [label="multitive"]; + "-1213948180" -> "-1213951380"; + "-1213951380" [label="primary"]; + "-1213951380" -> "-1213951508"; + "-1213951508" [shape=box,label="DECIMAL\l'4'"]; + "-1213948180" -> "-1213948788"; + "-1213948788" [shape=box,label="__1_*\l'*'"]; + "-1213948180" -> "-1213951060"; + "-1213951060" [label="multitive"]; + "-1213951060" -> "-1213948980"; + "-1213948980" [label="primary"]; + "-1213948980" -> "-1213950420"; + "-1213950420" [shape=box,label="DECIMAL\l'5'"]; + } Parse Trees ----------- The parsing process builds up a tree consisting of instances of ``Symbol`` and ``Nonterminal``, the former corresponding to tokens, the latter to nonterminal -symbols. Both classes live in the `pypy.rlib.parsing.tree`_ module. You can use +symbols. Both classes live in the `pypy/rlib/parsing/tree.py`_ module. You can use the ``view()`` method ``Nonterminal`` instances to get a pygame view of the parse tree. @@ -359,13 +369,11 @@ of the nonterminal and ``children`` which is a list of the children attributes. -.. _`pypy.rlib.parsing.tree`: ../../../../pypy/rlib/parsing/tree.py - Visitors ++++++++ To write tree visitors for the parse trees that are RPython, there is a special -baseclass ``RPythonVisitor`` in ``pypy.rlib.parsing.tree``_ to use. If your +baseclass ``RPythonVisitor`` in `pypy/rlib/parsing/tree.py`_ to use. If your class uses this, it will grow a ``dispatch(node)`` method, that calls an appropriate ``visit_`` method, depending on the ``node`` argument. 
Here the is replaced by the ``symbol`` attribute of the visited node. @@ -400,11 +408,43 @@ Parsing the string "A, A, A" gives the tree: -.. graphviz:: image/parsing_example2.dot +.. graphviz:: + + digraph G{ + "-1213678004" [label="n"]; + "-1213678004" -> "-1213681108"; + "-1213681108" [shape=box,label="__0_A\n'A'"]; + "-1213678004" -> "-1213681332"; + "-1213681332" [shape=box,label="__1_,\n','"]; + "-1213678004" -> "-1213837780"; + "-1213837780" [label="n"]; + "-1213837780" -> "-1213837716"; + "-1213837716" [shape=box,label="__0_A\n'A'"]; + "-1213837780" -> "-1213839476"; + "-1213839476" [shape=box,label="__1_,\n','"]; + "-1213837780" -> "-1213839956"; + "-1213839956" [label="n"]; + "-1213839956" -> "-1213840948"; + "-1213840948" [shape=box,label="__0_A\n'A'"]; + } After transformation the tree has the "," nodes removed: -.. graphviz:: image/parsing_example3.dot +.. graphviz:: + + digraph G{ + "-1219325716" [label="n"]; + "-1219325716" -> "-1219325844"; + "-1219325844" [shape=box,label="__0_A\n'A'"]; + "-1219325716" -> "-1219324372"; + "-1219324372" [label="n"]; + "-1219324372" -> "-1219325524"; + "-1219325524" [shape=box,label="__0_A\n'A'"]; + "-1219324372" -> "-1219324308"; + "-1219324308" [label="n"]; + "-1219324308" -> "-1219325492"; + "-1219325492" [shape=box,label="__0_A\n'A'"]; + } ++++++++ @@ -421,12 +461,61 @@ Parsing the string "a b c (a b c d)" gives the tree: -.. graphviz:: image/parsing_example4.dot +.. 
graphviz:: + + digraph G{ + "-1214029460" [label="n"]; + "-1214029460" -> "-1214026452"; + "-1214026452" [shape=box,label="__0_a\n'a'"]; + "-1214029460" -> "-1214028276"; + "-1214028276" [shape=box,label="__1_b\n'b'"]; + "-1214029460" -> "-1214027316"; + "-1214027316" [shape=box,label="__2_c\n'c'"]; + "-1214029460" -> "-1214026868"; + "-1214026868" [label="m"]; + "-1214026868" -> "-1214140436"; + "-1214140436" [shape=box,label="__3_(\n'('"]; + "-1214026868" -> "-1214143508"; + "-1214143508" [label="n"]; + "-1214143508" -> "-1214141364"; + "-1214141364" [shape=box,label="__0_a\n'a'"]; + "-1214143508" -> "-1214141748"; + "-1214141748" [shape=box,label="__1_b\n'b'"]; + "-1214143508" -> "-1214140756"; + "-1214140756" [shape=box,label="__2_c\n'c'"]; + "-1214143508" -> "-1214144468"; + "-1214144468" [label="m"]; + "-1214144468" -> "-1214414868"; + "-1214414868" [shape=box,label="__5_d\n'd'"]; + "-1214026868" -> "-1214141492"; + "-1214141492" [shape=box,label="__4_)\n')'"]; + } After transformation the tree looks like this: -.. graphviz:: image/parsing_example5.dot +.. graphviz:: + digraph G{ + "-1219949908" [label="n"]; + "-1219949908" -> "-1214026452"; + "-1214026452" [shape=box,label="__0_a\n'a'"]; + "-1219949908" -> "-1214028276"; + "-1214028276" [shape=box,label="__1_b\n'b'"]; + "-1219949908" -> "-1214027316"; + "-1214027316" [shape=box,label="__2_c\n'c'"]; + "-1219949908" -> "-1219949876"; + "-1219949876" [label="n"]; + "-1219949876" -> "-1214141364"; + "-1214141364" [shape=box,label="__0_a\n'a'"]; + "-1219949876" -> "-1214141748"; + "-1214141748" [shape=box,label="__1_b\n'b'"]; + "-1219949876" -> "-1214140756"; + "-1214140756" [shape=box,label="__2_c\n'c'"]; + "-1219949876" -> "-1219949748"; + "-1219949748" [label="m"]; + "-1219949748" -> "-1214414868"; + "-1214414868" [shape=box,label="__5_d\n'd'"]; + } >nonterminal_1 nonterminal_2 ... nonterminal_n< +++++++++++++++++++++++++++++++++++++++++++++++ @@ -441,23 +530,76 @@ Parsing the string "1 2" gives the tree: -.. 
graphviz:: image/parsing_example6.dot - +.. graphviz:: + + digraph G{ + "-1213518708" [label="list"]; + "-1213518708" -> "-1213518196"; + "-1213518196" [shape=box,label="DECIMAL\n'1'"]; + "-1213518708" -> "-1213518260"; + "-1213518260" [label="list"]; + "-1213518260" -> "-1213520308"; + "-1213520308" [shape=box,label="DECIMAL\n'2'"]; + } + after the transformation the tree looks like: -.. graphviz:: image/parsing_example7.dot +.. graphviz:: + + digraph G{ + "-1219505652" [label="list"]; + "-1219505652" -> "-1213518196"; + "-1213518196" [shape=box,label="DECIMAL\n'1'"]; + "-1219505652" -> "-1213520308"; + "-1213520308" [shape=box,label="DECIMAL\n'2'"]; + } Note that the transformation works recursively. That means that the following also works: if the string "1 2 3 4 5" is parsed the tree at first looks like this: -.. graphviz:: image/parsing_example8.dot +.. graphviz:: + + digraph G{ + "-1213611892" [label="list"]; + "-1213611892" -> "-1213608980"; + "-1213608980" [shape=box,label="DECIMAL\n'1'"]; + "-1213611892" -> "-1213623476"; + "-1213623476" [label="list"]; + "-1213623476" -> "-1213623380"; + "-1213623380" [shape=box,label="DECIMAL\n'2'"]; + "-1213623476" -> "-1213442868"; + "-1213442868" [label="list"]; + "-1213442868" -> "-1213441652"; + "-1213441652" [shape=box,label="DECIMAL\n'3'"]; + "-1213442868" -> "-1213441332"; + "-1213441332" [label="list"]; + "-1213441332" -> "-1213441620"; + "-1213441620" [shape=box,label="DECIMAL\n'4'"]; + "-1213441332" -> "-1213443060"; + "-1213443060" [label="list"]; + "-1213443060" -> "-1213442100"; + "-1213442100" [shape=box,label="DECIMAL\n'5'"]; + } But after transformation the whole thing collapses to one node with a lot of children: -.. graphviz:: image/parsing_example9.dot +.. 
graphviz:: + digraph G{ + "-1219430228" [label="list"]; + "-1219430228" -> "-1213608980"; + "-1213608980" [shape=box,label="DECIMAL\n'1'"]; + "-1219430228" -> "-1213623380"; + "-1213623380" [shape=box,label="DECIMAL\n'2'"]; + "-1219430228" -> "-1213441652"; + "-1213441652" [shape=box,label="DECIMAL\n'3'"]; + "-1219430228" -> "-1213441620"; + "-1213441620" [shape=box,label="DECIMAL\n'4'"]; + "-1219430228" -> "-1213442100"; + "-1213442100" [shape=box,label="DECIMAL\n'5'"]; + } Extensions to the EBNF grammar format ------------------------------------- @@ -526,10 +668,48 @@ looks like this: -.. graphviz:: image/parsing_example10.dot +.. graphviz:: + digraph G{ + "-1220061652" [label="object"]; + "-1220061652" -> "-1220127636"; + "-1220127636" [label="entry"]; + "-1220127636" -> "-1213915636"; + "-1213915636" [shape=box,label="STRING\n'a'"]; + "-1220127636" -> "-1214251156"; + "-1214251156" [shape=box,label="STRING\n'5'"]; + "-1220061652" -> "-1220063188"; + "-1220063188" [label="entry"]; + "-1220063188" -> "-1214253076"; + "-1214253076" [shape=box,label="STRING\n'b'"]; + "-1220063188" -> "-1220059444"; + "-1220059444" [label="array"]; + "-1220059444" -> "-1214253364"; + "-1214253364" [shape=box,label="NUMBER\n'1'"]; + "-1220059444" -> "-1214254292"; + "-1214254292" [shape=box,label="__0_null\n'null'"]; + "-1220059444" -> "-1214253268"; + "-1214253268" [shape=box,label="NUMBER\n'3'"]; + "-1220059444" -> "-1214252596"; + "-1214252596" [shape=box,label="__1_true\n'true'"]; + "-1220059444" -> "-1220062260"; + "-1220062260" [label="object"]; + "-1220062260" -> "-1220060116"; + "-1220060116" [label="entry"]; + "-1220060116" -> "-1214211860"; + "-1214211860" [shape=box,label="STRING\n'f'"]; + "-1220060116" -> "-1214210132"; + "-1214210132" [shape=box,label="STRING\n'g'"]; + "-1220062260" -> "-1220062868"; + "-1220062868" [label="entry"]; + "-1220062868" -> "-1214211956"; + "-1214211956" [shape=box,label="STRING\n'h'"]; + "-1220062868" -> "-1214212308"; + "-1214212308" 
[shape=box,label="NUMBER\n'6'"]; + } -.. _`Prolog interpreter`: http://codespeak.net/svn/pypy/lang/prolog/ -.. _parsing: ../../../../pypy/rlib/parsing/ +.. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`json format`: http://www.json.org + +.. include:: _ref.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline.rst b/pypy/doc/config/translation.backendopt.profile_based_inline.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline.txt diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -1,11 +1,9 @@ -===================================== +==================================== Coding Guide -===================================== +==================================== .. contents:: - - This document describes coding requirements and conventions for working with the PyPy code base. Please read it carefully and ask back any questions you might have. The document does not talk @@ -133,7 +131,7 @@ whether a particular function is implemented at application or interpreter level. -our runtime interpreter is "restricted python" +Our runtime interpreter is "RPython" ---------------------------------------------- In order to make a C code generator feasible all code on interpreter level has @@ -148,7 +146,7 @@ implementation must behave in a static way often referenced as "RPythonic". -.. _Starkiller: http://www.python.org/pycon/dc2004/papers/1/paper.pdf +.. _Starkiller: http://people.csail.mit.edu/jrb/Projects/starkiller.pdf .. _ShedSkin: http://shed-skin.blogspot.com/ However, when the PyPy interpreter is started as a Python program, it @@ -174,7 +172,7 @@ enables the code generator to emit efficient machine level replacements for pure integer objects, for instance. 
-Restricted Python +RPython ================= RPython Definition, not @@ -342,9 +340,8 @@ ------------------------- While implementing the integer type, we stumbled over the problem that -integers are quite in flux in CPython right now. Starting on Python 2.2, -integers mutate into longs on overflow. However, shifting to the left -truncates up to 2.3 but extends to longs as well in 2.4. By contrast, we need +integers are quite in flux in CPython right now. Starting with Python 2.4, +integers mutate into longs on overflow. In contrast, we need a way to perform wrap-around machine-sized arithmetic by default, while still being able to check for overflow when we need it explicitly. Moreover, we need a consistent behavior before and after translation. @@ -354,9 +351,6 @@ silent wrap-around. Whenever we need more control, we use the following helpers (which live the `pypy/rlib/rarithmetic.py`_): -.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py - - **ovfcheck()** This special function should only be used with a single arithmetic operation @@ -368,15 +362,6 @@ ovfcheck() as a hint: they replace the whole ``ovfcheck(x+y)`` expression with a single overflow-checking addition in C. -**ovfcheck_lshift()** - - ovfcheck_lshift(x, y) is a workaround for ovfcheck(x<=0.13.0) can be run with the ``--rpython-mode`` command line option. This option -enables the RPython checker which will checks for some of the -restrictions RPython adds on standard Python code (and uses a -more aggressive type inference than the one used by default by -pylint). The full list of checks is available in the documentation of -Pylint. - -RPylint can be a nice tool to get some information about how much work -will be needed to convert a piece of Python code to RPython, or to get -started with RPython. 
While this tool will not guarantee that the -code it checks will be translate successfully, it offers a few nice -advantages over running a translation: - -* it is faster and therefore provides feedback faster than ``translate.py`` - -* it does not stop at the first problem it finds, so you can get more - feedback on the code in one run - -* the messages tend to be a bit less cryptic - -* you can easily run it from emacs, vi, eclipse or visual studio. - -Note: if pylint is not prepackaged for your OS/distribution, or if -only an older version is available, you will need to install from -source. In that case, there are a couple of dependencies, -logilab-common_ and astng_ that you will need to install too before -you can use the tool. - -.. _Pylint: http://www.logilab.org/projects/pylint -.. _logilab-common: http://www.logilab.org/projects/common -.. _astng: http://www.logilab.org/projects/astng - - Wrapping rules ============== @@ -626,7 +574,7 @@ Modules visible from application programs are imported from interpreter or application level files. PyPy reuses almost all python -modules of CPython's standard library, currently from version 2.5.2. We +modules of CPython's standard library, currently from version 2.7.1. We sometimes need to `modify modules`_ and - more often - regression tests because they rely on implementation details of CPython. 
@@ -649,21 +597,19 @@ >>>> import sys >>>> sys.__file__ - '/home/hpk/pypy-dist/pypy/module/sys/*.py' + '/home/hpk/pypy-dist/pypy/module/sys' - >>>> import operator - >>>> operator.__file__ - '/home/hpk/pypy-dist/lib_pypy/operator.py' + >>>> import cPickle + >>>> cPickle.__file__ + '/home/hpk/pypy-dist/lib_pypy/cPickle..py' >>>> import opcode >>>> opcode.__file__ - '/home/hpk/pypy-dist/lib-python/modified-2.5.2/opcode.py' + '/home/hpk/pypy-dist/lib-python/modified-2.7/opcode.py' >>>> import os - faking - faking >>>> os.__file__ - '/home/hpk/pypy-dist/lib-python/2.5.2/os.py' + '/home/hpk/pypy-dist/lib-python/2.7/os.py' >>>> Module directories / Import order @@ -686,11 +632,11 @@ contains pure Python reimplementation of modules. -*lib-python/modified-2.5.2/* +*lib-python/modified-2.7/* The files and tests that we have modified from the CPython library. -*lib-python/2.5.2/* +*lib-python/2.7/* The unmodified CPython library. **Never ever check anything in there**. @@ -705,14 +651,14 @@ by default and CPython has a number of places where it relies on some classes being old-style. -If you want to change a module or test contained in ``lib-python/2.5.2`` -then make sure that you copy the file to our ``lib-python/modified-2.5.2`` -directory first. In subversion commandline terms this reads:: +If you want to change a module or test contained in ``lib-python/2.7`` +then make sure that you copy the file to our ``lib-python/modified-2.7`` +directory first. In mercurial commandline terms this reads:: - svn cp lib-python/2.5.2/somemodule.py lib-python/modified-2.5.2/ + $ hg cp lib-python/2.7/somemodule.py lib-python/modified-2.7/ and subsequently you edit and commit -``lib-python/modified-2.5.2/somemodule.py``. This copying operation is +``lib-python/modified-2.7/somemodule.py``. This copying operation is important because it keeps the original CPython tree clean and makes it obvious what we had to change. 
@@ -860,29 +806,23 @@ - write good log messages because several people are reading the diffs. -- if you add (text/py) files to the repository then please run - pypy/tool/fixeol in that directory. This will make sure - that the property 'svn:eol-style' is set to native which - allows checkin/checkout in native line-ending format. +- What was previously called ``trunk`` is called the ``default`` branch in + mercurial. Branches in mercurial are always pushed together with the rest + of the repository. To create a ``try1`` branch (assuming that a branch named + ``try1`` doesn't already exists) you should do:: -- branching (aka "svn copy") of source code should usually - happen at ``svn/pypy/trunk`` level in order to have a full - self-contained pypy checkout for each branch. For branching - a ``try1`` branch you would for example do:: + hg branch try1 + + The branch will be recorded in the repository only after a commit. To switch + back to the default branch:: + + hg update default + + For further details use the help or refer to the `official wiki`_:: + + hg help branch - svn cp http://codespeak.net/svn/pypy/trunk \ - http://codespeak.net/svn/pypy/branch/try1 - - This allows to checkout the ``try1`` branch and receive a - self-contained working-copy for the branch. Note that - branching/copying is a cheap operation with subversion, as it - takes constant time irrespective of the size of the tree. - -- To learn more about how to use subversion read `this document`_. - -.. _`this document`: svn-help.html - - +.. _`official wiki`: http://mercurial.selenic.com/wiki/Branch .. _`using development tracker`: @@ -895,41 +835,17 @@ for the next milestone, both from an E-Mail and from a web interface. +.. 
_`development tracker`: https://codespeak.net/issue/pypy-dev/ + use your codespeak login or register ------------------------------------ -If you already committed to the PyPy source code, chances -are that you can simply use your codespeak login that -you use for subversion or for shell access. +If you have an existing codespeak account, you can use it to login within the +tracker. Else, you can `register with the tracker`_ easily. -If you are not a commiter then you can still `register with -the tracker`_ easily. - -modifying Issues from svn commit messages ------------------------------------------ - -If you are committing something related to -an issue in the development tracker you -can correlate your login message to a tracker -item by following these rules: - -- put the content of ``issueN STATUS`` on a single - new line - -- `N` must be an existing issue number from the `development tracker`_. - -- STATUS is one of:: - - unread - chatting - in-progress - testing - duplicate - resolved .. _`register with the tracker`: https://codespeak.net/issue/pypy-dev/user?@template=register -.. _`development tracker`: http://codespeak.net/issue/pypy-dev/ -.. _`roundup`: http://roundup.sf.net +.. _`roundup`: http://roundup.sourceforge.net/ .. _`testing in PyPy`: @@ -938,7 +854,7 @@ Testing in PyPy =============== -Our tests are based on the new `py.test`_ tool which lets you write +Our tests are based on the `py.test`_ tool which lets you write unittests without boilerplate. All tests of modules in a directory usually reside in a subdirectory **test**. There are basically two types of unit tests: @@ -949,15 +865,9 @@ - **Application Level tests**. They run at application level which means that they look like straight python code but they are interpreted by PyPy. -Both types of tests need an `objectspace`_ they can run with (the interpreter -dispatches operations on objects to an objectspace). 
If you run a test you -can usually give the '-o' switch to select an object space. E.g. '-o thunk' -will select the thunk object space. The default is the `Standard Object Space`_ -which aims to implement unmodified Python semantics. - .. _`standard object space`: objspace.html#standard-object-space .. _`objectspace`: objspace.html -.. _`py.test`: http://codespeak.net/py/current/doc/test.html +.. _`py.test`: http://pytest.org/ Interpreter level tests ----------------------- @@ -967,7 +877,7 @@ def test_something(space): # use space ... - class TestSomething: + class TestSomething(object): def test_some(self): # use 'self.space' here @@ -988,7 +898,7 @@ def app_test_something(): # application level test code - class AppTestSomething: + class AppTestSomething(object): def test_this(self): # application level test code @@ -1004,11 +914,8 @@ attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: - from pypy.objspace.std import StdObjSpace - - class AppTestErrno: - def setup_class(cls): - cls.space = StdObjSpace() + class AppTestErrno(object): + def setup_class(cls): cls.w_d = cls.space.wrap({"a": 1, "b", 2}) def test_dict(self): @@ -1025,7 +932,7 @@ python test_all.py file_or_directory which is a synonym for the general `py.test`_ utility -located in the ``pypy`` directory. For switches to +located in the ``py/bin/`` directory. For switches to modify test execution pass the ``-h`` option. Test conventions @@ -1036,13 +943,9 @@ actually can fail.) - All over the pypy source code there are test/ directories - which contain unittests. Such scripts can usually be executed + which contain unit tests. Such scripts can usually be executed directly or are collectively run by pypy/test_all.py -- each test directory needs a copy of pypy/tool/autopath.py which - upon import will make sure that sys.path contains the directory - where 'pypy' is in. - .. 
_`change documentation and website`: Changing documentation and website @@ -1056,11 +959,10 @@ files. Here is a `ReST quickstart`_ but you can also just look at the existing documentation and see how things work. -.. _`ReST quickstart`: http://docutils.sourceforge.net/docs/rst/quickref.html +.. _`ReST quickstart`: http://docutils.sourceforge.net/docs/user/rst/quickref.html Note that the web site of http://pypy.org/ is maintained separately. -For now it is in the repository https://bitbucket.org/pypy/extradoc -in the directory ``pypy.org``. +For now it is in the repository https://bitbucket.org/pypy/pypy.org Automatically test documentation/website changes ------------------------------------------------ @@ -1087,4 +989,4 @@ which will check that remote URLs are reachable. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/config/objspace.geninterp.rst b/pypy/doc/config/objspace.geninterp.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.geninterp.rst +++ /dev/null @@ -1,4 +0,0 @@ -This option enables `geninterp`_. This will usually make the PyPy interpreter -significantly faster (but also a bit bigger). - -.. _`geninterp`: ../geninterp.html diff --git a/pypy/doc/config/objspace.usemodules.zipimport.rst b/pypy/doc/config/objspace.usemodules.zipimport.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.zipimport.rst +++ /dev/null @@ -1,3 +0,0 @@ -This module implements zipimport mechanism described -in PEP 302. It's supposed to work and translate, so it's included -by default \ No newline at end of file diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst b/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst +++ /dev/null @@ -1,10 +0,0 @@ -Enable a pair of bytecodes that speed up method calls. -See ``pypy.interpreter.callmethod`` for a description. - -The goal is to avoid creating the bound method object in the common -case. 
So far, this only works for calls with no keyword, no ``*arg`` -and no ``**arg`` but it would be easy to extend. - -For more information, see the section in `Standard Interpreter Optimizations`_. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#lookup-method-call-method diff --git a/pypy/doc/config/objspace.usemodules.marshal.rst b/pypy/doc/config/objspace.usemodules.marshal.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.marshal.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'marshal' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/image/parsing_example10.dot b/pypy/doc/image/parsing_example10.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example10.dot +++ /dev/null @@ -1,37 +0,0 @@ -digraph G{ -"-1220061652" [label="object"]; -"-1220061652" -> "-1220127636"; -"-1220127636" [label="entry"]; -"-1220127636" -> "-1213915636"; -"-1213915636" [shape=box,label="STRING\n'a'"]; -"-1220127636" -> "-1214251156"; -"-1214251156" [shape=box,label="STRING\n'5'"]; -"-1220061652" -> "-1220063188"; -"-1220063188" [label="entry"]; -"-1220063188" -> "-1214253076"; -"-1214253076" [shape=box,label="STRING\n'b'"]; -"-1220063188" -> "-1220059444"; -"-1220059444" [label="array"]; -"-1220059444" -> "-1214253364"; -"-1214253364" [shape=box,label="NUMBER\n'1'"]; -"-1220059444" -> "-1214254292"; -"-1214254292" [shape=box,label="__0_null\n'null'"]; -"-1220059444" -> "-1214253268"; -"-1214253268" [shape=box,label="NUMBER\n'3'"]; -"-1220059444" -> "-1214252596"; -"-1214252596" [shape=box,label="__1_true\n'true'"]; -"-1220059444" -> "-1220062260"; -"-1220062260" [label="object"]; -"-1220062260" -> "-1220060116"; -"-1220060116" [label="entry"]; -"-1220060116" -> "-1214211860"; -"-1214211860" [shape=box,label="STRING\n'f'"]; -"-1220060116" -> "-1214210132"; -"-1214210132" [shape=box,label="STRING\n'g'"]; -"-1220062260" -> "-1220062868"; -"-1220062868" [label="entry"]; -"-1220062868" -> 
"-1214211956"; -"-1214211956" [shape=box,label="STRING\n'h'"]; -"-1220062868" -> "-1214212308"; -"-1214212308" [shape=box,label="NUMBER\n'6'"]; -} diff --git a/pypy/doc/image/parsing_example2.dot b/pypy/doc/image/parsing_example2.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example2.dot +++ /dev/null @@ -1,17 +0,0 @@ -digraph G{ -"-1213678004" [label="n"]; -"-1213678004" -> "-1213681108"; -"-1213681108" [shape=box,label="__0_A\n'A'"]; -"-1213678004" -> "-1213681332"; -"-1213681332" [shape=box,label="__1_,\n','"]; -"-1213678004" -> "-1213837780"; -"-1213837780" [label="n"]; -"-1213837780" -> "-1213837716"; -"-1213837716" [shape=box,label="__0_A\n'A'"]; -"-1213837780" -> "-1213839476"; -"-1213839476" [shape=box,label="__1_,\n','"]; -"-1213837780" -> "-1213839956"; -"-1213839956" [label="n"]; -"-1213839956" -> "-1213840948"; -"-1213840948" [shape=box,label="__0_A\n'A'"]; -} diff --git a/pypy/doc/config/objspace.usemodules._codecs.rst b/pypy/doc/config/objspace.usemodules._codecs.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._codecs.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_codecs' module. -Used by the 'codecs' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/externaltools.rst b/pypy/doc/externaltools.rst deleted file mode 100644 --- a/pypy/doc/externaltools.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. include:: crufty.rst - - .. 
Incomplete and wrong, superseded elsewhere
Running this -optimization will transform the block structure to contain a single -"choice block" with four exits: - -.. image:: mergedblocks.png - -This can then be turned into a switch by the C backend, allowing the C -compiler to produce more efficient code. diff --git a/pypy/doc/image/parsing_example4.dot b/pypy/doc/image/parsing_example4.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example4.dot +++ /dev/null @@ -1,27 +0,0 @@ -digraph G{ -"-1214029460" [label="n"]; -"-1214029460" -> "-1214026452"; -"-1214026452" [shape=box,label="__0_a\n'a'"]; -"-1214029460" -> "-1214028276"; -"-1214028276" [shape=box,label="__1_b\n'b'"]; -"-1214029460" -> "-1214027316"; -"-1214027316" [shape=box,label="__2_c\n'c'"]; -"-1214029460" -> "-1214026868"; -"-1214026868" [label="m"]; -"-1214026868" -> "-1214140436"; -"-1214140436" [shape=box,label="__3_(\n'('"]; -"-1214026868" -> "-1214143508"; -"-1214143508" [label="n"]; -"-1214143508" -> "-1214141364"; -"-1214141364" [shape=box,label="__0_a\n'a'"]; -"-1214143508" -> "-1214141748"; -"-1214141748" [shape=box,label="__1_b\n'b'"]; -"-1214143508" -> "-1214140756"; -"-1214140756" [shape=box,label="__2_c\n'c'"]; -"-1214143508" -> "-1214144468"; -"-1214144468" [label="m"]; -"-1214144468" -> "-1214414868"; -"-1214414868" [shape=box,label="__5_d\n'd'"]; -"-1214026868" -> "-1214141492"; -"-1214141492" [shape=box,label="__4_)\n')'"]; -} \ No newline at end of file diff --git a/pypy/doc/buildtool.rst b/pypy/doc/buildtool.rst deleted file mode 100644 --- a/pypy/doc/buildtool.rst +++ /dev/null @@ -1,251 +0,0 @@ -============ -PyPyBuilder -============ - -.. include:: crufty.rst - -What is this? -============= - -PyPyBuilder is an application that allows people to build PyPy instances on -demand. If you have a nice idle machine connected to the Internet, and don't -mind us 'borrowing' it every once in a while, you can start up the client -script (in bin/client) and have the server send compile jobs to your machine. 
-If someone requests a build of PyPy that is not already available on the PyPy -website, and your machine is capable of making such a build, the server may ask -your machine to create it. If enough people participate, with diverse enough -machines, a 'build farm' is created. - -Quick usage instructions -======================== - -For the impatient, that just want to get started, some quick instructions. - -First you'll need to have a checkout of the 'buildtool' package, that can -be found here:: - - https://codespeak.net/svn/pypy/build/buildtool - -To start a compilation, run (from the buildtool root directory):: - - $ ./bin/startcompile.py [options] - -where the options can be found by using --help, and the email address will be -used to send mail to once the compilation is finished. - -To start a build server, to participate in the build farm, do:: - - $ ./bin/buildserver.py - -That's it for the compilation script and build server, if you have your own -project and want to set up your own meta server, you'll have to be a bit more -patient and read the details below... - -Components -========== - -The application consists of 3 main components: a meta server component, a -client component that handles compilations (let's call this a 'build server') -and a small client component to start compile jobs (which we'll call -'requesting clients' for now). - -The server waits for build server to register, and for compile job -requests. When participating clients register, they pass the server information -about what compilations the system can handle (system info), and a set of -options to use for compilation (compile info). - -When now a requesting client requests a compilation job, the server checks -whether a suitable binary is already available based on the system and compile -info, and if so returns that. 
If there isn't one, the server walks through a -list of connected participating clients to see if one of them can handle the -job, and if so dispatches the compilation. If there's no participating client -to handle the job, it gets queued until there is. - -If a client crashes during compilation, the build is restarted, or error -information is sent to the logs and requesting client, depending on the type of -error. As long as no compilation error occurs (read: on disconnects, system -errors, etc.) compilation will be retried until a build is available. - -Once a build is available, the server will send an email to all clients waiting -for the build (it could be that more than one person asked for some build at -the same time!). - -Configuration -============= - -There are several aspects to configuration on this system. Of course, for the -meta server, build server and startcompile components there is configuration -for the host and port to connect to, and there is some additional configuration -for things like which mailhost to use (only applies to the server), but also -there is configuration data passed around to determine what client is picked, -and what the client needs to compile exactly. - -Config file ------------ - -The host/port configuration etc. can be found in the file 'config.py' in the -build tool dir. There are several things that can be configured here, mostly -related to what application to build, and where to build it. Please read the -file carefully when setting up a new build network, or when participating for -compilation, because certain items (e.g. the svnpath_to_url function, or the -client_checkers) can make the system a lot less secure when not configured -properly. 
- -Note that all client-related configuration is done from command-line switches, -so the configuration file is supposed to be changed on a per-project basis: -unless you have specific needs, use a test version of the build tool, or are -working on another project than PyPy, you will not want to modify the it. - -System configuration --------------------- - -This information is used by the client and startcompile components. On the -participating clients this information is retrieved by querying the system, on -the requesting clients the system values are used by default, but may be -overridden (so a requesting client running an x86 can still request PPC builds, -for instance). The clients compare their own system config to that of a build -request, and will (should) refuse a build if it can not be executed because -of incompatibilities. - -Compilation configuration -------------------------- - -The third form of configuration is that of the to-be-built application itself, -its compilation arguments. This configuration is only provided by the -requesting clients, build servers can examine the information and refuse a -compilation based on this configuration (just like with the system config, see -'client_checkers' in 'config.py'). Compilation configuration can be controlled -using command-line arguments (use 'bin/startcompile.py --help' for an -overview). - -Build tool options ------------------- - -Yet another part of the configuration are the options that are used by the -startcompile.py script itself: the user can specify what SVN path (relative to -a certain base path) and what Subversion revision is desired. The revision can -either be specified exactly, or as a range of versions. - -Installation -============ - -Build Server ------------- - -Installing the system should not be required: just run './bin/buildserver' to -start. Note that it depends on the `py lib`_ (as does the rest of PyPy). 
- -When starting a build server with PyPy's default configuration, it will connect -to a meta server we have running in codespeak.net. - -Meta Server ------------ - -Also for the server there's no real setup required, and again there's a -dependency on the `py lib`_. Starting it is done by running -'./bin/metaserver'. - -Running a compile job ---------------------- - -Again installation is not required, just run './bin/startcompile.py [options] -' (see --help for the options) to start. Again, you need to have the -`py lib`_ installed. - -Normally the codespeak.net meta server will be used when this script is issued. - -.. _`py lib`: http://codespeak.net/py - -Using the build tool for other projects -======================================= - -The code for the build tool is meant to be generic. Using it for other projects -than PyPy (for which it was originally written) is relatively straight-forward: -just change the configuration, and implement a build client script (probably -highly resembling bin/buildserver.py). - -Note that there is a test project in 'tool/build/testproject' that can serve -as an example. - -Prerequisites --------------- - -Your project can use the build tool if: - - * it can be built from Python - - Of course this is a rather vague requirement: theoretically _anything_ can - be built from Python; it's just a matter of integrating it into the tool - properly... A project that can entirely be built from Python code (like - PyPy) is easier to integrate than something that is built from the command - line, though (although implementing that won't be very hard either, see - the test project for instance). - - * it is located in Subversion - - The build tool makes very little hard-coded assumptions, but having code - in Subversion is one of them. 
There are several locations in the code where - SVN is assumed: the command line options (see `build tool options`_), - the server (which checks SVN urls for validity, and converts HEAD revision - requests to actual revision ids) and and build client (which checks out the - data) all make this assumption, changing to a different revision control - system is currently not easy and unsupported (but who knows what the future - will bring). - - * it uses PyPy's config mechanism - - PyPy has a very nice, generic configuration mechanism (essentially wrapper - OptionParser stuff) that makes dealing with fragmented configuration - and command-line options a lot easier. This mechanism is used by the build - tool: it assumes configuration is provided in this format. If your project - uses this configuration mechanism already, you can provide the root Config - object from config.compile_config; if not it should be fairly straight- - forward to wrap your existing configuration with the PyPy stuff. - -Basically that's it: if your project is stored in SVN, and you don't mind using -Python a bit, it shouldn't be too hard to get things going (note that more -documentation about this subject will follow in the future). - -Web Front-End -============= - -To examine the status of the meta server, connected build servers and build -requests, there is a web server available. This can be started using -'./bin/webserver' and uses port 8080 by default (override in -config.py). - -The web server presents a number of different pages: - - * / and /metaserverstatus - meta server status - - this displays a small list of information about the meta server, such - as the amount of connected build servers, the amount of builds available, - the amount of waiting clients, etc. 
- - * /buildservers - connected build servers - - this page contains a list of all connected build servers, system - information and what build they're currently working on (if any) - - * /builds - a list of builds - - here you'll find a list of all builds, both done and in-progress and - queued ones, with links to the details pages, the date they were - requested and their status - - * /build/ - build details - - the 'build' (virtual) directory contains pages of information for each - build - each of those pages displays status information, time requested, - time started and finished (if appropriate), links to the zip and logs, - and system and compile information - -There's a build tool status web server for the meta server on codespeak.net -available at http://codespeak.net/pypy/buildstatus/. - -More info -========= - -For more information, bug reports, patches, etc., please send an email to -guido at merlinux.de. - diff --git a/pypy/doc/jit/_ref.rst b/pypy/doc/jit/_ref.txt copy from pypy/doc/jit/_ref.rst copy to pypy/doc/jit/_ref.txt diff --git a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst b/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.raisingop2direct_call.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. Transformation required by the LLVM backend. - -.. 
internal diff --git a/pypy/doc/config/translation.log.rst b/pypy/doc/config/translation.log.txt copy from pypy/doc/config/translation.log.rst copy to pypy/doc/config/translation.log.txt diff --git a/pypy/doc/jit/_ref.rst b/pypy/doc/jit/_ref.rst deleted file mode 100644 diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt copy from pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst copy to pypy/doc/config/translation.backendopt.profile_based_inline_threshold.txt diff --git a/pypy/doc/image/parsing_example7.dot b/pypy/doc/image/parsing_example7.dot deleted file mode 100644 --- a/pypy/doc/image/parsing_example7.dot +++ /dev/null @@ -1,7 +0,0 @@ -digraph G{ -"-1219505652" [label="list"]; -"-1219505652" -> "-1213518196"; -"-1213518196" [shape=box,label="DECIMAL\n'1'"]; -"-1219505652" -> "-1213520308"; -"-1213520308" [shape=box,label="DECIMAL\n'2'"]; -} \ No newline at end of file diff --git a/pypy/doc/translation-aspects.rst b/pypy/doc/translation-aspects.rst deleted file mode 100644 --- a/pypy/doc/translation-aspects.rst +++ /dev/null @@ -1,482 +0,0 @@ -.. include:: crufty.rst -.. ^^ old and needs updating - -========================================================================================== -Memory management and threading models as translation aspects -- solutions and challenges -========================================================================================== - -.. contents:: - - -Introduction -============= - -One of the goals of the PyPy project is to have the memory and concurrency -models flexible and changeable without having to reimplement the -interpreter manually. In fact, PyPy, by the time of the 0.8 release contains code for memory -management and concurrency models which allows experimentation without -requiring early design decisions. 
This document describes many of the more -technical details of the current state of the implementation of the memory -object model, automatic memory management and concurrency models and describes -possible future developments. - - -The low level object model -=========================== - -One important part of the translation process is *rtyping* [DLT]_, [TR]_. -Before that step all objects in our flow graphs are annotated with types at the -level of the RPython type system which is still quite high-level and -target-independent. During rtyping they are transformed into objects that -match the model of the specific target platform. For C or C-like targets this -model consists of a set of C-like types like structures, arrays and functions -in addition to primitive types (integers, characters, floating point numbers). -This multi-stage approach gives a lot of flexibility in how a given object is -represented at the target's level. The RPython process can decide what -representation to use based on the type annotation and on the way the object is -used. - -In the following the structures used to represent RPython classes are described. -There is one "vtable" per RPython class, with the following structure: The root -class "object" has a vtable of the following type (expressed in a C-like -syntax):: - - struct object_vtable { - struct object_vtable* parenttypeptr; - RuntimeTypeInfo * rtti; - Signed subclassrange_min; - Signed subclassrange_max; - array { char } * name; - struct object * instantiate(); - } - -The structure members ``subclassrange_min`` and ``subclassrange_max`` are used -for subclass checking (see below). Every other class X, with parent Y, has the -structure:: - - struct vtable_X { - struct vtable_Y super; // inlined - ... // extra class attributes - } - -The extra class attributes usually contain function pointers to the methods -of that class, although the data class attributes (which are supported by the -RPython object model) are stored there. 
- -The type of the instances is:: - - struct object { // for instances of the root class - struct object_vtable* typeptr; - } - - struct X { // for instances of every other class - struct Y super; // inlined - ... // extra instance attributes - } - -The extra instance attributes are all the attributes of an instance. - -These structure layouts are quite similar to how classes are usually -implemented in C++. - -Subclass checking ------------------ - -The way we do subclass checking is a good example of the flexibility provided -by our approach: in the beginning we were using a naive linear lookup -algorithm. Since subclass checking is quite a common operation (it is also used -to check whether an object is an instance of a certain class), we wanted to -replace it with the more efficient relative numbering algorithm (see [PVE]_ for -an overview of techniques). This was a matter of changing just the appropriate -code of the rtyping process to calculate the class-ids during rtyping and -insert the necessary fields into the class structure. It would be similarly -easy to switch to another implementation. - -Identity hashes ---------------- - -In the RPython type system, class instances can be used as dictionary keys using -a default hash implementation based on identity, which in practice is -implemented using the memory address. This is similar to CPython's behavior -when no user-defined hash function is present. The annotator keeps track of the -classes for which this hashing is ever used. - -One of the peculiarities of PyPy's approach is that live objects are analyzed -by our translation toolchain. This leads to the presence of instances of RPython -classes that were built before the translation started. These are called -"pre-built constants" (PBCs for short). During rtyping, these instances must be -converted to the low level model. 
One of the problems with doing this is that -the standard hash implementation of Python is to take the id of an object, which - -is just the memory address. If the RPython program explicitly captures the -hash of a PBC by storing it (for example in the implementation of a data -structure) then the stored hash value will not match the value of the object's -address after translation. - -To prevent this the following strategy is used: for every class whose instances -are hashed somewhere in the program (either when storing them in a -dictionary or by calling the hash function) an extra field is introduced in the -structure used for the instances of that class. For PBCs of such a class this -field is used to store the memory address of the original object and new objects -have this field initialized to zero. The hash function for instances of such a -class stores the object's memory address in this field if it is zero. The -return value of the hash function is the content of the field. This means that -instances of such a class that are converted PBCs retain the hash values they -had before the conversion whereas new objects of the class have their memory -address as hash values. A strategy along these lines would in any case have been -required if we ever switch to using a copying garbage collector. - -Cached functions with PBC arguments ------------------------------------- - -As explained in [DLT]_ the annotated code can contain -functions from a finite set of PBCs to something else. The set itself has to be -finite but its content does not need to be provided explicitly but is discovered -as the annotation of the input argument by the annotator itself. This kind of -function is translated by recording the input-result relationship by calling -the function concretely at annotation time, and adding a field to the PBCs in -the set and emitting code reading that field instead of the function call. 
- -Changing the representation of an object ----------------------------------------- - -One example of the flexibility the RTyper provides is how we deal with lists. -Based on information gathered by the annotator the RTyper chooses between two -different list implementations. If a list never changes its size after creation, -a low-level array is used directly. For lists which might be resized, a -representation consisting of a structure with a pointer to an array is used, -together with over-allocation. - -We plan to use similar techniques to use tagged pointers instead of using boxing -to represent builtin types of the PyPy interpreter such as integers. This would -require attaching explicit hints to the involved classes. Field access would -then be translated to the corresponding masking operations. - - -Automatic Memory Management Implementations -============================================ - -The whole implementation of the PyPy interpreter assumes automatic memory -management, e.g. automatic reclamation of memory that is no longer used. The -whole analysis toolchain also assumes that memory management is being taken -care of -- only the backends have to concern themselves with that issue. For -backends that target environments that have their own garbage collector, like -.NET or Java, this is not an issue. For other targets like C -the backend has to produce code that uses some sort of garbage collection. - -This approach has several advantages. It makes it possible to target different -platforms, with and without integrated garbage collection. Furthermore, the -interpreter implementation is not complicated by the need to do explicit memory -management everywhere. Even more important the backend can optimize the memory -handling to fit a certain situation (like a machine with very restricted -memory) or completely replace the memory management technique or memory model -with a different one without the need to change source code. 
Additionally, -the backend can use information that was inferred by the rest of the toolchain -to improve the quality of memory management. - -Using the Boehm garbage collector ------------------------------------ - -Currently there are two different garbage collectors implemented in the C -backend (which is the most complete backend right now). One of them uses the -existing Boehm-Demers-Weiser garbage collector [BOEHM]_. For every memory -allocating operation in a low level flow graph the C backend introduces a call -to a function of the boehm collector which returns a suitable amount of memory. -Since the C backend has a lot of information available about the data structure -being allocated it can choose the memory allocation function out of the Boehm -API that fits best. For example, for objects that do not contain references to -other objects (e.g. strings) there is a special allocation function which -signals to the collector that it does not need to consider this memory when -tracing pointers. - -Using the Boehm collector has disadvantages as well. The problems stem from the -fact that the Boehm collector is conservative which means that it has to -consider every word in memory as a potential pointer. Since PyPy's toolchain -has complete knowledge of the placement of data in memory we can generate an -exact garbage collector that considers only genuine pointers. - -Using a simple reference counting garbage collector ------------------------------------------------------ - -The other implemented garbage collector is a simple reference counting scheme. -The C backend inserts a reference count field into every structure that has to be -handled by the garbage collector and puts increment and decrement operations -for this reference count into suitable places in the resulting C code. After -every reference decrement operations a check is performed whether the reference -count has dropped to zero. 
If this is the case the memory of the object will be -reclaimed after the reference counts of the objects the original object -refers to are decremented as well. - -The current placement of reference counter updates is far from optimal: The -reference counts are updated much more often than theoretically necessary (e.g. -sometimes a counter is increased and then immediately decreased again). -Objects passed into a function as arguments can almost always use a "trusted reference", -because the call-site is responsible to create a valid reference. -Furthermore some more analysis could show that some objects don't need a -reference counter at all because they either have a very short, foreseeable -life-time or because they live exactly as long as another object. - -Another drawback of the current reference counting implementation is that it -cannot deal with circular references, which is a fundamental flaw of reference -counting memory management schemes in general. CPython solves this problem by -having special code that handles circular garbage which PyPy lacks at the -moment. This problem has to be addressed in the future to make the reference -counting scheme a viable garbage collector. Since reference counting is quite -successfully used by CPython it will be interesting to see how far it can be -optimized for PyPy. - -Simple escape analysis to remove memory allocation ---------------------------------------------------- - -We also implemented a technique to reduce the amount of memory allocation. -Sometimes it is possible to deduce from the flow graphs that an object lives -exactly as long as the stack frame of the function it is allocated in. -This happens if no pointer to the object is stored into another object and if -no pointer to the object is returned from the function. If this is the case and -if the size of the object is known in advance the object can be allocated on -the stack. 
To achieve this, the object is "exploded", that means that for every -element of the structure a new variable is generated that is handed around in -the graph. Reads from elements of the structure are removed and just replaced -by one of the variables, writes by assignments to same. - -Since quite a lot of objects are allocated in small helper functions, this -simple approach which does not track objects across function boundaries only -works well in the presence of function inlining. - -A general garbage collection framework --------------------------------------- - -In addition to the garbage collectors implemented in the C backend we have also -started writing a more general toolkit for implementing exact garbage -collectors in Python. The general idea is to express the garbage collection -algorithms in Python as well and translate them as part of the translation -process to C code (or whatever the intended platform is). - -To be able to access memory in a low level manner there are special ``Address`` -objects that behave like pointers to memory and can be manipulated accordingly: -it is possible to read/write to the location they point to a variety of data -types and to do pointer arithmetic. These objects are translated to real -pointers and the appropriate operations. When run on top of CPython there is a -*memory simulator* that makes the address objects behave like they were -accessing real memory. In addition the memory simulator contains a number of -consistency checks that expose common memory handling errors like dangling -pointers, uninitialized memory, etc. - -At the moment we have three simple garbage collectors implemented for this -framework: a simple copying collector, a mark-and-sweep collector and a -deferred reference counting collector. These garbage collectors work when run on -top of the memory simulator, but at the moment it is not yet possible to translate -PyPy to C with them. 
This is because it is not easy to -find the root pointers that reside on the C stack -- both because the C stack layout is -heavily platform dependent, and also due to the possibility of roots that are not -only on the stack but also hiding in registers (which would give a problem for *moving -garbage collectors*). - -There are several possible solutions for this problem: One -of them is to not use C compilers to generate machine code, so that the stack -frame layout gets into our control. This is one of the tasks that need to be -tackled in phase 2, as directly generating assembly is needed anyway for a -just-in-time compiler. The other possibility (which would be much easier to -implement) is to move all the data away from the stack to the heap -before collecting garbage, as described in section "Stackless C code" below. - -Concurrency Model Implementations -============================================ - -At the moment we have implemented two different concurrency models, and the -option to not support concurrency at all -(another proof of the modularity of our approach): -threading with a global interpreter lock and a "stackless" model. - -No threading -------------- - -By default, multi-threading is not supported at all, which gives some small -benefits for single-threaded applications since even in the single-threaded -case there is some overhead if threading capabilities are built into -the interpreter. - -Threading with a Global Interpreter Lock ------------------------------------------- - -Right now, there is one non-trivial threading model implemented. It follows -the threading implementation of CPython and thus uses a global interpreter -lock. This lock prevents any two threads from interpreting python code at -the same time. The global interpreter lock is released around calls to blocking I/O -functions. 
This approach has a number of advantages: it gives very little -runtime penalty for single-threaded applications, makes many of the common uses -for threading possible, and it is relatively easy to implement and maintain. It has -the disadvantage that multiple threads cannot be distributed across multiple -processors. - -To make this threading-model usable for I/O-bound applications, the global -interpreter lock should be released around blocking external function calls -(which is also what CPython does). This has been partially implemented. - - -Stackless C code ------------------ - -"Stackless" C code is C code that only uses a bounded amount of -space in the C stack, and that can generally obtain explicit -control of its own stack. This is commonly known as "continuations", -or "continuation-passing style" code, although in our case we will limit -ourselves to single-shot continuations, i.e. continuations that are -captured and subsequently will be resumed exactly once. - -The technique we have implemented is based on the recurring idea -of emulating this style via exceptions: a specific program point can -generate a pseudo-exception whose purpose is to unwind the whole C stack -in a restartable way. More precisely, the "unwind" exception causes -the C stack to be saved into the heap in a compact and explicit -format, as described below. It is then possible to resume only the -innermost (most recent) frame of the saved stack -- allowing unlimited -recursion on OSes that limit the size of the C stack -- or to resume a -different previously-saved C stack altogether, thus implementing -coroutines or light-weight threads. - -In our case, exception handling is always explicit in the generated code: -the C backend puts a cheap check -after each call site to detect if the callee exited -normally or generated an exception. So when compiling functions in -stackless mode, the generated exception handling code special-cases the -new "unwind" exception. 
This exception causes the current function to -respond by saving its local variables to a heap structure (a linked list -of records, one per stack frame) and then propagating the exception -outwards. Eventually, at the end of the frame chain, the outermost -function is a manually-written dispatcher that catches the "unwind" -exception. - -At this point, the whole C stack is stored away in the heap. This is a -very interesting state in itself, because precisely there is no C stack -below the dispatcher -left. It is this which will allow us to write all the algorithms -in a portable way, that -normally require machine-specific code to inspect the stack, -in particular garbage collectors. - -To continue execution, the dispatcher can resume either the freshly saved or a -completely different stack. Moreover, it can resume directly the innermost -(most recent) saved frame in the heap chain, without having to resume all -intermediate frames first. This not only makes stack switches fast, but it -also allows the frame to continue to run on top of a clean C stack. When that -frame eventually exits normally, it returns to the dispatcher, which then -invokes the previous (parent) saved frame, and so on. We insert stack checks -before calls that can lead to recursion by detecting cycles in the call graph. -These stack checks copy the stack to the heap (by raising the special -exception) if it is about to grow deeper than a certain level. -As a different point of view, the C stack can also be considered as a cache -for the heap-based saved frames in this model. When we run out -of C stack space, we flush the cache. When the cache is empty, we fill it with -the next item from the heap. - -To give the translated program some amount of control over the -heap-based stack structures and over the top-level dispatcher that jumps -between them, there are a few "external" functions directly implemented -in C. 
These functions provide an elementary interface, on top of which -useful abstractions can be implemented, like: - -* coroutines: explicitly switching code, similar to Greenlets [GREENLET]_. - -* "tasklets": cooperatively-scheduled microthreads, as introduced in - Stackless Python [STK]_. - -* implicitly-scheduled (preemptive) microthreads, also known as green threads. - -An important property of the changes in all the generated C functions is -that they are written in a way that does only minimally degrade their performance in -the non-exceptional case. Most optimizations performed by C compilers, -like register allocation, continue to work... - -The following picture shows a graph function together with the modifications -necessary for the stackless style: the check whether the stack is too big and -should be unwound, the check whether we are in the process of currently storing -away the stack and the check whether the call to the function is not a regular -call but a reentry call. - -.. graphviz:: image/stackless_informal.dot - :scale: 70 - - -Future work -================ - -open challenges for phase 2: - -Garbage collection ------------------- - -One of the biggest missing features of our current garbage collectors is -finalization. At present finalizers are simply not invoked if an object is -freed by the garbage collector. Along the same lines weak references are not -supported yet. It should be possible to implement these with a reasonable -amount of effort for reference counting as well as the Boehm collector (which -provides the necessary hooks). - -Integrating the now simulated-only GC framework into the rtyping process and -the code generation will require considerable effort. It requires being able to -keep track of the GC roots which is hard to do with portable C code. One -solution would be to use the "stackless" code since it can move the stack -completely to the heap. 
We expect that we can implement GC read and write -barriers as function calls and rely on inlining to make them more efficient. - -We may also spend some time on improving the existing reference counting -implementation by removing unnecessary incref-decref pairs and identifying -trustworthy references. A bigger task would -be to add support for detecting circular references. - - -Threading model ---------------- - -One of the interesting possibilities that stackless offers is to implement *green -threading*. This would involve writing a scheduler and some preemption logic. - -We should also investigate other threading models based on operating system -threads with various granularities of locking for access of shared objects. - -Object model ------------- - -We also might want to experiment with more sophisticated structure inlining. -Sometimes it is possible to find out that one structure object -allocated on the heap lives exactly as long as another structure object on the -heap pointing to it. If this is the case it is possible to inline the first -object into the second. This saves the space of one pointer and avoids -pointer-chasing. - - -Conclusion -=========== - -As concretely shown with various detailed examples, our approach gives us -flexibility and lets us choose various aspects at translation time instead -of encoding them into the implementation itself. - -References -=========== - -.. [BOEHM] `Boehm-Demers-Weiser garbage collector`_, a garbage collector - for C and C++, Hans Boehm, 1988-2004 -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ - -.. [GREENLET] `Lightweight concurrent programming`_, py-lib Documentation 2003-2005 -.. _`Lightweight concurrent programming`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt - -.. [STK] `Stackless Python`_, a Python implementation that does not use - the C stack, Christian Tismer, 1999-2004 -.. _`Stackless Python`: http://www.stackless.com - -.. 
[TR] `Translation`_, PyPy documentation, 2003-2005 -.. _`Translation`: translation.html - -.. [LE] `Encapsulating low-level implementation aspects`_, - PyPy documentation (and EU deliverable D05.4), 2005 -.. _`Encapsulating low-level implementation aspects`: low-level-encapsulation.html - -.. [DLT] `Compiling dynamic language implementations`_, - PyPy documentation (and EU deliverable D05.1), 2005 -.. _`Compiling dynamic language implementations`: http://codespeak.net/svn/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf - -.. [PVE] `Simple and Efficient Subclass Tests`_, Jonathan Bachrach, Draft submission to ECOOP-02, 2001 -.. _`Simple and Efficient Subclass Tests`: http://people.csail.mit.edu/jrb/pve/pve.pdf diff --git a/pypy/doc/navlist b/pypy/doc/navlist deleted file mode 100644 --- a/pypy/doc/navlist +++ /dev/null @@ -1,9 +0,0 @@ -[ - 'architecture.html', - 'getting-started.html', - 'coding-guide.html', - 'objspace.html', - 'translation.html', -# 'misc.html', - 'theory.html', -] diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.rst b/pypy/doc/config/objspace.usemodules.pypyjit.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.pypyjit.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'pypyjit' module. diff --git a/pypy/doc/config/translation.secondaryentrypoints.rst b/pypy/doc/config/translation.secondaryentrypoints.rst deleted file mode 100644 --- a/pypy/doc/config/translation.secondaryentrypoints.rst +++ /dev/null @@ -1,1 +0,0 @@ -Enable secondary entrypoints support list. Needed for cpyext module. 
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.rst b/pypy/doc/config/objspace.std.getattributeshortcut.txt copy from pypy/doc/config/objspace.std.getattributeshortcut.rst copy to pypy/doc/config/objspace.std.getattributeshortcut.txt diff --git a/pypy/doc/config/objspace.usemodules.zlib.rst b/pypy/doc/config/objspace.usemodules.zlib.txt copy from pypy/doc/config/objspace.usemodules.zlib.rst copy to pypy/doc/config/objspace.usemodules.zlib.txt diff --git a/pypy/doc/config/translation.backendopt.inline_heuristic.rst b/pypy/doc/config/translation.backendopt.inline_heuristic.txt copy from pypy/doc/config/translation.backendopt.inline_heuristic.rst copy to pypy/doc/config/translation.backendopt.inline_heuristic.txt diff --git a/pypy/doc/config/objspace.usemodules.symbol.rst b/pypy/doc/config/objspace.usemodules.symbol.txt copy from pypy/doc/config/objspace.usemodules.symbol.rst copy to pypy/doc/config/objspace.usemodules.symbol.txt diff --git a/pypy/doc/config/translation.instrument.rst b/pypy/doc/config/translation.instrument.rst deleted file mode 100644 --- a/pypy/doc/config/translation.instrument.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. internal diff --git a/pypy/doc/config/translation.make_jobs.rst b/pypy/doc/config/translation.make_jobs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.make_jobs.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify number of make jobs for make command. diff --git a/pypy/doc/config/objspace.rst b/pypy/doc/config/objspace.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.rst +++ /dev/null @@ -1,1 +0,0 @@ -.. 
intentionally empty diff --git a/pypy/doc/config/objspace.usemodules.rctime.rst b/pypy/doc/config/objspace.usemodules.rctime.txt copy from pypy/doc/config/objspace.usemodules.rctime.rst copy to pypy/doc/config/objspace.usemodules.rctime.txt diff --git a/pypy/doc/config/objspace.usemodules._sre.rst b/pypy/doc/config/objspace.usemodules._sre.txt copy from pypy/doc/config/objspace.usemodules._sre.rst copy to pypy/doc/config/objspace.usemodules._sre.txt diff --git a/pypy/doc/discussion/thoughts_string_interning.rst b/pypy/doc/discussion/thoughts_string_interning.rst deleted file mode 100644 --- a/pypy/doc/discussion/thoughts_string_interning.rst +++ /dev/null @@ -1,211 +0,0 @@ -String Interning in PyPy -======================== - -A few thoughts about string interning. CPython gets a remarkable -speed-up by interning strings. Interned are all builtin string -objects and all strings used as names. The effect is that when -a string lookup is done during instance attribute access, -the dict lookup method will find the string always by identity, -saving the need to do a string comparison. - -Interned Strings in CPython ---------------------------- - -CPython keeps an internal dictionary named ``interned`` for all of these -strings. It contains the string both as key and as value, which means -there are two extra references in principle. Upto Version 2.2, interned -strings were considered immortal. Once they entered the ``interned`` dict, -nothing could revert this memory usage. - -Starting with Python 2.3, interned strings became mortal by default. -The reason was less memory usage for strings that have no external -reference any longer. This seems to be a worthwhile enhancement. -Interned strings that are really needed always have a real reference. -Strings which are interned for temporary reasons get a big speed up -and can be freed after they are no longer in use. 
- -This was implemented by making the ``interned`` dictionary a weak dict, -by lowering the refcount of interned strings by 2. The string deallocator -got extra handling to look into the ``interned`` dict when a string is deallocated. -This is supported by the state variable on string objects which tells -whether the string is not interned, immortal or mortal. - -Implementation problems for PyPy --------------------------------- - -- The CPython implementation makes explicit use of the refcount to handle - the weak-dict behavior of ``interned``. PyPy does not expose the implementation - of object aliveness. Special handling would be needed to simulate mortal - behavior. A possible but expensive solution would be to use a real - weak dictionary. Another way is to add a special interface to the backend - that allows either the two extra references to be reset, or for the - boehm collector to exclude the ``interned`` dict from reference tracking. - -- PyPy implements quite complete internal strings, as opposed to CPython - which always uses its "applevel" strings. It also supports low-level - dictionaries. This adds some complication to the issue of interning. - Additionally, the interpreter currently handles attribute access - by calling wrap(str) on the low-level attribute string when executing - frames. This implies that we have to primarily intern low-level strings - and cache the created string objects on top of them. - A possible implementation would use a dict with ll string keys and the - string objects as values. In order to save the extra dict lookup, we also - could consider to cache the string object directly on a field of the rstr, - which of course adds some extra cost. Alternatively, a fast id-indexed - extra dictionary can provide the mapping from rstr to interned string object. - But for efficiency reasons, it is anyway necessary to put an extra flag about - interning on the strings. 
Flagging this by putting the string object itself - as the flag might be acceptable. A dummyobject can be used if the interned - rstr is not exposed as an interned string object. - -Update: a reasonably simple implementation -------------------------------------------- - -Instead of the complications using the stringobject as a property of an rstr -instance, I propose to special case this kind of dictionary (mapping rstr -to stringobject) and to put an integer ``interned`` field into the rstr. The -default is -1 for not interned. Non-negative values are the direct index -of this string into the interning dict. That is, we grow an extra function -that indexes the dict by slot number of the dict table and gives direct -access to its value. The dictionary gets special handling on dict_resize, -to recompute the slot numbers of the interned strings. ATM I'd say we leave -the strings immortal and support mortality later when we have a cheap -way to express this (less refcount, exclusion from Boehm, whatever). - -A prototype brute-force patch ------------------------------ - -In order to get some idea how efficient string interning is at the moment, -I implemented a quite crude version of interning. I patched space.wrap -to call this intern_string instead of W_StringObject:: - - def intern_string(space, str): - if we_are_translated(): - _intern_ids = W_StringObject._intern_ids - str_id = id(str) - w_ret = _intern_ids.get(str_id, None) - if w_ret is not None: - return w_ret - _intern = W_StringObject._intern - if str not in _intern: - _intern[str] = W_StringObject(space, str) - W_StringObject._intern_keep[str_id] = str - _intern_ids[str_id] = w_ret = _intern[str] - return w_ret - else: - return W_StringObject(space, str) - -This is no general solution at all, since it a) does not provide -interning of rstr and b) interns every app-level string. 
The -implementation is also by far not as efficient as it could be, -because it utilizes an extra dict _intern_ids which maps the -id of the rstr to the string object, and a dict _intern_keep to -keep these ids alive. - -With just a single _intern dict from rstr to string object, the -overall performance degraded slightly instead of an advantage. -The triple dict patch accelerates richards by about 12 percent. -Since it still has the overhead of handling the extra dicts, -I guess we can expect twice the acceleration if we add proper -interning support. - -The resulting estimated 24 % acceleration is still not enough -to justify an implementation right now. - -Here the results of the richards benchmark:: - - D:\pypy\dist\pypy\translator\goal>pypy-c-17516.exe -c "from richards import *;Richards.iterations=1;main()" - debug: entry point starting - debug: argv -> pypy-c-17516.exe - debug: argv -> -c - debug: argv -> from richards import *;Richards.iterations=1;main() - Richards benchmark (Python) starting... [] - finished. - Total time for 1 iterations: 38 secs - Average time for iterations: 38885 ms - - D:\pypy\dist\pypy\translator\goal>pypy-c.exe -c "from richards import *;Richards.iterations=1;main()" - debug: entry point starting - debug: argv -> pypy-c.exe - debug: argv -> -c - debug: argv -> from richards import *;Richards.iterations=1;main() - Richards benchmark (Python) starting... [] - finished. - Total time for 1 iterations: 34 secs - Average time for iterations: 34388 ms - - D:\pypy\dist\pypy\translator\goal> - - -This was just an exercise to get an idea. For sure this is not to be checked in. -Instead, I'm attaching the simple patch here for reference. 
-:: - - Index: objspace/std/objspace.py - =================================================================== - --- objspace/std/objspace.py (revision 17526) - +++ objspace/std/objspace.py (working copy) - @@ -243,6 +243,9 @@ - return self.newbool(x) - return W_IntObject(self, x) - if isinstance(x, str): - + # XXX quick speed testing hack - + from pypy.objspace.std.stringobject import intern_string - + return intern_string(self, x) - return W_StringObject(self, x) - if isinstance(x, unicode): - return W_UnicodeObject(self, [unichr(ord(u)) for u in x]) # xxx - Index: objspace/std/stringobject.py - =================================================================== - --- objspace/std/stringobject.py (revision 17526) - +++ objspace/std/stringobject.py (working copy) - @@ -18,6 +18,10 @@ - class W_StringObject(W_Object): - from pypy.objspace.std.stringtype import str_typedef as typedef - - + _intern_ids = {} - + _intern_keep = {} - + _intern = {} - + - def __init__(w_self, space, str): - W_Object.__init__(w_self, space) - w_self._value = str - @@ -32,6 +36,21 @@ - - registerimplementation(W_StringObject) - - +def intern_string(space, str): - + if we_are_translated(): - + _intern_ids = W_StringObject._intern_ids - + str_id = id(str) - + w_ret = _intern_ids.get(str_id, None) - + if w_ret is not None: - + return w_ret - + _intern = W_StringObject._intern - + if str not in _intern: - + _intern[str] = W_StringObject(space, str) - + W_StringObject._intern_keep[str_id] = str - + _intern_ids[str_id] = w_ret = _intern[str] - + return w_ret - + else: - + return W_StringObject(space, str) - - def _isspace(ch): - return ord(ch) in (9, 10, 11, 12, 13, 32) - Index: objspace/std/stringtype.py - =================================================================== - --- objspace/std/stringtype.py (revision 17526) - +++ objspace/std/stringtype.py (working copy) - @@ -47,6 +47,10 @@ - if space.is_true(space.is_(w_stringtype, space.w_str)): - return w_obj # XXX might be reworked when 
space.str() typechecks - value = space.str_w(w_obj) - + # XXX quick hack to check interning effect - + w_obj = W_StringObject._intern.get(value, None) - + if w_obj is not None: - + return w_obj - w_obj = space.allocate_instance(W_StringObject, w_stringtype) - W_StringObject.__init__(w_obj, space, value) - return w_obj - -ciao - chris diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,100 +6,155 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Amaury Forgeot d'Arc + Antonio Cuni Samuele Pedroni - Antonio Cuni Michael Hudson + Holger Krekel Christian Tismer - Holger Krekel + Benjamin Peterson Eric van Riet Paap + Anders Chrigström + Håkan Ardö Richard Emslie - Anders Chrigstrom - Amaury Forgeot d Arc - Aurelien Campeas + Dan Villiom Podlaski Christiansen + Alexander Schremmer + Alex Gaynor + David Schneider + Aurelién Campeas Anders Lehmann + Camillo Bruni Niklaus Haldimann + Leonardo Santagada + Toon Verwaest Seo Sanghyeon - Leonardo Santagada Lawrence Oluyede + Bartosz Skowron Jakub Gustak Guido Wesdorp - Benjamin Peterson - Alexander Schremmer + Adrien Di Mascio + Laura Creighton + Ludovic Aubry Niko Matsakis - Ludovic Aubry + Daniel Roberts + Jason Creighton + Jacob Hallén Alex Martelli - Toon Verwaest + Anders Hammarquist + Jan de Mooij Stephan Diehl - Adrien Di Mascio + Michael Foord Stefan Schwarzer Tomek Meka Patrick Maupin - Jacob Hallen - Laura Creighton Bob Ippolito - Camillo Bruni - Simon Burton Bruno Gola Alexandre Fayolle Marius Gedminas + Simon Burton + Jean-Paul Calderone + John Witulski + Wim Lavrijsen + Andreas Stührk + Jean-Philippe St. 
Pierre Guido van Rossum + Pavel Vinogradov Valentino Volonghi + Paul deGrandis Adrian Kuhn - Paul deGrandis + tav + Georg Brandl Gerald Klix Wanja Saatkamp - Anders Hammarquist + Boris Feigin Oscar Nierstrasz + Dario Bertini + David Malcolm Eugene Oden + Henry Mason Lukas Renggli Guenter Jantzen + Ronny Pfannschmidt + Bert Freudenberg + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Igor Trindade Oliveira + Lucian Branescu Mihaila + Olivier Dormond + Jared Grubb + Karl Bartel + Gabriel Lavoie + Brian Dorsey + Victor Stinner + Stuart Williams + Toby Watson + Antoine Pitrou + Justas Sadzevicius + Neil Shepperd + Mikael Schönenberg + Gasper Zejn + Jonathan David Riehl + Elmo Mäntynen + Anders Qvist + Beatrice Düring + Alexander Sedov + Vincent Legoll + Alan McIntyre + Romain Guillebert + Alex Perry + Jens-Uwe Mager + Dan Stromberg + Lukas Diekmann + Carl Meyer + Pieter Zieschang + Alejandro J. 
Cura + Sylvain Thenault + Travis Francis Athougies + Henrik Vendelbo + Lutz Paelike + Jacob Oscarson + Martin Blais + Lucio Torre + Lene Wagner + Miguel de Val Borro + Ignas Mikalajunas + Artur Lisiecki + Joshua Gilbert + Godefroid Chappelle + Yusei Tahara + Christopher Armstrong + Stephan Busemann + Gustavo Niemeyer + William Leslie + Akira Li + Kristján Valur Jonsson + Bobby Impollonia + Andrew Thompson + Anders Sigfridsson + Jacek Generowicz + Dan Colish + Sven Hager + Zooko Wilcox-O Hearn + Anders Hammarquist Dinu Gherman - Bartosz Skowron - Georg Brandl - Ben Young - Jean-Paul Calderone - Nicolas Chauvat - Rocco Moretti - Michael Twomey - boria - Jared Grubb - Olivier Dormond - Stuart Williams - Jens-Uwe Mager - Justas Sadzevicius - Mikael Schönenberg - Brian Dorsey - Jonathan David Riehl - Beatrice During - Elmo Mäntynen - Andreas Friedge - Alex Gaynor - Anders Qvist - Alan McIntyre - Bert Freudenberg - Pieter Zieschang - Jacob Oscarson - Lutz Paelike - Michael Schneider - Artur Lisiecki - Lene Wagner - Christopher Armstrong - Jan de Mooij - Jacek Generowicz - Gasper Zejn - Stephan Busemann - Yusei Tahara - Godefroid Chappelle - Toby Watson - Andrew Thompson - Joshua Gilbert - Anders Sigfridsson - David Schneider + Dan Colish + Daniel Neuhäuser Michael Chermside - tav - Martin Blais - Victor Stinner + Konrad Delong + Anna Ravencroft + Greg Price + Armin Ronacher + Jim Baker + Philip Jenvey + Rodrigo Araújo + diff --git a/pypy/doc/config/objspace.nofaking.rst b/pypy/doc/config/objspace.nofaking.txt copy from pypy/doc/config/objspace.nofaking.rst copy to pypy/doc/config/objspace.nofaking.txt diff --git a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst b/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.clever_malloc_removal.rst +++ /dev/null @@ -1,10 +0,0 @@ -Try to inline flowgraphs based on whether doing so would enable malloc -removal 
(:config:`translation.backendopt.mallocs`.) by eliminating -calls that result in escaping. This is an experimental optimization, -also right now some eager inlining is necessary for helpers doing -malloc itself to be inlined first for this to be effective. -This option enable also an extra subsequent malloc removal phase. - -Callee flowgraphs are considered candidates based on a weight heuristic like -for basic inlining. (see :config:`translation.backendopt.inline`, -:config:`translation.backendopt.clever_malloc_removal_threshold` ). diff --git a/pypy/doc/config/objspace.usemodules.sys.rst b/pypy/doc/config/objspace.usemodules.sys.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.sys.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'sys' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst deleted file mode 100644 --- a/pypy/doc/project-ideas.rst +++ /dev/null @@ -1,91 +0,0 @@ -Independent project ideas relating to PyPy -========================================== - -PyPy allows experimentation in many directions -- indeed facilitating -experimentation in language implementation was one of the main -motivations for the project. This page is meant to collect some ideas -of experiments that the core developers have not had time to perform -yet and also do not require too much in depth knowledge to get started -with. - -Feel free to suggest new ideas and discuss them in #pypy on the freenode IRC -network or the pypy-dev mailing list (see the home_ page). - ------------ - -.. contents:: - - - -JIT back-ends --------------------------------- - -PyPy's Just-In-Time compiler relies on backends for actual code -generation. We have so far a 32-bit Intel backend, and a CLI one. There is -Summer of Code project for 64bit (x86_64) backend, but other options -(ARM, llvm) remain open. - -.. _distribution: -.. 
_persistence: - -Extensions of the Python language ---------------------------------- - -+----------------------------------------------------------------------+ -| :NOTE: | -| | -| The ideas in this paragraph are marked as "experimental". We may | -| or may not be interested in helping you out. You are warned :-) | -| | -+----------------------------------------------------------------------+ - -One of the advantages of PyPy's implementation is that the Python-level type -of an object and its implementation are completely independent. This should -allow a much more intuitive interface to, for example, objects that are backed -by a persistent store. The `transparent proxy`_ objects are a key step in this -direction; now all that remains is to implement the interesting bits :-) - -An example project might be to implement functionality akin to the `ZODB's -Persistent class`_, without the need for the _p_changed hacks, and in pure -Python code (should be relatively easy on top of transparent proxy). - -Another example would be to implement a multi-CPU extension that internally -uses several processes and uses transparent proxies to share object views. - -Other ideas are to do something interesting with sandboxing_; or to -work more on the Stackless_ features (e.g. integrate it with the JIT); -or revive the logic object space, which tried to bring unification-like -features to Python. - -.. _sandboxing: sandbox.html -.. _Stackless: stackless.html - - -Other languages ---------------- - -Improve one of the `existing interpreters`__, or start a new one. -Experiment with the JIT compiler generator. - -.. __: http://codespeak.net/svn/pypy/lang/ - - -Or else... ----------- - -...or whatever else interests you! - -Feel free to mention your interest and discuss these ideas on the `pypy-dev -mailing list`_ or on the #pypy channel on irc.freenode.net. -You can also have a look around our documentation_. - - -.. 
_`efficient propagators for specialized finite domains`: http://codespeak.net/svn/pypy/extradoc/soc-2006/constraints.txt -.. _`object spaces`: objspace.html -.. _`code templating solution`: http://codespeak.net/svn/pypy/extradoc/soc-2006/code-templating.txt - -.. _documentation: docindex.html -.. _home: index.html -.. _`pypy-dev mailing list`: http://codespeak.net/mailman/listinfo/pypy-dev -.. _`ZODB's Persistent class`: http://www.zope.org/Documentation/Books/ZDG/current/Persistence.stx -.. _`transparent proxy`: objspace-proxies.html#tproxy diff --git a/pypy/doc/config/objspace.usemodules._collections.rst b/pypy/doc/config/objspace.usemodules._collections.txt copy from pypy/doc/config/objspace.usemodules._collections.rst copy to pypy/doc/config/objspace.usemodules._collections.txt diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -10,19 +10,18 @@ `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral -differences to CPython and some missing extensions, for details see `CPython +differences with CPython and some missing extensions, for details see `CPython differences`_. -.. _Django: http://djangoproject.org +.. _Django: http://djangoproject.com .. _Twisted: http://twistedmatrix.com .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing you typically do is -translate it to get a reasonably performing interpreter. This is described in -the next section. If you just want to play around a bit, you can also try -untranslated `py.py interpreter`_ (which is extremely slow, but still fast -enough for tiny examples). 
+To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. + +.. _`download a pre-built PyPy`: http://pypy.org/download.html Translating the PyPy Python interpreter --------------------------------------- @@ -33,9 +32,15 @@ .. _`windows document`: windows.html You can translate the whole of PyPy's Python interpreter to low level C code, -`CLI code`_, or `JVM code`_. +or `CLI code`_. -1. Install build-time dependencies. On a Debian box these are:: +1. First `download a pre-built PyPy`_ for your architecture which you will + use to translate your Python interpreter. It is, of course, possible to + translate with a CPython 2.6 or later, but this is not the preferred way, + because it will take a lot longer to run -- depending on your architecture, + between two and three times as long. + +2. Install build-time dependencies. On a Debian box these are:: [user at debian-box ~]$ sudo apt-get install \ gcc make python-dev libffi-dev pkg-config \ @@ -58,29 +63,33 @@ * ``libexpat1-dev`` (for the optional ``pyexpat`` module) * ``libssl-dev`` (for the optional ``_ssl`` module) * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) - * ``python-sphinx`` (for the optional documentation build) + * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) * ``python-greenlet`` (for the optional stackless support in interpreted mode/testing) -2. Translation is somewhat time-consuming (30 min to - over one hour) and RAM-hungry. If you have less than 1.5 GB of - RAM (or a slow machine) you might want to pick the + +3. Translation is time-consuming -- 45 minutes on a very fast machine -- + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. 
If your memory resources + are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of - `2` or `3` or `jit` gives much better results, though. + `2` or `3` or `jit` gives much better results, though. But if all + you want to do is to test that some new feature that you just wrote + translates, level 1 is enough. - Let me stress this another time: at ``--opt=1`` you get the Boehm + Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it. The resulting ``pypy-c`` is - slow. + You really do not want to pick it for a program you intend to use. + The resulting ``pypy-c`` is slow. -3. Run:: +4. Run:: cd pypy/translator/goal python translate.py --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want the included JIT - compiler. As of March 2011, Intel 32-bit environment needs **at - least** 2GB, and 64-bit needs 4GB. + of your choice like ``--opt=2`` if you do not want to include the JIT + compiler, which makes the Python interpreter much slower. .. _`optimization level`: config/opt.html @@ -92,22 +101,20 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.5.2 (64177, Apr 16 2009, 16:33:13) - [PyPy 1.1.0] on linux2 + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. 
And now for something completely different: ``this sentence is false'' >>>> 46 - 4 42 >>>> from test import pystone >>>> pystone.main() - Pystone(1.1) time for 50000 passes = 2.57 - This machine benchmarks at 19455.3 pystones/second + Pystone(1.1) time for 50000 passes = 0.280017 + This machine benchmarks at 178561 pystones/second >>>> This executable can be moved around or copied on other machines; see -Installation_ below. For now a JIT-enabled ``pypy-c`` always produces -debugging output to stderr when it exits, unless translated with -``--jit-debug=off``. +Installation_ below. The ``translate.py`` script takes a very large number of options controlling what to translate and how. See ``translate.py -h``. Some of the more @@ -134,7 +141,7 @@ ++++++++++++++++++++++++++++++++++++++++ It is possible to have non-standard features enabled for translation, -but they are not really tested any more. Look for example at the +but they are not really tested any more. Look, for example, at the `objspace proxies`_ document. .. _`objspace proxies`: objspace-proxies.html @@ -148,22 +155,14 @@ ./translate.py --backend=cli targetpypystandalone.py -Or better, try out the experimental `branch/cli-jit`_ described by -Antonio Cuni's `Ph.D. thesis`_ and translate with the JIT:: - - ./translate.py -Ojit --backend=cli targetpypystandalone.py - -.. _`branch/cli-jit`: http://codespeak.net/svn/pypy/branch/cli-jit/ -.. _`Ph.D. thesis`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf - The executable and all its dependencies will be stored in the ./pypy-cli-data directory. To run pypy.NET, you can run ./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use the convenience ./pypy-cli script:: $ ./pypy-cli - Python 2.5.2 (64219, Apr 17 2009, 13:54:38) - [PyPy 1.1.0] on linux2 + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0] on linux2 Type "help", "copyright", "credits" or "license" for more information. 
And now for something completely different: ``distopian and utopian chairs'' >>>> @@ -183,29 +182,31 @@ To try out the experimental .NET integration, check the documentation of the clr_ module. -.. _`JVM code`: +.. not working now: -Translating using the JVM backend -+++++++++++++++++++++++++++++++++ + .. _`JVM code`: -To create a standalone JVM executable:: + Translating using the JVM backend + +++++++++++++++++++++++++++++++++ - ./translate.py --backend=jvm targetpypystandalone.py + To create a standalone JVM executable:: -This will create a jar file ``pypy-jvm.jar`` as well as a convenience -script ``pypy-jvm`` for executing it. To try it out, simply run -``./pypy-jvm``:: + ./translate.py --backend=jvm targetpypystandalone.py - $ ./pypy-jvm - Python 2.5.2 (64214, Apr 17 2009, 08:11:23) - [PyPy 1.1.0] on darwin - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> + This will create a jar file ``pypy-jvm.jar`` as well as a convenience + script ``pypy-jvm`` for executing it. To try it out, simply run + ``./pypy-jvm``:: -Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment -the executable does not provide any interesting features, like integration with -Java. + $ ./pypy-jvm + Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) + [PyPy 1.5.0-alpha0] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + And now for something completely different: ``# assert did not crash'' + >>>> + + Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment + the executable does not provide any interesting features, like integration with + Java. 
Installation ++++++++++++ @@ -218,19 +219,19 @@ For installation purposes, note that the executable needs to be able to find its version of the Python standard library in the following three -directories: ``lib-python/2.5.2``, ``lib-python/modified-2.5.2`` and +directories: ``lib-python/2.7``, ``lib-python/modified-2.7`` and ``lib_pypy``. They are located by "looking around" starting from the directory in which the executable resides. The current logic is to try to find a ``PREFIX`` from which the directories -``PREFIX/lib-python/2.5.2`` and ``PREFIX/lib-python/modified.2.5.2`` and +``PREFIX/lib-python/2.7`` and ``PREFIX/lib-python/modified.2.7`` and ``PREFIX/lib_pypy`` can all be found. The prefixes that are tried are:: . - ./lib/pypy1.2 + ./lib/pypy1.5 .. - ../lib/pypy1.2 + ../lib/pypy1.5 ../.. - ../../lib/pypy-1.2 + ../../lib/pypy-1.5 ../../.. etc. @@ -240,22 +241,6 @@ most code will be fine. However, the ``sys.prefix`` will be unset and some existing libraries assume that this is never the case. -In order to use ``distutils`` or ``setuptools`` a directory ``PREFIX/site-packages`` needs to be created. Here's an example session setting up and using ``easy_install``:: - - $ cd PREFIX - $ mkdir site-packages - $ curl -sO http://peak.telecommunity.com/dist/ez_setup.py - $ bin/pypy-c ez_setup.py - ... - $ bin/easy_install WebOb - $ bin/pypy-c - Python 2.5.2 (64714, Apr 27 2009, 08:16:13) - [PyPy 1.1.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``PyPy doesn't have copolyvariadic dependently-monomorphed hyperfluxads'' - >>>> import webob - >>>> - .. 
_`py.py interpreter`: Running the Python Interpreter Without Translation @@ -265,7 +250,7 @@ +++++++++++++++++++++ To start interpreting Python with PyPy, install a C compiler that is -supported by distutils and use Python 2.4 or greater to run PyPy:: +supported by distutils and use Python 2.5 or greater to run PyPy:: cd pypy python bin/py.py @@ -305,7 +290,7 @@ Alternatively, as with regular Python, you can simply give a script name on the command line:: - python py.py ../../lib-python/2.5.2/test/pystone.py 10 + python py.py ../../lib-python/2.7/test/pystone.py 10 See our `configuration sections`_ for details about what all the commandline options do. @@ -317,4 +302,4 @@ .. _clr: clr-module.html .. _`CPythons core language regression tests`: http://codespeak.net:8099/summary?category=applevel&branch=%3Ctrunk%3E -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/config/translation.noprofopt.rst b/pypy/doc/config/translation.noprofopt.rst deleted file mode 100644 diff --git a/pypy/doc/discussion/ctypes_modules.rst b/pypy/doc/discussion/ctypes_modules.rst deleted file mode 100644 --- a/pypy/doc/discussion/ctypes_modules.rst +++ /dev/null @@ -1,65 +0,0 @@ -what is needed for various ctypes-based modules and how feasible they are -========================================================================== - -Quick recap for module evaluation: - -1. does the module use callbacks? - -2. how sophisticated ctypes usage is (accessing of _objects?) - -3. any specific tricks - -4. does it have tests? - -5. dependencies - -6. does it depend on cpython c-api over ctypes? - -Pygame -====== - -1. yes, for various things, but basic functionality can be achieved without - -2. probably not - -3. not that I know of - -4. yes for tests, no for unittests - -5. numpy, but can live without, besides only C-level dependencies. On OS/X - it requires PyObjC. - -6. no - - -PyOpenGL -======== - -1. yes, for GLX, but not for the core functionality - -2. probably not - -3. 
all the code is auto-generated - -4. it has example programs, no tests - -5. numpy, but can live without it. can use various surfaces (including pygame) to draw on - -6. no - - -Sqlite -====== - -1. yes, but I think it's not necessary - -2. no - -3. no - -4. yes - -5. datetime - -6. it passes py_object around in few places, not sure why (probably as an - opaque argument). diff --git a/pypy/doc/config/objspace.std.prebuiltintfrom.rst b/pypy/doc/config/objspace.std.prebuiltintfrom.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.prebuiltintfrom.rst +++ /dev/null @@ -1,1 +0,0 @@ -see :config:`objspace.std.withprebuiltint`. diff --git a/pypy/doc/config/translation.countmallocs.rst b/pypy/doc/config/translation.countmallocs.rst deleted file mode 100644 --- a/pypy/doc/config/translation.countmallocs.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal; used by some of the C backend tests to check that the number of -allocations matches the number of frees. - -.. internal diff --git a/pypy/doc/config/objspace.usemodules._io.rst b/pypy/doc/config/objspace.usemodules._io.txt copy from pypy/doc/config/objspace.usemodules._io.rst copy to pypy/doc/config/objspace.usemodules._io.txt diff --git a/pypy/doc/config/translation.output.rst b/pypy/doc/config/translation.output.rst deleted file mode 100644 --- a/pypy/doc/config/translation.output.rst +++ /dev/null @@ -1,1 +0,0 @@ -Specify file name that the produced executable gets. diff --git a/pypy/doc/config/objspace.usemodules._winreg.rst b/pypy/doc/config/objspace.usemodules._winreg.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._winreg.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the built-in '_winreg' module, provides access to the Windows registry. -This module is expected to be working and is included by default on Windows. 
diff --git a/pypy/doc/config/objspace.usemodules.clr.rst b/pypy/doc/config/objspace.usemodules.clr.txt copy from pypy/doc/config/objspace.usemodules.clr.rst copy to pypy/doc/config/objspace.usemodules.clr.txt diff --git a/pypy/doc/config/translation.jit_ffi.rst b/pypy/doc/config/translation.jit_ffi.rst deleted file mode 100644 --- a/pypy/doc/config/translation.jit_ffi.rst +++ /dev/null @@ -1,1 +0,0 @@ -Internal option: enable OptFfiCall in the jit optimizations. diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.rst b/pypy/doc/config/objspace.usemodules._pickle_support.txt copy from pypy/doc/config/objspace.usemodules._pickle_support.rst copy to pypy/doc/config/objspace.usemodules._pickle_support.txt diff --git a/pypy/doc/config/translation.verbose.rst b/pypy/doc/config/translation.verbose.rst deleted file mode 100644 --- a/pypy/doc/config/translation.verbose.rst +++ /dev/null @@ -1,1 +0,0 @@ -Print some more information during translation. diff --git a/pypy/doc/config/translation.taggedpointers.rst b/pypy/doc/config/translation.taggedpointers.txt copy from pypy/doc/config/translation.taggedpointers.rst copy to pypy/doc/config/translation.taggedpointers.txt diff --git a/pypy/doc/config/translation.compilerflags.rst b/pypy/doc/config/translation.compilerflags.rst deleted file mode 100644 --- a/pypy/doc/config/translation.compilerflags.rst +++ /dev/null @@ -1,1 +0,0 @@ -Experimental. Specify extra flags to pass to the C compiler. diff --git a/pypy/doc/discussion/removing-stable-compiler.rst b/pypy/doc/discussion/removing-stable-compiler.rst deleted file mode 100644 --- a/pypy/doc/discussion/removing-stable-compiler.rst +++ /dev/null @@ -1,22 +0,0 @@ -February 28th, 2006 - -While implementing conditional expressions from 2.5 we had to change -the stable compiler in order to keep tests from breaking. While using -stable compiler as a baseline made sense when the ast compiler was -new, it is less and less true as new grammar changes are introduced. 
- -Options include - -1. Freezing the stable compiler at grammar 2.4. - -2. Capture AST output from the stable compiler and use that explicitly -in current tests instead of regenerating them every time, primarily -because it allows us to change the grammar without changing the stable -compiler. - - -In either case, AST production tests for new grammar changes could be -written manually, which is less effort than fixing the stable -compiler (which itself isn't really tested anyway). - -Discussion by Arre, Anders L., Stuart Williams diff --git a/pypy/translator/cli/test/test_carbonpython.py b/pypy/translator/cli/test/test_carbonpython.py deleted file mode 100644 --- a/pypy/translator/cli/test/test_carbonpython.py +++ /dev/null @@ -1,175 +0,0 @@ -import py -py.test.skip("it passes usually, but fails on buildbot, no clue why") - -import os -import os.path -from pypy.tool import udir -from pypy.translator.cli.rte import Target -from pypy.translator.cli.carbonpython import DllDef, export, collect_entrypoints,\ - collect_class_entrypoints, compile_dll -from pypy.translator.cli.test.runtest import CliFunctionWrapper, CliTest - -TEMPLATE = """ -using System; -using System.Collections; -class CarbonPytonTest { - public static void Main() { - %s - } -} -""" - -class TestCarbonPython(CliTest): - - def _csharp(self, source, references=[], netmodules=[]): - tmpfile = udir.udir.join('tmp.cs') - tmpfile.write(TEMPLATE % source) - flags = ['/r:%s' % ref for ref in references] - flags += ['/addmodule:%s' % mod for mod in netmodules] - - class MyTarget(Target): - SOURCES = [str(tmpfile)] - FLAGS = flags - OUTPUT = 'tmp.exe' - SRC_DIR = str(udir.udir) - - func = CliFunctionWrapper(MyTarget.get()) - return func() - - def test_compilation(self): - res = self._csharp('Console.WriteLine(42);') - assert res == 42 - - def test_func_namespace(self): - def foo(x): - return x+1 - def bar(x): - return foo(x) - foo._namespace_ = 'MyNamespace.MyClass' - bar._namespace_ = 'MyClass' - res = 
self.interpret(bar, [41], backendopt=False) - assert res == 42 - - def test_simple_functions(self): - def foo(x): - return x+1 - def bar(x): - return x*2 - dll = DllDef('test', 'Test', [(foo, [int]), - (bar, [int])]) - dll.compile() - res = self._csharp('Console.WriteLine("{0}, {1}", Test.foo(42), Test.bar(42));', ['test']) - assert res == (43, 84) - - def test_export(self): - @export(int, float) - def foo(x, y): - pass - @export(int, float, namespace='test') - def bar(x, y): - pass - @export - def baz(): - pass - - assert foo._inputtypes_ == (int, float) - assert not hasattr(foo, '_namespace_') - assert bar._inputtypes_ == (int, float) - assert bar._namespace_ == 'test' - assert baz._inputtypes_ == () - - def test_collect_entrypoints(self): - @export(int, float) - def foo(x, y): - pass - def bar(x, y): - pass - mydict = dict(foo=foo, bar=bar, x=42) - entrypoints = collect_entrypoints(mydict) - assert entrypoints == [(foo, (int, float))] - - def test_collect_class_entrypoints(self): - class NotExported: - def __init__(self): - pass - - class MyClass: - @export - def __init__(self): - pass - @export(int) - def foo(self, x): - return x - - assert collect_class_entrypoints(NotExported) == [] - entrypoints = collect_class_entrypoints(MyClass) - assert len(entrypoints) == 2 - assert entrypoints[0][1] == () # __init__ inputtypes - assert entrypoints[1][1] == (MyClass, int) # foo inputtypes - - def test_compile_class(self): - py.test.skip('This test fails every other day. 
No clue why :-(') - class MyClass: - @export(int) - def __init__(self, x): - self.x = x - @export(int, int) - def add(self, y, z): - return self.x + y + z - MyClass.__module__ = 'Test' # put the class in the Test namespace - - entrypoints = collect_entrypoints({'MyClass': MyClass}) - dll = DllDef('test', 'Test', entrypoints) - dll.compile() - res = self._csharp(""" - Test.MyClass obj = new Test.MyClass(); - obj.__init__(39); - Console.WriteLine(obj.add(1, 2)); - """, ['test']) - assert res == 42 - - def test_export_cliclass(self): - py.test.skip('it fails every other day on builbot, no clue why') - from pypy.translator.cli.dotnet import CLR - - @export(CLR.System.Collections.ArrayList, int) - def getitem(obj, i): - return obj.get_Item(i) - - entrypoints = collect_entrypoints({'getitem': getitem}) - dll = DllDef('test', 'Test', entrypoints) - dll.compile() - res = self._csharp(""" - ArrayList obj = new ArrayList(); - obj.Add(42); - Console.WriteLine(Test.getitem(obj, 0)); - """, ['test']) - assert res == 42 - - def test_compile_dll(self): - py.test.skip('This test fails every other day. 
No clue why :-(') - cwd, _ = os.path.split(__file__) - mylib_py = os.path.join(cwd, 'mylib.py') - compile_dll(mylib_py, copy_dll=False) - res = self._csharp(""" - Console.WriteLine(mylib.sum(20, 22)); - """, ['mylib']) - assert res == 42 - - def test_compile_dll_alternative_name(self): - cwd, _ = os.path.split(__file__) - mylib_py = os.path.join(cwd, 'mylib.py') - compile_dll(mylib_py, 'mylibxxx.dll', copy_dll=False) - res = self._csharp(""" - Console.WriteLine(mylibxxx.sum(20, 22)); - """, ['mylibxxx']) - assert res == 42 - - def test_compile_netmodule(self): - def foo(x): - return x+1 - dll = DllDef('mymodule', 'Test', [(foo, [int])], isnetmodule=True) - dll.compile() - res = self._csharp('Console.WriteLine("{0}", Test.foo(41));', - netmodules = ['mymodule']) - diff --git a/pypy/doc/config/objspace.std.withsmallint.rst b/pypy/doc/config/objspace.std.withsmallint.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withsmallint.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use "tagged pointers" to represent small enough integer values: Integers that -fit into 31 bits (respective 63 bits on 64 bit machines) are not represented by -boxing them in an instance of ``W_IntObject``. Instead they are represented as a -pointer having the lowest bit set and the rest of the bits used to store the -value of the integer. This gives a small speedup for integer operations as well -as better memory behaviour. diff --git a/pypy/tool/rest/rst.py b/pypy/tool/rest/rst.py --- a/pypy/tool/rest/rst.py +++ b/pypy/tool/rest/rst.py @@ -389,18 +389,14 @@ indent = ' ' def __init__(self, name, *args, **options): self.name = name - self.content = options.pop('content', []) - children = list(args) - super(Directive, self).__init__(*children) + self.content = args + super(Directive, self).__init__() self.options = options def text(self): # XXX not very pretty... 
- namechunksize = len(self.name) + 2 - self.children.insert(0, Text('X' * namechunksize)) - txt = super(Directive, self).text() - txt = '.. %s::%s' % (self.name, txt[namechunksize + 3:],) - options = '\n'.join([' :%s: %s' % (k, v) for (k, v) in + txt = '.. %s::' % (self.name,) + options = '\n'.join([' :%s: %s' % (k, v) for (k, v) in self.options.iteritems()]) if options: txt += '\n%s' % (options,) @@ -408,10 +404,7 @@ if self.content: txt += '\n' for item in self.content: - assert item.parentclass == Rest, 'only top-level items allowed' - assert not item.indent - item.indent = ' ' - txt += '\n' + item.text() + txt += '\n ' + item return txt diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -7,10 +7,6 @@ Introduction ================ -.. include:: crufty.rst - - .. apparently this still works; needs JIT integration; hasn't been maintained for years - PyPy can expose to its user language features similar to the ones present in `Stackless Python`_: **no recursion depth limit**, and the ability to write code in a **massively concurrent style**. It actually @@ -142,6 +138,11 @@ will come from any call to ``coro.switch()`` and can be caught. If the exception isn't caught, it will be propagated to the parent coroutine. +When a coroutine is garbage-collected, it gets the ``.kill()`` method sent to +it. This happens at the point the next ``.switch`` method is called, so the +target coroutine of this call will be executed only after the ``.kill`` has +finished. + Example ~~~~~~~ @@ -430,32 +431,6 @@ These cases are not supported yet. -Coroutine Cloning -+++++++++++++++++ - -In theory, coroutine pickling is general enough to allow coroutines to -be *cloned* within a process; i.e. from one suspended coroutine, a copy -can be made - simply by pickling and immediately unpickling it. Both -the original and the copy can then continue execution from the same -point on. 
Cloning gives much of the expressive power of full -*continuations*. - -However, pickling has several problems in practice (besides a relatively -high overhead). It is not a completely general solution because not all -kinds of objects can be pickled; moreover, which objects are pickled by -value or by reference only depends on the type of the object. For the -purpose of cloning, this means that coroutines cannot be -pickled/unpickled in all situations, and even when they can, the user -does not have full control over which of the objects currently reachable -from a coroutine will be duplicated, and which will be shared with the -original coroutine. - -For this reason, we implemented a direct cloning operation. It has been -deprecated for some time, however, as it was slightly buggy and relied -on a specific (and deprecated) garbage collector. It is not available -out of the box right now, so we will not talk any more about this. - - Composability +++++++++++++ @@ -622,7 +597,7 @@ .. _`Stackless Python`: http://www.stackless.com -.. _`documentation of the greenlets`: http://codespeak.net/svn/greenlet/trunk/doc/greenlet.txt +.. _`documentation of the greenlets`: http://packages.python.org/greenlet/ .. _`Stackless Transform`: translation.html#the-stackless-transform -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/config/translation.sandbox.rst b/pypy/doc/config/translation.sandbox.txt copy from pypy/doc/config/translation.sandbox.rst copy to pypy/doc/config/translation.sandbox.txt diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -78,6 +78,4 @@ .. _`CERN (July 2010)`: http://morepypy.blogspot.com/2010/07/cern-sprint-report-wrapping-c-libraries.html .. 
_`Düsseldorf (October 2010)`: http://morepypy.blogspot.com/2010/10/dusseldorf-sprint-report-2010.html -Further event notes: -* :ref:`eventhistory.rst` diff --git a/pypy/doc/config/translation.gctransformer.rst b/pypy/doc/config/translation.gctransformer.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gctransformer.rst +++ /dev/null @@ -1,1 +0,0 @@ -internal option diff --git a/pypy/doc/low-level-encapsulation.rst b/pypy/doc/low-level-encapsulation.rst deleted file mode 100644 --- a/pypy/doc/low-level-encapsulation.rst +++ /dev/null @@ -1,343 +0,0 @@ -============================================================ - Encapsulating low-level implementation aspects -============================================================ - -.. contents:: - - - -Abstract -======== - -It has always been a major goal of PyPy to not force implementation -decisions. This means that even after the implementation of the -standard interpreter [#]_ has been written we are still able to experiment -with different approaches to memory management or concurrency and to -target wildly different platforms such as the Java Virtual Machine or -a very memory-limited embedded environment. - -We do this by allowing the encapsulation of these low level aspects as -well defined parts of the translation process. - -In the following document, we give examples of aspects that have been -successfully encapsulated in more detail and contrast the potential of -our approach with CPython. - -.. [#] `standard interpreter`_ is our term for the code which - implements the Python language, i.e. the interpreter and the - standard object space. - - -Background -========== - -One of the better known significant modifications to CPython are -Christian Tismer's "stackless" patches [STK]_, which allow for far more -flexible control flow than the typical function call/return supported by -CPython. 
Originally implemented as a series of invasive patches, -Christian found that maintaining these patches as CPython itself was -further developed was time consuming to the point of no longer being -able to work on the new functionality that was the point of the -exercise. - -One solution would have been for the patches to become part of core -CPython but this was not done partly because the code that fully -enabled stackless required widespread modifications that made the code -harder to understand (as the "stackless" model contains control flow -that is not easily expressable in C, the implementation became much -less "natural" in some sense). - -With PyPy, however, it is possible to obtain this flexible control -flow whilst retaining transparent implementation code as the necessary -modifications can be implemented as a localized translation aspect, -and indeed this was done at the Paris sprint in a couple of days (as -compared to around six months for the original stackless patches). - -Of course, this is not the only aspect that can be so decided a -posteriori, during translation. - - -Translation aspects -=================== - -Our standard interpreter is implemented at a very high level of -abstraction. This has a number of happy consequences, among which is -enabling the encapsulation of language aspects as described in this -document. For example, the implementation code simply makes no -reference to memory management, which clearly gives the translator -complete freedom to decide about this aspect. This contrasts with -CPython where the decision to use reference counting is reflected tens -or even hundreds of times in each C source file in the codebase. - -As described in [ARCH]_, producing a Python implementation from the -source of our standard interpreter involves various stages: the -initialization code is run, the resulting code is annotated, typed and -finally translated. 
By the nature of the task, the encapsulation of -*low-level aspects* mainly affects the typer and the translation -process. At the coarsest level, the selection of target platform -involves writing a new backend -- still a significant task, but much -much easier than writing a complete implementation of Python! - -Other aspects affect different levels, as their needs require. The -remainder of this section describes a few aspects that we have -successfully encapsulated. - -An advantage of our approach is that any combination of aspects can be -freely selected, avoiding the problem of combinatorial explosion of -variants seen in manually written interpreters. - - -Stacklessness -------------- - -The stackless modifications are mostly implemented in the C backend, -with a single extra flow graph operation to influence some details of -the generated C code. The total changes only required about 300 lines -of source, vindicating our abstract approach. - -In stackless mode, the C backend generates functions that are -systematically extended with a small amount of bookkeeping code. This -allows the C code to save its own stack to the heap on demand, where it -can then be inspected, manipulated and eventually resumed. This is -described in more detail in [TA]_. In this way, unlimited (or more -precisely heap-limited) recursion is possible, even on operating systems -that limit the size of the C stack. Alternatively, a different saved -stack can be resumed, thus implementing soft context switches - -coroutines, or green threads with an appropriate scheduler. We reobtain -in this way all the major benefits of the original "stackless" patches. 
- -This effect requires a number of changes in each and every C function -that would be extremely tedious to write by hand: checking for the -signal triggering the saving of the stack, actually saving precisely the -currently active local variables, and when re-entering the function -check which variables are being restored and which call site is resumed. -In addition, a couple of global tables must be maintained to drive the -process. The key point is that we can fine-tune all these interactions -freely, without having to rewrite the whole code all the time but only -modifying the C backend (in addition, of course, to being able to change -at any time the high-level code that is the input of the translation -process). So far, this allowed us to find a style that does not hinder -the optimizations performed by the C compiler and so has only a minor -impact on performance in the normal case. - -Also note that the fact that the C stack can be fully saved into the -heap can tremendously simplify the portable implementation of garbage -collection: after the stack has been completely transferred to the heap, -there are no roots left on the stack. - - -Multiple Interpreters ---------------------- - -Another implementation detail that causes tension between functionality -and both code clarity and memory consumption in CPython is the issue of -multiple independent interpreters in the same process. In CPython there -is a partial implementation of this idea in the "interpreter state" API, -but the interpreters produced by this are not truly independent -- for -instance the dictionary that contains interned strings is implemented as -file-level static object, and is thus shared between the interpreters. -A full implementation of this idea would entirely eschew the use of file -level statics and place all interpreter-global data in some large -structure, which would hamper readability and maintainability. 
In -addition, in many situations it is necessary to determine which -interpreter a given object is "from" -- and this is not possible in -CPython largely because of the memory overhead that adding a 'interp' -pointer to all Python objects would create. - -In PyPy, all of our implementation code manipulates an explicit object -space instance, as described in [CODG]_. The situation of multiple -interpreters is thus handled automatically: if there is only one space -instance, it is regarded as a pre-constructed constant and the space -object pointer (though not its non-constant contents) disappears from -the produced source, i.e. from function arguments, local variables and -instance fields. If there are two or more such instances, a 'space' -attribute will be automatically added to all application objects (or -more precisely, it will not be removed by the translation process), the -best of both worlds. - - -Memory Management ------------------ - -As mentioned above, CPython's decision to use a garbage collector based -on reference counting is reflected throughout its source. In the -implementation code of PyPy, it is not, and in fact the standard -interpreter can currently be compiled to use a reference counted scheme -or the Boehm GC [BOEHM]_. Again, more details are in [TA]_. We also -have an experimental framework for developing custom exact GCs [GC]_, -but it is not yet integrated with the low-level translation back-ends. - -Another advantage of the aspect oriented approach shows itself most -clearly with this memory management aspect: that of correctness. -Although reference counting is a fairly simple scheme, writing code for -CPython requires that the programmer make a large number of -not-quite-trivial decisions about the refcounting code. Experience -suggests that mistakes will always creep in, leading to crashes or -leaks. 
While tools exist to help find these mistakes, it is surely -better to not write the reference count manipulations at all and this is -what PyPy's approach allows. Writing the code that emits the correct -reference count manipulations is surely harder than writing any one -piece of explicit refcounting code, but once it is done and tested, it -just works without further effort. - - -Concurrency ------------ - -The aspect of CPython's implementation that has probably caused more -discussion than any other mentioned here is that of the threading -model. Python has supported threads since version 1.5 with what is -commonly referred to as the "Global Interpreter Lock" or GIL; the -execution of bytecodes is serialized such that only one thread can be -executing Python code at one time. This has the benefit of being -relatively unintrusive and not too complex, but has the disadvantage -that multi-threaded, computation-bound Python code does not gain -performance on multi-processor machines. - -PyPy will offer the opportunity to experiment with different models, -although currently we only offer a version with no thread support and -another with a GIL-like model [TA]_. (We also plan to support soon -"green" software-only threads in the Stackless model described above, -but obviously this would not solve the multi-processor scalability -issue.) - -The future work in this direction is to collect the numerous possible -approaches that have between thought out along the years and -e.g. presented on the CPython development mailing list. Most of them -have never been tried out in CPython, for lack of necessary resources. -A number of them are clearly easy to try out in PyPy, at least in an -experimental version that would allow its costs to be assessed -- for -example, various forms of object-level locking. - - -Evaluation Strategy -------------------- - -Possibly the most radical aspect to tinker with is the evaluation -strategy. 
The thunk object space [OBJS]_ wraps the standard object -space to allow the production of "lazily computed objects", i.e. objects -whose values are only calculated when needed. It also allows global and -total replacement of one object with another. - -The thunk object space is mostly meant as an example of what our -approach can achieve -- the combination of side-effects and lazy -evaluation is not easy to understand. This demonstration is important -because this level of flexibility will be required to implement future -features along the lines of Prolog-style logic variables, transparent -persistency, object distribution across several machines, or -object-level security. - - -Experimental results -==================== - -All the aspects described in the previous chapter have been successfully -implemented and are available since the release 0.7 or 0.8 of PyPy. - -We have conducted preliminary experimental measures of the performance -impact of enabling each of these features in the compiled PyPy -interpreter. We present below the current results as of October 2005. -Most figures appear to vary from machine to machine. Given that the -generated code is large (it produce a binary of 5.6MB on a Linux -Pentium), there might be locality and code ordering issues that cause -important cache effects. - -We have not particularly optimized any of these aspects yet. Our goal -is primarily to prove that the whole approach is worthwhile, and rely on -future work and push for external contributions to implement -state-of-the-art techniques in each of these domains. - -Stacklessness - - Producing Stackless-style C code means that all the functions of the - PyPy interpreter that can be involved in recursions contain stack - bookkeeping code (leaf functions, functions calling only leaves, - etc. do not need to use this style). The current performance impact - is to make PyPy slower by about 8%. A couple of minor pending - optimizations could reduce this figure a bit. 
We expect the rest of - the performance impact to be mainly caused by the increase of size - of the generated executable (+28%). - -Multiple Interpreters - - A binary that allowed selection between two copies of the standard - object space with a command line switch was about 10% slower and - about 40% larger than the default. Most of the extra size is - likely accounted for by the duplication of the large amount of - prebuilt data involved in an instance of the standard object - space. - -Memory Management - - The [Boehm] GC is well-optimized and produces excellent results. By - comparison, using reference counting instead makes the interpreter - twice as slow. This is almost certainly due to the naive approach - to reference counting used so far, which updates the counter far - more often than strictly necessary; we also still have a lot of - objects that would theoretically not need a reference counter, - either because they are short-lived or because we can prove that - they are "owned" by another object and can share its lifetime. In - the long run, it will be interesting to see how far this figure can - be reduced, given past experiences with CPython which seem to show - that reference counting is a viable idea for Python interpreters. - -Concurrency - - No experimental data available so far. Just enabling threads - currently creates an overhead that hides the real costs of locking. - -Evaluation Strategy - - When translated to C code, the Thunk object space has a global - performance impact of 6%. The executable is 13% bigger (probably - due to the arguably excessive inlining we perform). - -We have described five aspects in this document, each currently with -two implementation choices, leading to 32 possible translations. We -have not measured the performance of each variant, but the few we have -tried suggests that the performance impacts are what one would expect, -e.g. 
a translated stackless binary using the thunk object space would -be expected to be about 1.06 x 1.08 ~= 1.14 times slower than the -default and was found to be 1.15 times slower. - - -Conclusion -========== - -Although still a work in progress, we believe that the successes we -have had in encapsulating implementation aspects justifies the -approach we have taken. In particular, the relative ease of -implementing the translation aspects described in this paper -- as -mentioned above, the stackless modifications took only a few days -- -means we are confident that it will be easily possible to encapsulate -implementation aspects we have not yet considered. - - -References -========== - -.. [ARCH] `Architecture Overview`_, PyPy documentation, 2003-2005 - -.. [BOEHM] `Boehm-Demers-Weiser garbage collector`_, a garbage collector - for C and C++, Hans Boehm, 1988-2004 - -.. [CODG] `Coding Guide`_, PyPy documentation, 2003-2005 - -.. [GC] `Garbage Collection`_, PyPy documentation, 2005 - -.. [OBJS] `Object Spaces`_, PyPy documentation, 2003-2005 - -.. [STK] `Stackless Python`_, a Python implementation that does not use - the C stack, Christian Tismer, 1999-2004 - -.. [TA] `Memory management and threading models as translation aspects`_, - PyPy documentation (and EU Deliverable D05.3), 2005 - -.. _`standard interpreter`: architecture.html#standard-interpreter -.. _`Architecture Overview`: architecture.html -.. _`Coding Guide`: coding-guide.html -.. _`Garbage Collection`: garbage_collection.html -.. _`Object Spaces`: objspace.html -.. _`Stackless Python`: http://www.stackless.com -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. 
_`Memory management and threading models as translation aspects`: translation-aspects.html diff --git a/pypy/doc/config/objspace.usemodules.binascii.rst b/pypy/doc/config/objspace.usemodules.binascii.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.binascii.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the RPython 'binascii' module. diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -1,5 +1,3 @@ -.. include:: crufty.rst - ============================= PyPy's ctypes implementation ============================= @@ -71,8 +69,6 @@ interface require more object allocations and copying than strictly necessary; this too could be improved. -The implementation was developed and has only been tested on x86-32 Linux. - Here is a list of the limitations and missing features of the current implementation: @@ -94,53 +90,15 @@ between its primitive types and user subclasses of its primitive types -Getting the code and test suites -================================= - -A stable revision of PyPy containing the ctypes implementation can be checked out with subversion from the tag: - -http://codespeak.net/svn/pypy/tag/ctypes-stable - -The various tests and later examples can be run on x86-32 Linux. We tried them -on an up-to-date Ubuntu 7.10 x86-32 system. - -If one goes inside the checkout it is possible to run ``_rawffi`` tests with:: - - $ cd pypy - $ python test_all.py module/_rawffi/ - -The ctypes implementation test suite is derived from the tests for -ctypes 1.0.2, we have skipped some tests corresponding to not -implemented features or implementation details, we have also added -some tests. - -To run the test suite a compiled pypy-c is required with the proper configuration. 
To build the required pypy-c one should inside the checkout:: - - $ cd pypy/translator/goal - $ ./translate.py --text --batch --gc=generation targetpypystandalone.py - --withmod-_rawffi --allworkingmodules - -this should produce a pypy-c executable in the ``goal`` directory. - -To run the tests then:: - - $ cd ../../.. # back to pypy-trunk - $ ./pypy/translator/goal/pypy-c pypy/test_all.py lib/pypy1.2/lib_pypy/pypy_test/ctypes_tests - -There should be 36 skipped tests and all other tests should pass. - Running application examples ============================== -`pyglet`_ is known to run. We had some success also with pygame-ctypes which is not maintained anymore and with a snapshot of the experimental pysqlite-ctypes. We will only describe how to run the pyglet examples. +`pyglet`_ is known to run. We also had some success with pygame-ctypes (which is no longer maintained) and with a snapshot of the experimental pysqlite-ctypes. We will only describe how to run the pyglet examples. pyglet ------- We tried pyglet checking it out from its repository at revision 1984. -For convenience a tarball of the checkout can also be found at: - -http://codespeak.net/~pedronis/pyglet-r1984.tgz From pyglet, the following examples are known to work: diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: http://codespeak.net/svn/pypy/trunk/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: http://codespeak.net/svn/pypy/trunk/pypy/rpython/lltypesystem/rffi.py +.. 
_rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,7 +68,7 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: http://codespeak.net/svn/pypy/trunk/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py OO backends diff --git a/pypy/doc/config/objspace.usemodules.zlib.rst b/pypy/doc/config/objspace.usemodules.zlib.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.zlib.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'zlib' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/config/commandline.rst b/pypy/doc/config/commandline.rst deleted file mode 100644 --- a/pypy/doc/config/commandline.rst +++ /dev/null @@ -1,33 +0,0 @@ - -.. contents:: - - -.. _objspace: -.. _`overview-of-command-line-options-for-objspace`: - -------------------------------- -PyPy Python interpreter options -------------------------------- - -The following options can be used after ``translate.py -targetpypystandalone`` or as options to ``py.py``. - -.. GENERATE: objspace - - -.. _translation: -.. _`overview-of-command-line-options-for-translation`: - ---------------------------- -General translation options ---------------------------- - -The following are options of ``translate.py``. They must be -given before the ``targetxxx`` on the command line. - -* `--opt -O:`__ set the optimization level `[0, 1, size, mem, 2, 3]` - -.. __: opt.html - -.. 
GENERATE: translation - diff --git a/pypy/doc/config/objspace.std.rst b/pypy/doc/config/objspace.std.txt copy from pypy/doc/config/objspace.std.rst copy to pypy/doc/config/objspace.std.txt diff --git a/pypy/doc/config/objspace.usemodules.micronumpy.rst b/pypy/doc/config/objspace.usemodules.micronumpy.txt copy from pypy/doc/config/objspace.usemodules.micronumpy.rst copy to pypy/doc/config/objspace.usemodules.micronumpy.txt diff --git a/pypy/doc/config/objspace.usemodules.thread.rst b/pypy/doc/config/objspace.usemodules.thread.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.thread.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'thread' module. diff --git a/pypy/doc/config/objspace.usemodules.mmap.rst b/pypy/doc/config/objspace.usemodules.mmap.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.mmap.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'mmap' module. -This module is expected to be fully working. diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -56,7 +56,7 @@ The present document gives a description of the above object spaces. The sources of PyPy contain the various object spaces in the directory -`objspace/`_. +`pypy/objspace/`_. To choose which object space to use, use the :config:`objspace.name` option. @@ -297,7 +297,7 @@ Introduction ------------ -The Standard Object Space (StdObjSpace_) is the direct equivalent of CPython's +The Standard Object Space (`pypy/objspace/std/`_) is the direct equivalent of CPython's object library (the "Objects/" subdirectory in the distribution). It is an implementation of the common Python types in a lower-level language. @@ -341,13 +341,11 @@ using plain integers instead is the complex path, not the other way around. -.. 
_StdObjSpace: ../../../../pypy/objspace/std/ - Object types ------------ -The larger part of the `StdObjSpace`_ package defines and implements the +The larger part of the `pypy/objspace/std/`_ package defines and implements the library of Python's standard built-in object types. Each type (int, float, list, tuple, str, type, etc.) is typically implemented by two modules: @@ -356,17 +354,17 @@ * the *implementation* module, called ``xxxobject.py``. The ``xxxtype.py`` module basically defines the type object itself. For -example, `listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `listtype.py`_ enumerates the methods +example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when +you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods specific to lists, like ``append()``. A particular method implemented by all types is the ``__new__()`` special method, which in Python's new-style-classes world is responsible for creating an instance of the type. In PyPy, ``__new__()`` locates and imports the module implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `tupletype.py`_ +arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ defines ``__new__()`` to import the class ``W_TupleObject`` from -`tupleobject.py`_ and instantiate it. The `tupleobject.py`_ then contains a +`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a "real" implementation of tuples: the way the data is stored in the ``W_TupleObject`` class, how the operations work, etc. @@ -387,18 +385,13 @@ same Python type. PyPy knows that (e.g.) 
the application-level type of its interpreter-level ``W_StringObject`` instances is str because there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `stringtype.py`_; all +points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all other implementations of strings use the same ``typedef`` from -`stringtype.py`_. +`pypy/objspace/std/stringtype.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. -.. _`listtype.py`: ../../../../pypy/objspace/std/listtype.py -.. _`stringtype.py`: ../../../../pypy/objspace/std/stringtype.py -.. _`tupletype.py`: ../../../../pypy/objspace/std/tupletype.py -.. _`tupleobject.py`: ../../../../pypy/objspace/std/tupleobject.py - .. _`Standard Interpreter Optimizations`: interpreter-optimizations.html @@ -408,12 +401,10 @@ The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, -see the comment at the start of the source__. However, multimethods +see the comment at the start of `pypy/objspace/std/multimethod.py`_. However, multimethods alone are not enough for the Standard Object Space: the complete picture spans several levels in order to emulate the exact Python semantics. -.. __: ../../../../pypy/objspace/std/multimethod.py - Consider the example of the ``space.getitem(w_a, w_b)`` operation, corresponding to the application-level syntax ``a[b]``. The Standard Object Space contains a corresponding ``getitem`` multimethod and a @@ -469,7 +460,7 @@ resulting pair of basic strings. This is similar to the C++ method overloading resolution mechanism (but occurs at runtime). -.. _multimethods: theory.html#multimethods +.. 
_multimethods: http://en.wikipedia.org/wiki/Multimethods Multimethod slicing @@ -552,13 +543,11 @@ operations are usually shown. A quick introduction on how to use the trace object space can be `found here`_. -A number of options for configuration is here in `traceconfig.py`_. +A number of options for configuration is here in `pypy/tool/traceconfig.py`_. .. _`found here` : getting-started-dev.html#tracing-bytecode-and-operations-on-objects -.. _`Abstract Interpretation`: theory.html#abstract-interpretation -.. _`traceconfig.py`: ../tool/traceconfig.py - +.. _`Abstract Interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation .. _`Flow Object Space`: @@ -568,7 +557,7 @@ Introduction ------------ -The task of the FlowObjSpace_ is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. @@ -588,7 +577,7 @@ appear in some next operation. This technique is an example of `Abstract Interpretation`_. -.. _`Abstract Interpretation`: theory.html#abstract-interpretation +.. _`Abstract Interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation For example, if the placeholder ``v1`` is given as the argument to the above function, the bytecode interpreter will call ``v2 = space.mul(space.wrap(3), @@ -600,8 +589,6 @@ v3 = add(v2, Constant(2)) -.. _FlowObjSpace: ../../../../pypy/objspace/flow/ - The Flow model -------------- @@ -650,4 +637,4 @@ .. _`What PyPy can do for your objects`: objspace-proxies.html -.. include:: _ref.rst +.. 
include:: _ref.txt diff --git a/pypy/doc/config/objspace.std.withstrbuf.rst b/pypy/doc/config/objspace.std.withstrbuf.txt copy from pypy/doc/config/objspace.std.withstrbuf.rst copy to pypy/doc/config/objspace.std.withstrbuf.txt diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst --- a/pypy/doc/discussion/outline-external-ootype.rst +++ b/pypy/doc/discussion/outline-external-ootype.rst @@ -1,22 +1,10 @@ Some discussion about external objects in ootype ================================================ -Current approaches: - -* BasicExternal, used for js backend +Current approach: * SomeCliXxx for .NET backend -BasicExternal -------------- - -* Is using types to make rpython happy (ie, every single method or field - is hardcoded) - -* Supports callbacks by SomeGenericCallable - -* Supports fields, also with callable fields - SomeCliXxx ---------- @@ -26,11 +14,11 @@ * Supports static methods -Would be extremely cool to have just one approach instead of two, -so here are some notes: +Would be extremely cool to generalize the approach to be useful also for the +JVM backend. Here are some notes: * There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, js, jvm for now). + to support any possible backend (cli, jvm for now). * This approach might be eventually extended by a backend itself, but as much as possible code should be factored out. @@ -46,24 +34,22 @@ ================================ The goal of the task is to let RPython program access "external -objects" which are available in the target platform; these include: +entities" which are available in the target platform; these include: - external classes (e.g. for .NET: System.Collections.ArrayList) - - external instances (e.g. for js: window, window.document) + - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - - external functions? 
(they are not needed for .NET and JVM, maybe - for js?) - -External objects should behave as much as possible as "internal -objects". +External entities should behave as much as possible as "internal +entities". Moreover, we want to preserve the possibility of *testing* RPython programs on top of CPython if possible. For example, it should be possible to RPython programs using .NET external objects using -PythonNet; probably there is something similar for JVM, but not for -JS as I know. +PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: +.. _JPype: http://jpype.sourceforge.net/ +.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm How to represent types ---------------------- @@ -124,11 +110,6 @@ and JVM the job can be easily automatized, since the objects have got precise signatures. -For JS, signatures must be written by hand, so we must provide a -convenient syntax for it; I think it should be possible to use the -current syntax and write a tool which translates it to low-level -types. - RPython interface ----------------- @@ -146,9 +127,8 @@ - access to static methods: return an object which will be annotated as SomeExternalStaticMeth. -Instances are annotated as SomeExternalInstance. Prebuilt external -objects (such as JS's window.document) are annotated as -SomeExternalInstance(const=...). +Instances are annotated as SomeExternalInstance. Prebuilt external objects are +annotated as SomeExternalInstance(const=...). Open issues ----------- @@ -179,18 +159,12 @@ It would be nice to allow programmers to inherit from an external class. Not sure about the implications, though. -Callbacks -~~~~~~~~~ - -I know that they are an issue for JS, but I don't know how they are -currently implemented. - Special methods/properties ~~~~~~~~~~~~~~~~~~~~~~~~~~ In .NET there are special methods that can be accessed using a special syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#. 
+RPython the same syntax as C#, although we can live without that. Implementation details diff --git a/pypy/doc/config/translation.backendopt.rst b/pypy/doc/config/translation.backendopt.txt copy from pypy/doc/config/translation.backendopt.rst copy to pypy/doc/config/translation.backendopt.txt diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.rst b/pypy/doc/config/objspace.std.withmethodcachecounter.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcachecounter.rst +++ /dev/null @@ -1,1 +0,0 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -64,21 +64,21 @@ for Python`_, A. Rigo -.. _bibtex: http://codespeak.net/svn/pypy/extradoc/talk/bibtex.bib +.. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://codespeak.net/svn/user/antocuni/phd/thesis/thesis.pdf -.. _`How to *not* write Virtual Machines for Dynamic Languages`: http://codespeak.net/svn/pypy/extradoc/talk/dyla2007/dyla.pdf -.. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009/bolz-tracing-jit.pdf -.. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://codespeak.net/svn/user/cfbolz/jitpl/thesis/final-master.pdf +.. 
_`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf +.. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf +.. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html -.. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://www.iam.unibe.ch/~verwaest/pygirl.pdf +.. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf .. _`Representation-Based Just-in-Time Specialization and the Psyco Prototype for Python`: http://psyco.sourceforge.net/psyco-pepm-a.ps.gz .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 -.. _`Automatic generation of JIT compilers for dynamic languages in .NET`: http://codespeak.net/svn/pypy/extradoc/talk/ecoop2009/main.pdf -.. _`Core Object Optimization Results`: http://codespeak.net/svn/pypy/extradoc/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf +.. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf +.. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf .. 
_`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf @@ -131,7 +131,7 @@ * `PyCon 2008`_. -.. __: http://codespeak.net/svn/pypy/extradoc/talk/s3-2008/talk.pdf +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/s3-2008/talk.pdf Talks in 2007 @@ -166,9 +166,9 @@ * `Warsaw 2007`_. -.. __: http://codespeak.net/svn/pypy/extradoc/talk/roadshow-ibm/ -.. __: http://codespeak.net/svn/pypy/extradoc/talk/roadshow-google/Pypy_architecture.pdf -.. __: http://codespeak.net/svn/pypy/extradoc/talk/dls2007/rpython-talk.pdf +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/roadshow-ibm/ +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/roadshow-google/Pypy_architecture.pdf +.. __: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2007/rpython-talk.pdf Talks in 2006 @@ -266,36 +266,36 @@ .. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf .. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf .. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html -.. _`Sprinting the PyPy way`: http://codespeak.net/svn/pypy/extradoc/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf +.. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf .. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html -.. _`PyPy's approach to virtual machine construction`: http://codespeak.net/svn/pypy/extradoc/talk/dls2006/pypy-vm-construction.pdf -.. _`EuroPython talks 2009`: http://codespeak.net/svn/pypy/extradoc/talk/ep2009/ -.. _`PyCon talks 2009`: http://codespeak.net/svn/pypy/extradoc/talk/pycon2009/ -.. _`Wroclaw (Poland) presentation`: http://codespeak.net/svn/pypy/extradoc/talk/wroclaw2009/talk.pdf +.. 
_`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf +.. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ +.. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ +.. _`Wroclaw (Poland) presentation`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/wroclaw2009/talk.pdf .. _`PyPy talk at OpenBossa 09`: http://morepypy.blogspot.com/2009/03/pypy-talk-at-openbossa-09.html -.. _`at SFI 08`: http://codespeak.net/svn/pypy/extradoc/talk/sfi2008/ -.. _`at PyCon Poland 08`: http://codespeak.net/svn/pypy/extradoc/talk/pyconpl-2008/talk.pdf -.. _`The PyPy Project and You`: http://codespeak.net/svn/pypy/extradoc/talk/osdc2008/osdc08.pdf -.. _`EuroPython talks 2008`: http://codespeak.net/svn/pypy/extradoc/talk/ep2008/ +.. _`at SFI 08`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/sfi2008/ +.. _`at PyCon Poland 08`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pyconpl-2008/talk.pdf +.. _`The PyPy Project and You`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/osdc2008/osdc08.pdf +.. _`EuroPython talks 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2008/ .. _`Maemo summit`: http://morepypy.blogspot.com/2008/09/pypypython-at-maemo-summit.html -.. _`PyCon UK 2008 - JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-uk-2008/jit/pypy-vm.pdf -.. _`PyCon UK 2008 - Status`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-uk-2008/status/status.pdf -.. _`PyCon Italy 2008`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-italy-2008/pypy-vm.pdf -.. _`RuPy 2008`: http://codespeak.net/svn/pypy/extradoc/talk/rupy2008/ -.. _`RuPy 2007`: http://codespeak.net/svn/pypy/extradoc/talk/rupy2007/ -.. _`PyCon 2008`: http://codespeak.net/svn/pypy/extradoc/talk/pycon2008/ -.. _`ESUG 2007`: http://codespeak.net/svn/pypy/extradoc/talk/esug2007/ -.. _`Bern (Switzerland) 2007`: http://codespeak.net/svn/pypy/extradoc/talk/bern2007/ -.. 
_`PyCon UK 2007`: http://codespeak.net/svn/pypy/extradoc/talk/pyconuk07/ -.. _Dresden: http://codespeak.net/svn/pypy/extradoc/talk/dresden/ -.. _`EuroPython 2007`: http://codespeak.net/svn/pypy/extradoc/talk/ep2007/ -.. _`Bad Honnef 2007`: http://codespeak.net/svn/pypy/extradoc/talk/badhonnef2007/talk.pdf -.. _`Dzug talk`: http://codespeak.net/svn/pypy/extradoc/talk/dzug2007/dzug2007.txt -.. _`PyCon 2007`: http://codespeak.net/svn/pypy/extradoc/talk/pycon2007/ -.. _`PyCon - Uno 2007`: http://codespeak.net/svn/pypy/extradoc/talk/pycon-uno2007/pycon07.pdf -.. _`Warsaw 2007`: http://codespeak.net/svn/pypy/extradoc/talk/warsaw2007/ -.. _`Warsaw 2006`: http://codespeak.net/svn/pypy/extradoc/talk/warsaw2006/ -.. _`Tokyo 2006`: http://codespeak.net/svn/pypy/extradoc/talk/tokyo/ +.. _`PyCon UK 2008 - JIT`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-uk-2008/jit/pypy-vm.pdf +.. _`PyCon UK 2008 - Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-uk-2008/status/status.pdf +.. _`PyCon Italy 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-italy-2008/pypy-vm.pdf +.. _`RuPy 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/rupy2008/ +.. _`RuPy 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/rupy2007/ +.. _`PyCon 2008`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2008/ +.. _`ESUG 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/esug2007/ +.. _`Bern (Switzerland) 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bern2007/ +.. _`PyCon UK 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pyconuk07/ +.. _Dresden: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dresden/ +.. _`EuroPython 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2007/ +.. _`Bad Honnef 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/badhonnef2007/talk.pdf +.. _`Dzug talk`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dzug2007/dzug2007.txt +.. 
_`PyCon 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2007/ +.. _`PyCon - Uno 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon-uno2007/pycon07.pdf +.. _`Warsaw 2007`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/warsaw2007/ +.. _`Warsaw 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/warsaw2006/ +.. _`Tokyo 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/tokyo/ Related projects @@ -351,8 +351,8 @@ .. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html .. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ -.. _IronPython: http://www.codeplex.com/Wiki/View.aspx?ProjectName=IronPython -.. _`Dynamic Native Optimization of Native Interpreters`: http://www.ai.mit.edu/~gregs/dynamorio.html -.. _JikesRVM: http://jikesrvm.sf.net +.. _IronPython: http://ironpython.codeplex.com/ +.. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html +.. _JikesRVM: http://jikesrvm.org/ .. _Tunes: http://tunes.org .. _`old Tunes Wiki`: http://codespeak.net/cliki.tunes.org/ diff --git a/pypy/doc/config/objspace.usemodules.rctime.rst b/pypy/doc/config/objspace.usemodules.rctime.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rctime.rst +++ /dev/null @@ -1,7 +0,0 @@ -Use the 'rctime' module. - -'rctime' is our `rffi`_ based implementation of the builtin 'time' module. -It supersedes the less complete :config:`objspace.usemodules.time`, -at least for C-like targets (the C and LLVM backends). - -.. 
_`rffi`: ../rffi.html diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -5,18 +5,11 @@ pypydir = py.path.local(pypy.__file__).dirpath() distdir = pypydir.dirpath() issue_url = 'http://codespeak.net/issue/pypy-dev/' +bitbucket_url = 'https://bitbucket.org/pypy/pypy/src/default/' import urllib2, posixpath -possible_start_dirs = [ - distdir, - distdir.join('pypy'), - # for now, let the jit links point to the oo-jit branch - 'http://codespeak.net/svn/pypy/branch/oo-jit', - 'http://codespeak.net/svn/pypy/branch/oo-jit/pypy', - ] - def makeref(docdir): reffile = docdir.join('_ref.txt') @@ -30,36 +23,24 @@ return name2target.setdefault(linktarget, []).append(linkname) - for textfile in docdir.listdir(): # for subdirs, see below - if textfile.ext != '.txt': + for textfile in sorted(docdir.listdir()): # for subdirs, see below + if textfile.ext != '.rst': continue - for linkname in linkrex.findall(textfile.read()): - if '/' in linkname: - for startdir in possible_start_dirs: - if isinstance(startdir, str): - assert startdir.startswith('http://') - target = posixpath.join(startdir, linkname) - try: - urllib2.urlopen(target).close() - except urllib2.HTTPError: - continue - else: - cand = startdir.join(linkname) - if not cand.check(): - continue - assert cand.relto(distdir) - dotdots = 0 - p = docdir - while p != distdir: - p = p.dirpath() - dotdots += 1 - target = '../' * dotdots + cand.relto(distdir) - addlink(linkname, target) - break - else: - print "WARNING %s: link %r may be bogus" %(textfile, linkname) + content = textfile.read() + found = False + for linkname in linkrex.findall(content): + if '/' in linkname: + found = True + assert distdir.join(linkname).check(), "link %s in %s is dead" % (linkname, textfile) + url = bitbucket_url + linkname + if not linkname.endswith("/") and distdir.join(linkname).check(dir=1): + url += "/" + addlink(linkname, url) elif linkname.startswith('issue'): + 
found = True addlink(linkname, issue_url+linkname) + if found: + assert ".. include:: _ref.txt" in content, "you need to include _ref.txt in %s" % (textfile, ) items = name2target.items() items.sort() diff --git a/pypy/doc/config/commandline.rst b/pypy/doc/config/commandline.txt copy from pypy/doc/config/commandline.rst copy to pypy/doc/config/commandline.txt --- a/pypy/doc/config/commandline.rst +++ b/pypy/doc/config/commandline.txt @@ -1,6 +1,6 @@ .. contents:: - + .. _objspace: .. _`overview-of-command-line-options-for-objspace`: diff --git a/pypy/doc/config/objspace.usemodules._sha.rst b/pypy/doc/config/objspace.usemodules._sha.txt copy from pypy/doc/config/objspace.usemodules._sha.rst copy to pypy/doc/config/objspace.usemodules._sha.txt diff --git a/pypy/doc/config/objspace.usemodules.time.rst b/pypy/doc/config/objspace.usemodules.time.txt copy from pypy/doc/config/objspace.usemodules.time.rst copy to pypy/doc/config/objspace.usemodules.time.txt diff --git a/pypy/doc/config/objspace.translationmodules.rst b/pypy/doc/config/objspace.translationmodules.txt copy from pypy/doc/config/objspace.translationmodules.rst copy to pypy/doc/config/objspace.translationmodules.txt diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.rst b/pypy/doc/config/translation.backendopt.inline_threshold.txt copy from pypy/doc/config/translation.backendopt.inline_threshold.rst copy to pypy/doc/config/translation.backendopt.inline_threshold.txt diff --git a/pypy/doc/config/translation.backendopt.inline_heuristic.rst b/pypy/doc/config/translation.backendopt.inline_heuristic.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline_heuristic.rst +++ /dev/null @@ -1,4 +0,0 @@ -Internal option. Switch to a different weight heuristic for inlining. -This is for basic inlining (:config:`translation.backendopt.inline`). - -.. 
internal diff --git a/pypy/doc/config/translation.countmallocs.rst b/pypy/doc/config/translation.countmallocs.txt copy from pypy/doc/config/translation.countmallocs.rst copy to pypy/doc/config/translation.countmallocs.txt diff --git a/pypy/doc/config/translation.verbose.rst b/pypy/doc/config/translation.verbose.txt copy from pypy/doc/config/translation.verbose.rst copy to pypy/doc/config/translation.verbose.txt diff --git a/pypy/doc/config/objspace.usemodules._pickle_support.rst b/pypy/doc/config/objspace.usemodules._pickle_support.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._pickle_support.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_pickle_support' module. -Internal helpers for pickling runtime builtin types (frames, cells, etc) -for `stackless`_ tasklet pickling support. -.. _`stackless`: ../stackless.html - -.. internal diff --git a/pypy/doc/config/translation.secondaryentrypoints.rst b/pypy/doc/config/translation.secondaryentrypoints.txt copy from pypy/doc/config/translation.secondaryentrypoints.rst copy to pypy/doc/config/translation.secondaryentrypoints.txt diff --git a/pypy/doc/config/objspace.lonepycfiles.rst b/pypy/doc/config/objspace.lonepycfiles.txt copy from pypy/doc/config/objspace.lonepycfiles.rst copy to pypy/doc/config/objspace.lonepycfiles.txt diff --git a/pypy/doc/config/objspace.usemodules.oracle.rst b/pypy/doc/config/objspace.usemodules.oracle.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. 
diff --git a/pypy/doc/config/translation.cli.trace_calls.rst b/pypy/doc/config/translation.cli.trace_calls.txt copy from pypy/doc/config/translation.cli.trace_calls.rst copy to pypy/doc/config/translation.cli.trace_calls.txt diff --git a/pypy/doc/config/objspace.usemodules.struct.rst b/pypy/doc/config/objspace.usemodules.struct.txt copy from pypy/doc/config/objspace.usemodules.struct.rst copy to pypy/doc/config/objspace.usemodules.struct.txt diff --git a/pypy/tool/rest/rest.py b/pypy/tool/rest/rest.py --- a/pypy/tool/rest/rest.py +++ b/pypy/tool/rest/rest.py @@ -10,14 +10,12 @@ pass def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): - from pypy.tool.rest import directive """ return html latin1-encoded document for the given input. source a ReST-string sourcepath where to look for includes (basically) stylesheet path (to be used if any) """ from docutils.core import publish_string - directive.set_backend_and_register_directives("html") kwargs = { 'stylesheet' : stylesheet, 'stylesheet_path': None, diff --git a/pypy/doc/config/objspace.usemodules._collections.rst b/pypy/doc/config/objspace.usemodules._collections.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._collections.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '_collections' module. -Used by the 'collections' standard lib module. This module is expected to be working and is included by default. diff --git a/pypy/doc/config/objspace.usemodules._testing.rst b/pypy/doc/config/objspace.usemodules._testing.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._testing.rst +++ /dev/null @@ -1,3 +0,0 @@ -Use the '_testing' module. This module exists only for PyPy own testing purposes. - -This module is expected to be working and is included by default. 
diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -1,15 +1,62 @@ import autopath import py -from pypy.config import pypyoption, translationoption, config +from pypy.config import pypyoption, translationoption, config, makerestdoc from pypy.doc.config.confrest import all_optiondescrs +all_optiondescrs = [pypyoption.pypy_optiondescription, + translationoption.translation_optiondescription, + ] +start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) + +def make_cmdline_overview(): + result = [] + txtpath = thisdir.join("commandline.txt") + for line in txtpath.read().splitlines(): + if line.startswith('.. GENERATE:'): + start = line[len('.. GENERATE:'):].strip() + descr = start_to_descr[start] + line = makerestdoc.make_cmdline_overview(descr, title=False).text() + result.append(line) + rstpath = txtpath.new(ext=".rst") + rstpath.write("\n".join(result)) + +def make_rst(basename): + txtpath = thisdir.join(basename) + txtpath.ensure() + rstpath = txtpath.new(ext=".rst") + + fullpath = txtpath.purebasename + start = fullpath.split(".")[0] + path = fullpath.rsplit(".", 1)[0] + basedescr = start_to_descr.get(start) + if basedescr is None: + return + if fullpath.count(".") == 0: + descr = basedescr + path = "" + else: + conf = config.Config(basedescr) + subconf, step = conf._cfgimpl_get_home_by_path( + fullpath.split(".", 1)[1]) + descr = getattr(subconf._cfgimpl_descr, step) + text = unicode(descr.make_rest_doc(path).text()) + if txtpath.check(file=True): + content = txtpath.read() + if content: + text += "\n\n" + text = u"%s\n\n%s" % (text, unicode(txtpath.read(), "utf-8")) + rstpath.write(text.encode("utf-8")) + + thisdir = py.path.local(__file__).dirpath() for descr in all_optiondescrs: prefix = descr._name c = config.Config(descr) - thisdir.join(prefix + ".rst").ensure() + thisdir.join(prefix + ".txt").ensure() + make_rst(prefix + ".txt") for p in 
c.getpaths(include_groups=True): - basename = prefix + "." + p + ".rst" - f = thisdir.join(basename) - f.ensure() + basename = prefix + "." + p + ".txt" + make_rst(basename) + +make_cmdline_overview() diff --git a/pypy/doc/config/objspace.usemodules._weakref.rst b/pypy/doc/config/objspace.usemodules._weakref.txt copy from pypy/doc/config/objspace.usemodules._weakref.rst copy to pypy/doc/config/objspace.usemodules._weakref.txt diff --git a/pypy/doc/config/objspace.usemodules.struct.rst b/pypy/doc/config/objspace.usemodules.struct.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.struct.rst +++ /dev/null @@ -1,5 +0,0 @@ -Use the built-in 'struct' module. -This module is expected to be working and is included by default. -There is also a pure Python version in lib_pypy which is used -if the built-in is disabled, but it is several orders of magnitude -slower. diff --git a/pypy/doc/config/translation.cli.trace_calls.rst b/pypy/doc/config/translation.cli.trace_calls.rst deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -20,8 +20,6 @@ Garbage collectors currently written for the GC framework ========================================================= -(Very rough sketch only for now.) - Reminder: to select which GC you want to include in a translated RPython program, use the ``--gc=NAME`` option of ``translate.py``. For more details, see the `overview of command line options for @@ -36,7 +34,7 @@ -------------- Classical Mark and Sweep collector. Also contains a lot of experimental -and half-unmaintained features. See `rpython/memory/gc/marksweep.py`_. +and half-unmaintained features. See `pypy/rpython/memory/gc/marksweep.py`_. 
Semispace copying collector --------------------------- @@ -44,7 +42,7 @@ Two arenas of equal size, with only one arena in use and getting filled with new objects. When the arena is full, the live objects are copied into the other arena using Cheney's algorithm. The old arena is then -cleared. See `rpython/memory/gc/semispace.py`_. +cleared. See `pypy/rpython/memory/gc/semispace.py`_. On Unix the clearing is done by reading ``/dev/zero`` into the arena, which is extremely memory efficient at least on Linux: it lets the @@ -57,7 +55,7 @@ Generational GC --------------- -This is a two-generations GC. See `rpython/memory/gc/generation.py`_. +This is a two-generations GC. See `pypy/rpython/memory/gc/generation.py`_. It is implemented as a subclass of the Semispace copying collector. It adds a nursery, which is a chunk of the current semispace. Its size is @@ -88,7 +86,7 @@ Each generation is collected much less often than the previous one. The division of the generations is slightly more complicated than just nursery / semispace / external; see the diagram at the start of the -source code, in `rpython/memory/gc/hybrid.py`_. +source code, in `pypy/rpython/memory/gc/hybrid.py`_. Mark & Compact GC ----------------- @@ -126,7 +124,7 @@ information in the regular headers. More details are available as comments at the start of the source -in `rpython/memory/gc/markcompact.py`_. +in `pypy/rpython/memory/gc/markcompact.py`_. Minimark GC ----------- @@ -214,4 +212,90 @@ becomes free garbage, to be collected at the next major collection. -.. include:: _ref.rst +Minimark GC +----------- + +This is a simplification and rewrite of the ideas from the Hybrid GC. +It uses a nursery for the young objects, and mark-and-sweep for the old +objects. This is a moving GC, but objects may only move once (from +the nursery to the old stage). 
+ +The main difference with the Hybrid GC is that the mark-and-sweep +objects (the "old stage") are directly handled by the GC's custom +allocator, instead of being handled by malloc() calls. The gain is that +it is then possible, during a major collection, to walk through all old +generation objects without needing to store a list of pointers to them. +So as a first approximation, when compared to the Hybrid GC, the +Minimark GC saves one word of memory per old object. + +There are a number of environment variables that can be tweaked to +influence the GC. (Their default value should be ok for most usages.) +You can read more about them at the start of +`pypy/rpython/memory/gc/minimark.py`_. + +In more details: + +- The small newly malloced objects are allocated in the nursery (case 1). + All objects living in the nursery are "young". + +- The big objects are always handled directly by the system malloc(). + But the big newly malloced objects are still "young" when they are + allocated (case 2), even though they don't live in the nursery. + +- When the nursery is full, we do a minor collection, i.e. we find + which "young" objects are still alive (from cases 1 and 2). The + "young" flag is then removed. The surviving case 1 objects are moved + to the old stage. The dying case 2 objects are immediately freed. + +- The old stage is an area of memory containing old (small) objects. It + is handled by `pypy/rpython/memory/gc/minimarkpage.py`_. It is organized + as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. + Each page can either be free, or contain small objects of all the same + size. Furthermore at any point in time each object location can be + either allocated or freed. The basic design comes from ``obmalloc.c`` + from CPython (which itself comes from the same source as the Linux + system malloc()). + +- New objects are added to the old stage at every minor collection. 
+ Immediately after a minor collection, when we reach some threshold, we + trigger a major collection. This is the mark-and-sweep step. It walks + over *all* objects (mark), and then frees some fraction of them (sweep). + This means that the only time when we want to free objects is while + walking over all of them; we never ask to free an object given just its + address. This allows some simplifications and memory savings when + compared to ``obmalloc.c``. + +- As with all generational collectors, this GC needs a write barrier to + record which old objects have a reference to young objects. + +- Additionally, we found out that it is useful to handle the case of + big arrays specially: when we allocate a big array (with the system + malloc()), we reserve a small number of bytes before. When the array + grows old, we use the extra bytes as a set of bits. Each bit + represents 128 entries in the array. Whenever the write barrier is + called to record a reference from the Nth entry of the array to some + young object, we set the bit number ``(N/128)`` to 1. This can + considerably speed up minor collections, because we then only have to + scan 128 entries of the array instead of all of them. + +- As usual, we need special care about weak references, and objects with + finalizers. Weak references are allocated in the nursery, and if they + survive they move to the old stage, as usual for all objects; the + difference is that the reference they contain must either follow the + object, or be set to NULL if the object dies. And the objects with + finalizers, considered rare enough, are immediately allocated old to + simplify the design. In particular their ``__del__`` method can only + be called just after a major collection. + +- The objects move once only, so we can use a trick to implement id() + and hash(). If the object is not in the nursery, it won't move any + more, so its id() and hash() are the object's address, cast to an + integer. 
If the object is in the nursery, and we ask for its id() + or its hash(), then we pre-reserve a location in the old stage, and + return the address of that location. If the object survives the + next minor collection, we move it there, and so its id() and hash() + are preserved. If the object dies then the pre-reserved location + becomes free garbage, to be collected at the next major collection. + + +.. include:: _ref.txt diff --git a/pypy/doc/config/objspace.std.withstrslice.rst b/pypy/doc/config/objspace.std.withstrslice.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withstrslice.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enable "string slice" objects. - -See the page about `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#string-slice-objects - - diff --git a/pypy/doc/config/translation.dump_static_data_info.rst b/pypy/doc/config/translation.dump_static_data_info.rst deleted file mode 100644 --- a/pypy/doc/config/translation.dump_static_data_info.rst +++ /dev/null @@ -1,3 +0,0 @@ -Dump information about static prebuilt constants, to the file -TARGETNAME.staticdata.info in the /tmp/usession-... directory. This file can -be later inspected using the script ``bin/reportstaticdata.py``. diff --git a/pypy/doc/config/objspace.allworkingmodules.rst b/pypy/doc/config/objspace.allworkingmodules.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.allworkingmodules.rst +++ /dev/null @@ -1,6 +0,0 @@ -This option enables the usage of all modules that are known to be working well -and that translate without problems. - -Note that this option defaults to True (except when running -``py.py`` because it takes a long time to start). To force it -to False, use ``--no-allworkingmodules``. 
diff --git a/pypy/doc/config/objspace.opcodes.CALL_METHOD.rst b/pypy/doc/config/objspace.opcodes.CALL_METHOD.txt copy from pypy/doc/config/objspace.opcodes.CALL_METHOD.rst copy to pypy/doc/config/objspace.opcodes.CALL_METHOD.txt diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt copy from pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.rst copy to pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt diff --git a/pypy/doc/config/translation.sandbox.rst b/pypy/doc/config/translation.sandbox.rst deleted file mode 100644 --- a/pypy/doc/config/translation.sandbox.rst +++ /dev/null @@ -1,15 +0,0 @@ -Generate a special fully-sandboxed executable. - -The fully-sandboxed executable cannot be run directly, but -only as a subprocess of an outer "controlling" process. The -sandboxed process is "safe" in the sense that it doesn't do -any library or system call - instead, whenever it would like -to perform such an operation, it marshals the operation name -and the arguments to its stdout and it waits for the -marshalled result on its stdin. This controller process must -handle these operation requests, in any way it likes, allowing -full virtualization. - -For examples of controller processes, see -``pypy/translator/sandbox/interact.py`` and -``pypy/translator/sandbox/pypy_interact.py``. diff --git a/pypy/doc/config/translation.rweakref.rst b/pypy/doc/config/translation.rweakref.txt copy from pypy/doc/config/translation.rweakref.rst copy to pypy/doc/config/translation.rweakref.txt diff --git a/pypy/doc/theory.rst b/pypy/doc/theory.rst deleted file mode 100644 --- a/pypy/doc/theory.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. include:: crufty.rst - - .. ^^ old ideas; we're not doing it this way any more - -=================================== -Techniques used in PyPy -=================================== - -.. contents:: - - -.. 
_`abstract interpretation`: - -Abstract Interpretation -======================= - -Abstract Interpretation is a general technique which consists of an -interpreter that follows the bytecode instructions of a user program, just -like a normal interpreter does, but with abstract objects instead of concrete -ones. Remember that in PyPy this is done by using alternate object spaces with -the same bytecode interpreter main loop. - -As a theoretical example, the most abstract object space would be the one manipulating the most abstract objects that you could imagine: they are all equivalent, because we have abstracted away any information about the object. There is actually only one of them left, and we could call it "the object". In Python terms, an AbstractObjectSpace could use None for all its wrapped objects. Any operation between wrapped objects gives None again as the wrapped result -- there is nothing else it could give anyway. So when you have said that the add method of AbstractObjectSpace takes None and None and returns None you have said everything. - -The point of such an object space is for example to check the bytecode. The -bytecode interpreter will really run your bytecode, just with completely -abstract arguments. If there is no problem then you are sure that the bytecode -is valid. You could also record, during this abstract interpretation, how much -the stack ever grows; that would give you a fool-proof method of computing or -checking the co_stacksize argument of a code object. (There are subtleties -which I won't describe here, but that's the basic idea.) - -Typically, however, abstract object spaces are a (little) bit less abstract, still maintaining a minimal amount of information about the objects. For example, a wrapped object could be represented by its type. You then define the object space's add to return int when the two arguments are int and int. 
That way, you abstractedly call a function with the input argument's types and what the interpreter will do is a type inference. (Here also there are subtle problems, even besides the remark that integer operations can overflow and actually return longs in a real Python implementation.) - -As an example of more abstract object spaces you have the ones with finite domain, i.e. with a finite number of different possible wrapped objects. For example, you can use True and False as wrapped values to denote the fact that the object is, respectively, a non-negative integer or anything else. In this way you are doing another kind of type inference that just tells you which variables will only ever contain non-negative integers. - -In PyPy, the FlowObjSpace_ uses the abstract interpretation technique to generate a control flow graph of the functions of RPython_ programs. - -In its `more formal definition`_, Abstract Interpretation typically -considers abstract objects that are organized in a lattice_: some of -these objects are more (or less) abstract than others, in the sense that -they represent less (or more) known information; to say that this forms -a lattice essentially means that any two abstract objects have -well-defined unions and intersections (which are again abstract -objects). - -.. _FlowObjSpace: objspace.html#the-flow-object-space -.. _RPython: coding-guide.html#restricted-python -.. _`more formal definition`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _lattice: http://en.wikipedia.org/wiki/Lattice_%28order%29 - - -Multimethods -============ - -A "multimethod" is the generalization of the OOP notion of "method". -Theoretically, a method is a "message name" and signature attached to a -particular base class, which is implemented in the class or its subclasses. -To do a "method call" means to send a message to an object, using a message -name and actual arguments. 
We call "message dispatch" the operation of -finding which actual implementation is suitable for a particular call. For -methods, a message is dispatched by looking up the class of the "self" object, -and finding an implementation in that class, or in its base classes, in a -certain order. - -A multimethod is a message name and signature that can have implementations -that depend not only on the class of the first "self" argument, but on the -class of several arguments. Because of this we cannot use Python's nice model -of storing method implementations as functions, in the attributes of the -class. - -Here is a common implementation of multimethods: they are instances of a -specific MultiMethod class, and the instances are callable (there is a -__call__ operator on MultiMethod). When a MultiMethod is called, a dispatch -algorithm is used to find which, among the registered implementations, is the -one that should be called; this implementation is then immediately called. The -most important difference with normal methods is that the MultiMethod object -to call is no longer syntactically attached to classes. In other words, -whereas a method is called with ``obj.somemethod(args)``, a multimethod is -called much like a function, e.g. ``dosomething(obj1, obj2, obj3...)``. You -have to find the MultiMethod object ``dosomething`` in some namespace; it is -no longer implicitly looked up in the namespace of the "self" object. - -PyPy contains two different implementations of multimethods: a `quite general -one`_ written in RPython_ for the purposes of the StdObjSpace_, and a `short -two-arguments-dispatching one`_ used internally by the annotator_. - -.. _`quite general one`: http://codespeak.net/svn/pypy/dist/pypy/objspace/std/multimethod.py -.. _StdObjSpace: objspace.html#the-standard-object-space -.. _`short two-arguments-dispatching one`: http://codespeak.net/svn/pypy/dist/pypy/tool/pairtype.py -.. 
_annotator: translation.html#annotator diff --git a/pypy/doc/config/objspace.std.withprebuiltint.rst b/pypy/doc/config/objspace.std.withprebuiltint.txt copy from pypy/doc/config/objspace.std.withprebuiltint.rst copy to pypy/doc/config/objspace.std.withprebuiltint.txt diff --git a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst b/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.profile_based_inline_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for profile-based inlining (:config:`translation.backendopt.profile_based_inline`). diff --git a/pypy/doc/config/translation.withsmallfuncsets.rst b/pypy/doc/config/translation.withsmallfuncsets.txt copy from pypy/doc/config/translation.withsmallfuncsets.rst copy to pypy/doc/config/translation.withsmallfuncsets.txt diff --git a/pypy/doc/discussion/pypy_metaclasses_in_cl.rst b/pypy/doc/discussion/pypy_metaclasses_in_cl.rst deleted file mode 100644 --- a/pypy/doc/discussion/pypy_metaclasses_in_cl.rst +++ /dev/null @@ -1,139 +0,0 @@ -IRC log -======= - -:: - - [09:41] arigo: is it possible to ask the backendoptimizer to completely remove all the oogetfield('meta', obj)? - [09:42] and at the same time to change all the oogetfield('somefield', meta) into oogetfield('somefield', obj) - [09:42] because then we wouldn't need the metaclass hierarchy anymore - [09:42] (at least in common lisp) - [09:42] as far as I know the idea was indeed to be able to do this kind of things - [09:43] but not necessarily in the existing backendopt - [09:44] uhmmm - [09:44] I have no idea how to do this stuff - [09:44] if I understand it correctly, as a first step you can just tweak gencl to recognize oogetfield('meta', obj) - [09:44] I'll think about it on the plane maybe - [09:44] and produce a same_as equivalent instead - [09:44] (do I make any sense at all?) 
- [09:44] yes - [09:45] same_as(meta, obj) - [09:45] so that the next oogetfield() will still work on meta which in reality is the obj - [09:45] yes - [09:45] thus you obtained the same thing without removing anything - [09:45] cool - [09:46] dialtone: can you explain me better what are you trying to do? - [09:46] it looks kinda simple - [09:46] am I a fool? - [09:46] antocuni: I want to get rid of the metaclass stuff in common lisp - [09:47] since common lisp supports class variables - [09:47] (DEFCLASS foo () ((bar :allocate :class))) - [09:47] cool - [09:47] but to do that I also have to get rid of the opcodes that work on the object model - [09:48] at first I thought about removing the metaclass related operations (or change them) but armin got a great idea about using same_as - [09:48] idnar (i=mithrand at unaffiliated/idnar) left irc: Remote closed the connection - [09:48] there might be a few problems, though - [09:48] and here comes the part I feared - [09:48] I'm not sure if the meta object is used for more than oogetfields - [09:49] and also, let's see if there are name clashes in the fields - [09:49] I can't understand a thing: are you trying to lookup some fields in the obj directly, instead of in the metclass, right? - [09:49] antocuni: yes - [09:50] why an object should have fields that belongs to its metaclass? - [09:50] arigo: uhmmm you can have both a class variable and an instance variable named in the same way? - [09:50] metaclass is not a real metaclass - [09:50] I don't know - [09:50] arigo - r26566 - Support geterrno() from rctypes to genc. - [09:50] dialtone: ah, now I understand - [09:50] I would expect it not to be the case, as the names come from RPython names - [09:51] arigo: indeed - [09:51] but I guess I can set different accessors maybe for class level things and for instance level things - [09:51] let's try - [09:51] no... - [09:52] so a name clash would break stuff - [09:52] but... 
how do you recognize an access to a class variable and one to an instance variable from RPython? - [09:53] dialtone: I think we don't have name clashes, because there is some mangling anyway - [09:53] cool - [09:53] if I see it correctly, class variable names start with 'pbc' and instance ones with 'o' - [09:53] that's what we've done in gencl yes - [09:54] ? that's what the ootyping is doing - [09:54] yes yes - [09:54] :-) - [09:54] I mean that I see the distinction in gencl :) - [09:54] sooooooo - [09:55] if I have a getfield where the first argument is meta and I simply emit the same code that I emit for the same_as I should be safe removing all the meta stuff... maybe - [09:55] seems like a tiny change in gencl - [09:55] dialtone: in RPython, the annotator says that attributes are instance fields as soon as they are written to instances, otherwise they are class attributes - [09:56] yes, it should work - [09:56] Palats (n=Pierre at izumi.palats.com) left irc: Read error: 104 (Connection reset by peer) - [09:56] unless of course metaclasses are used for something else than class variables - [09:56] ideally, you should not look for the name 'meta' but for some other hint - [09:57] I'm not completely at ease with the various levels of ootype - [09:57] neither am I\ - [09:57] all field names other than those defined by ootype (like "meta") will be mangled, so i guess checking for "meta" is good enough - [09:57] and I also have to ignore the setfield opcode that deals with metaclasses - [09:58] or make it a same_as as well - [09:59] apparently, the meta instances are used as the ootype of RPython classes - [10:00] so they can be manipulated by RPython code that passes classes around - [10:01] I guess you can also pass classes around in CL, read attributes from them, and instantiate them - [10:01] yes - [10:01] so a saner approach might be to try to have gencl use CL classes instead of these meta instances - [10:03] uhmmmmm - [10:03] which means: recognize if an 
ootype.Instance is actually representing an RPython class (by using a hint) - [10:03] I also have to deal with the Class_ - [10:03] but that can probably be set to standard-class - [10:03] yes, I think it's saner to make, basically, oogetfield('class_') be a same_as - [10:04] cool - [10:04] I think I'll save this irc log to put it in the svn tree for sanxiyn - [10:04] to recognize RPython class represenations: if the ootype.Instance has the superclass ootypesystem.rclass.CLASSTYPE, then it's a "metaclass" - [10:04] he is thinking about this in the plane (at least this is what he told) - [10:05] :-) - [10:05] nikh: yes - [10:05] ootype is indeed rather complicated, level-wise, to support limited languages like Java - [10:05] unfortunately, yes - [10:05] well, in a way it's very convenient for the backends - [10:05] but if you want to use more native constructs, it gets hairy quickly - [10:05] I dunno - [10:05] depends on the backend - [10:06] hum, there is still an information missing that gencl would need here - [10:06] I think if the language of the backend is powerful enough it could use an higher abstraction - [10:07] dialtone: yes, there is also the (hairly to implement) idea of producing slightly different things for different back-ends too - [10:07] using backendopts? - [10:08] would it make sense to have a kind of backend_supports=['metaclasses', 'classvariables', 'first_class_functions'...] - [10:08] maybe, but I was thinking about doing different things in ootypesystem/rclass already - [10:08] yes, such a backend_supports would be great - [10:09] dialtone: there is still an hour left to sprint, so go go go ;) - [10:09] you can do it, if you want it ;) - [10:09] what is missing is the link from the concrete Instance types, and which Instance corresponds to its meta-instance - [10:10] idnar (i=mithrand at unaffiliated/idnar) joined #pypy. 
- [10:10] dialtone: it's not as simple as making an oogetfield be a same_as - [10:10] KnowledgeUnboundError, Missing documentation in slot brain - [10:10] right now for CL the goal would be to generate for a normal Instance, a DEFCLASS whose :allocate :class attributes are the attributes of the meta-Instance - [10:11] we could optionally have class fields in Instances, and then operations like ooget/setclassfield - [10:11] the reason why I ask is that if we manage to do this then we could also use default Condition as Exception - [10:11] and we could map the Conditions in common lisp to exceptions in python transparently - [10:12] since the object systems will then match (and they are vaguely similar anyway) - [10:12] nice - [10:12] at least I think - [10:18] I'm still rather confused by ootypesystem/rclass - [10:18] although I think that blame would show my name on quite some bits :-) - [10:19] there are no class attributes read through instances - [10:19] they are turned into method calls - [10:19] accessor methods - [10:20] it's a bit organically grown - [10:20] accessor methods were introduced at one point, and the meta-Instance later - [10:21] uhmmm - [10:22] what was the reason for having accessor methods? - [10:22] they seem to be only generated for class vars that are overriden in subclasses. - [10:22] yes - [10:22] before we had the meta-Instance trick, it was the only way to avoid storing the value in all instances - [10:22] aha - [10:23] we could possibly get rid of these accessors - [10:23] now, yes, by storing the values in the meta-Instance - [10:23] they are alway anyway stored in the meta-Instance, I think - [10:23] no, I think that other values are stored in the meta-Instance right now - [10:24] it's the values that are only ever accessed with a syntax 'ClassName.attr', i.e. not through an instance - [10:24] ...more precisely, with 'x = ClassName or OtherClassName; x.attr' - [10:25] hm, i'm still trying to read this out of the code ... 
- [10:28] it's in ClassRepr._setup_repr() - [10:28] there is no clsfields here, just pbcfields - [10:28] # attributes showing up in getattrs done on the class as a PBC - [10:28] i see diff --git a/pypy/bin/carbonpython.py b/pypy/bin/carbonpython.py deleted file mode 100755 --- a/pypy/bin/carbonpython.py +++ /dev/null @@ -1,5 +0,0 @@ -#! /usr/bin/env python -import autopath, sys -from pypy.translator.cli.carbonpython import main - -main(sys.argv) diff --git a/pypy/doc/config/objspace.usemodules._stackless.rst b/pypy/doc/config/objspace.usemodules._stackless.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._stackless.rst +++ /dev/null @@ -1,6 +0,0 @@ -Use the '_stackless' module. - -Exposes the `stackless` primitives, and also implies a stackless build. -See also :config:`translation.stackless`. - -.. _`stackless`: ../stackless.html diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -1,7 +1,3 @@ -.. include:: crufty.rst - - .. ^^ it continues to work, but is unmaintained - PyPy's sandboxing features ========================== diff --git a/pypy/doc/config/translation.gc.rst b/pypy/doc/config/translation.gc.rst deleted file mode 100644 --- a/pypy/doc/config/translation.gc.rst +++ /dev/null @@ -1,13 +0,0 @@ -Choose the Garbage Collector used by the translated program: - - - "ref": reference counting. Takes very long to translate and the result is - slow. - - - "marksweep": naive mark & sweep. - - - "semispace": a copying semi-space GC. - - - "generation": a generational GC using the semi-space GC for the - older generation. - - - "boehm": use the Boehm conservative GC. 
diff --git a/pypy/doc/config/translation.gc.rst b/pypy/doc/config/translation.gc.txt copy from pypy/doc/config/translation.gc.rst copy to pypy/doc/config/translation.gc.txt diff --git a/pypy/doc/config/objspace.usemodules.imp.rst b/pypy/doc/config/objspace.usemodules.imp.txt copy from pypy/doc/config/objspace.usemodules.imp.rst copy to pypy/doc/config/objspace.usemodules.imp.txt diff --git a/pypy/doc/config/objspace.usemodules.bz2.rst b/pypy/doc/config/objspace.usemodules.bz2.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.bz2.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'bz2' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/discussion/distribution-roadmap.rst b/pypy/doc/discussion/distribution-roadmap.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-roadmap.rst +++ /dev/null @@ -1,72 +0,0 @@ -Distribution: -============= - -Some random thoughts about automatic (or not) distribution layer. - -What I want to achieve is to make clean approach to perform -distribution mechanism with virtually any distribution heuristic. - -First step - RPython level: ---------------------------- - -First (simplest) step is to allow user to write RPython programs with -some kind of remote control over program execution. For start I would -suggest using RMI (Remote Method Invocation) and remote object access -(in case of low level it would be struct access). For the simplicity -it will make some sense to target high-level platform at the beginning -(CLI platform seems like obvious choice), which provides more primitives -for performing such operations. To make attempt easier, I'll provide -some subset of type system to be serializable which can go as parameters -to such a call. - -I take advantage of several assumptions: - -* globals are constants - this allows us to just run multiple instances - of the same program on multiple machines and perform RMI. 
- -* I/O is explicit - this makes GIL problem not that important. XXX: I've got - to read more about GIL to notice if this is true. - -Second step - doing it a little bit more automatically: -------------------------------------------------------- - -The second step is to allow some heuristic to live and change -calls to RMI calls. This should follow some assumptions (which may vary, -regarding implementation): - -* Not to move I/O to different machine (we can track I/O and side-effects - in RPython code). - -* Make sure all C calls are safe to transfer if we want to do that (this - depends on probably static API declaration from programmer "I'm sure this - C call has no side-effects", we don't want to check it in C) or not transfer - them at all. - -* Perform it all statically, at the time of program compilation. - -* We have to generate serialization methods for some classes, which - we want to transfer (Same engine might be used to allow JSON calls in JS - backend to transfer arbitrary python object). - -Third step - Just-in-time distribution: ---------------------------------------- - -The biggest step here is to provide JIT integration into distribution -system. This should allow to make it really useful (probably compile-time -distribution will not work for example for whole Python interpreter, because -of too huge granularity). This is quite unclear for me how to do that -(JIT is not complete and I don't know too much about it). Probably we -take JIT information about graphs and try to feed it to heuristic in some way -to change the calls into RMI. - -Problems to fight with: ------------------------ - -Most problems are to make mechanism working efficiently, so: - -* Avoid too much granularity (copying a lot of objects in both directions - all the time) - -* Make heuristic not eat too much CPU time/memory and all of that. - -* ... 
diff --git a/pypy/doc/config/objspace.usemodules.pypyjit.rst b/pypy/doc/config/objspace.usemodules.pypyjit.txt copy from pypy/doc/config/objspace.usemodules.pypyjit.rst copy to pypy/doc/config/objspace.usemodules.pypyjit.txt diff --git a/pypy/doc/config/objspace.usemodules._file.rst b/pypy/doc/config/objspace.usemodules._file.txt copy from pypy/doc/config/objspace.usemodules._file.rst copy to pypy/doc/config/objspace.usemodules._file.txt diff --git a/pypy/doc/discussion/summer-of-pypy-pytest.rst b/pypy/doc/discussion/summer-of-pypy-pytest.rst deleted file mode 100644 --- a/pypy/doc/discussion/summer-of-pypy-pytest.rst +++ /dev/null @@ -1,56 +0,0 @@ -============================================ -Summer of PyPy proposal: Distributed py.test -============================================ - - -Purpose: -======== - -The main purpose of distributing py.test is to speedup tests -of actual applications (running all pypy tests already takes -ages). - -Method: -======= - -Remote imports: ---------------- - -On the beginning of communication, master server sends to client -import hook code, which then can import all needed libraries. - -Libraries are uploaded server -> client if they're needed (when -__import__ is called). Possible extension is to add some kind of -checksum (md5?) and store files in some directory. - -Previous experiments: ---------------------- - -Previous experiments tried to run on the lowest level - when function/ -method is called. This is pretty clear (you run as few code on client -side as possible), but has got some drawbacks: - -- You must simulate *everything* and transform it to server side in - case of need of absolutely anything (tracebacks, short and long, - source code etc.) -- It's sometimes hard to catch exceptions. -- Top level code in testing module does not work at all. 
- -Possible approach: ------------------- - -On client side (side really running tests) run some kind of cut-down -session, which is imported by remote import at the very beginning and -after that, we run desired tests (probably by importing whole test -file which allows us to have top-level imports). - -Then we transfer output data to server as string, possibly tweaking -file names (which is quite easy). - -Deliverables: -============= - -- better use of testing machines -- cut down test time -- possible extension to run distributed code testing, by running and - controlling several distributed parts on different machines. diff --git a/pypy/doc/config/objspace.usemodules.unicodedata.rst b/pypy/doc/config/objspace.usemodules.unicodedata.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.unicodedata.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'unicodedata' module. -This module is expected to be fully working. diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst --- a/pypy/doc/dot-net.rst +++ b/pypy/doc/dot-net.rst @@ -9,4 +9,3 @@ cli-backend.rst clr-module.rst - carbonpython.rst diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -5,6 +5,9 @@ """ import py +# this file is useless, use the following commandline instead: +# hg churn -c -t "{author}" | sed -e 's/ <.*//' + try: path = py.std.sys.argv[1] except IndexError: diff --git a/pypy/doc/config/translation.type_system.rst b/pypy/doc/config/translation.type_system.rst deleted file mode 100644 --- a/pypy/doc/config/translation.type_system.rst +++ /dev/null @@ -1,4 +0,0 @@ -Which type system to use when rtyping_. This option should not be set -explicitly. - -.. _rtyping: ../rtyper.html diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.txt copy from pypy/doc/_ref.rst copy to pypy/doc/_ref.txt --- a/pypy/doc/_ref.rst +++ b/pypy/doc/_ref.txt @@ -1,107 +1,125 @@ -.. _`demo/`: ../../demo -.. 
_`demo/pickle_coroutine.py`: ../../demo/pickle_coroutine.py -.. _`lib-python/`: ../../lib-python -.. _`lib-python/2.5.2/dis.py`: ../../lib-python/2.5.2/dis.py -.. _`annotation/`: -.. _`pypy/annotation`: ../../../../pypy/annotation -.. _`pypy/annotation/annrpython.py`: ../../../../pypy/annotation/annrpython.py -.. _`annotation/binaryop.py`: ../../../../pypy/annotation/binaryop.py -.. _`pypy/annotation/builtin.py`: ../../../../pypy/annotation/builtin.py -.. _`pypy/annotation/model.py`: ../../../../pypy/annotation/model.py -.. _`bin/`: ../../../../pypy/bin -.. _`config/`: ../../../../pypy/config -.. _`pypy/config/pypyoption.py`: ../../../../pypy/config/pypyoption.py -.. _`doc/`: ../../../../pypy/doc -.. _`doc/config/`: ../../../../pypy/doc/config -.. _`doc/discussion/`: ../../../../pypy/doc/discussion -.. _`interpreter/`: -.. _`pypy/interpreter`: ../../../../pypy/interpreter -.. _`pypy/interpreter/argument.py`: ../../../../pypy/interpreter/argument.py -.. _`interpreter/astcompiler/`: -.. _`pypy/interpreter/astcompiler`: ../../../../pypy/interpreter/astcompiler -.. _`pypy/interpreter/executioncontext.py`: ../../../../pypy/interpreter/executioncontext.py -.. _`pypy/interpreter/function.py`: ../../../../pypy/interpreter/function.py -.. _`interpreter/gateway.py`: -.. _`pypy/interpreter/gateway.py`: ../../../../pypy/interpreter/gateway.py -.. _`pypy/interpreter/generator.py`: ../../../../pypy/interpreter/generator.py -.. _`pypy/interpreter/mixedmodule.py`: ../../../../pypy/interpreter/mixedmodule.py -.. _`pypy/interpreter/module.py`: ../../../../pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: ../../../../pypy/interpreter/nestedscope.py -.. _`pypy/interpreter/pyopcode.py`: ../../../../pypy/interpreter/pyopcode.py -.. _`interpreter/pyparser/`: -.. _`pypy/interpreter/pyparser`: ../../../../pypy/interpreter/pyparser -.. _`pypy/interpreter/pyparser/pytokenizer.py`: ../../../../pypy/interpreter/pyparser/pytokenizer.py -.. 
_`pypy/interpreter/pyparser/parser.py`: ../../../../pypy/interpreter/pyparser/parser.py -.. _`pypy/interpreter/pyparser/pyparse.py`: ../../../../pypy/interpreter/pyparser/pyparse.py -.. _`pypy/interpreter/pyparser/future.py`: ../../../../pypy/interpreter/pyparser/future.py -.. _`pypy/interpreter/pyparser/metaparser.py`: ../../../../pypy/interpreter/pyparser/metaparser.py -.. _`pypy/interpreter/astcompiler/astbuilder.py`: ../../../../pypy/interpreter/astcompiler/astbuilder.py -.. _`pypy/interpreter/astcompiler/optimize.py`: ../../../../pypy/interpreter/astcompiler/optimize.py -.. _`pypy/interpreter/astcompiler/codegen.py`: ../../../../pypy/interpreter/astcompiler/codegen.py -.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: ../../../../pypy/interpreter/astcompiler/tools/asdl_py.py -.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: ../../../../pypy/interpreter/astcompiler/tools/Python.asdl -.. _`pypy/interpreter/astcompiler/assemble.py`: ../../../../pypy/interpreter/astcompiler/assemble.py -.. _`pypy/interpreter/astcompiler/symtable.py`: ../../../../pypy/interpreter/astcompiler/symtable.py -.. _`pypy/interpreter/astcompiler/asthelpers.py`: ../../../../pypy/interpreter/astcompiler/asthelpers.py -.. _`pypy/interpreter/astcompiler/ast.py`: ../../../../pypy/interpreter/astcompiler/ast.py -.. _`pypy/interpreter/typedef.py`: ../../../../pypy/interpreter/typedef.py -.. _`lib/`: -.. _`lib_pypy/`: ../../lib_pypy -.. _`lib/distributed/`: ../../lib_pypy/distributed -.. _`lib_pypy/stackless.py`: ../../lib_pypy/stackless.py -.. _`lib_pypy/pypy_test/`: ../../lib_pypy/pypy_test -.. _`module/`: +.. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ +.. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py +.. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ +.. _`lib-python/2.7.0/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7.0/dis.py +.. 
_`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ +.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ +.. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py +.. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py +.. _`pypy/annotation`: +.. _`pypy/annotation/`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/ +.. _`pypy/annotation/annrpython.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/annrpython.py +.. _`pypy/annotation/binaryop.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/binaryop.py +.. _`pypy/annotation/builtin.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/annotation/builtin.py +.. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ +.. _`pypy/bin/translatorshell.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/translatorshell.py +.. _`pypy/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/ +.. _`pypy/config/pypyoption.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/pypyoption.py +.. _`pypy/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/config/translationoption.py +.. _`pypy/doc/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/ +.. _`pypy/doc/config/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/config/ +.. _`pypy/doc/discussion/`: https://bitbucket.org/pypy/pypy/src/default/pypy/doc/discussion/ +.. _`pypy/interpreter`: +.. _`pypy/interpreter/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/ +.. _`pypy/interpreter/argument.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/argument.py +.. _`pypy/interpreter/astcompiler`: +.. _`pypy/interpreter/astcompiler/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ +.. 
_`pypy/interpreter/astcompiler/assemble.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/assemble.py +.. _`pypy/interpreter/astcompiler/ast.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/ast.py +.. _`pypy/interpreter/astcompiler/astbuilder.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/astbuilder.py +.. _`pypy/interpreter/astcompiler/asthelpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/asthelpers.py +.. _`pypy/interpreter/astcompiler/codegen.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/codegen.py +.. _`pypy/interpreter/astcompiler/optimize.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/optimize.py +.. _`pypy/interpreter/astcompiler/symtable.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/symtable.py +.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/tools/Python.asdl +.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/astcompiler/tools/asdl_py.py +.. _`pypy/interpreter/baseobjspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/baseobjspace.py +.. _`pypy/interpreter/eval.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/eval.py +.. _`pypy/interpreter/executioncontext.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/executioncontext.py +.. _`pypy/interpreter/function.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/function.py +.. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py +.. _`pypy/interpreter/generator.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/generator.py +.. 
_`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py +.. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py +.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py +.. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py +.. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py +.. _`pypy/interpreter/pyparser`: +.. _`pypy/interpreter/pyparser/`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/ +.. _`pypy/interpreter/pyparser/future.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/future.py +.. _`pypy/interpreter/pyparser/metaparser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/metaparser.py +.. _`pypy/interpreter/pyparser/parser.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/parser.py +.. _`pypy/interpreter/pyparser/pyparse.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pyparse.py +.. _`pypy/interpreter/pyparser/pytokenizer.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyparser/pytokenizer.py +.. _`pypy/interpreter/typedef.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/typedef.py .. _`pypy/module`: -.. _`pypy/module/`: ../../../../pypy/module -.. _`pypy/module/__builtin__/__init__.py`: ../../../../pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_clonable.py`: ../../../../pypy/module/_stackless/test/test_clonable.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: ../../../../pypy/module/_stackless/test/test_composable_coroutine.py -.. _`objspace/`: -.. _`pypy/objspace`: ../../../../pypy/objspace -.. _`objspace/dump.py`: ../../../../pypy/objspace/dump.py -.. 
_`objspace/flow/`: ../../../../pypy/objspace/flow -.. _`objspace/std/`: -.. _`pypy/objspace/std`: ../../../../pypy/objspace/std -.. _`objspace/taint.py`: ../../../../pypy/objspace/taint.py -.. _`objspace/thunk.py`: -.. _`pypy/objspace/thunk.py`: ../../../../pypy/objspace/thunk.py -.. _`objspace/trace.py`: -.. _`pypy/objspace/trace.py`: ../../../../pypy/objspace/trace.py +.. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ +.. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/_stackless/test/test_composable_coroutine.py +.. _`pypy/objspace`: +.. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ +.. _`pypy/objspace/dump.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/dump.py +.. _`pypy/objspace/flow`: +.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ +.. _`pypy/objspace/flow/model.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/model.py +.. _`pypy/objspace/std`: +.. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ +.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py +.. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py +.. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py +.. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py +.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. 
_`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py +.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py +.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py +.. _`pypy/objspace/taint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/taint.py +.. _`pypy/objspace/thunk.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/thunk.py +.. _`pypy/objspace/trace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace.py .. _`pypy/rlib`: -.. _`rlib/`: ../../../../pypy/rlib -.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py -.. _`pypy/rlib/test`: ../../../../pypy/rlib/test +.. _`pypy/rlib/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/ +.. _`pypy/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/listsort.py +.. _`pypy/rlib/nonconst.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/nonconst.py +.. _`pypy/rlib/objectmodel.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/objectmodel.py +.. _`pypy/rlib/parsing/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/parsing/ +.. _`pypy/rlib/parsing/tree.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/parsing/tree.py +.. _`pypy/rlib/rarithmetic.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rarithmetic.py +.. _`pypy/rlib/rbigint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rbigint.py +.. _`pypy/rlib/rrandom.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rrandom.py +.. _`pypy/rlib/rsocket.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rsocket.py +.. _`pypy/rlib/rstack.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/rstack.py +.. _`pypy/rlib/streamio.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/streamio.py +.. 
_`pypy/rlib/test`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/test/ +.. _`pypy/rlib/unroll.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rlib/unroll.py .. _`pypy/rpython`: -.. _`pypy/rpython/`: -.. _`rpython/`: ../../../../pypy/rpython -.. _`rpython/lltypesystem/`: ../../../../pypy/rpython/lltypesystem -.. _`pypy/rpython/lltypesystem/lltype.py`: -.. _`rpython/lltypesystem/lltype.py`: ../../../../pypy/rpython/lltypesystem/lltype.py -.. _`rpython/memory/`: ../../../../pypy/rpython/memory -.. _`rpython/memory/gc/generation.py`: ../../../../pypy/rpython/memory/gc/generation.py -.. _`rpython/memory/gc/hybrid.py`: ../../../../pypy/rpython/memory/gc/hybrid.py -.. _`rpython/memory/gc/markcompact.py`: ../../../../pypy/rpython/memory/gc/markcompact.py -.. _`rpython/memory/gc/marksweep.py`: ../../../../pypy/rpython/memory/gc/marksweep.py -.. _`rpython/memory/gc/semispace.py`: ../../../../pypy/rpython/memory/gc/semispace.py -.. _`rpython/ootypesystem/`: ../../../../pypy/rpython/ootypesystem -.. _`rpython/ootypesystem/ootype.py`: ../../../../pypy/rpython/ootypesystem/ootype.py -.. _`rpython/rint.py`: ../../../../pypy/rpython/rint.py -.. _`rpython/rlist.py`: ../../../../pypy/rpython/rlist.py -.. _`rpython/rmodel.py`: ../../../../pypy/rpython/rmodel.py -.. _`pypy/rpython/rtyper.py`: ../../../../pypy/rpython/rtyper.py -.. _`pypy/rpython/test/test_llinterp.py`: ../../../../pypy/rpython/test/test_llinterp.py -.. _`pypy/test_all.py`: ../../../../pypy/test_all.py -.. _`tool/`: ../../../../pypy/tool -.. _`tool/algo/`: ../../../../pypy/tool/algo -.. _`tool/pytest/`: ../../../../pypy/tool/pytest +.. _`pypy/rpython/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +.. _`pypy/rpython/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/ +.. _`pypy/rpython/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/lltypesystem/lltype.py +.. 
_`pypy/rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/ +.. _`pypy/rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/generation.py +.. _`pypy/rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/hybrid.py +.. _`pypy/rpython/memory/gc/markcompact.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/markcompact.py +.. _`pypy/rpython/memory/gc/marksweep.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/marksweep.py +.. _`pypy/rpython/memory/gc/minimark.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/minimark.py +.. _`pypy/rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/minimarkpage.py +.. _`pypy/rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/memory/gc/semispace.py +.. _`pypy/rpython/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ +.. _`pypy/rpython/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ootypesystem/ootype.py +.. _`pypy/rpython/rint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rint.py +.. _`pypy/rpython/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rlist.py +.. _`pypy/rpython/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rmodel.py +.. _`pypy/rpython/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/rtyper.py +.. _`pypy/rpython/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/test/test_llinterp.py +.. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ +.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ +.. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ +.. 
_`pypy/tool/traceconfig.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/traceconfig.py .. _`pypy/translator`: -.. _`translator/`: ../../../../pypy/translator -.. _`translator/backendopt/`: ../../../../pypy/translator/backendopt -.. _`translator/c/`: ../../../../pypy/translator/c -.. _`translator/cli/`: ../../../../pypy/translator/cli -.. _`translator/goal/`: ../../../../pypy/translator/goal -.. _`pypy/translator/goal/targetnopstandalone.py`: ../../../../pypy/translator/goal/targetnopstandalone.py -.. _`translator/jvm/`: ../../../../pypy/translator/jvm -.. _`translator/stackless/`: ../../../../pypy/translator/stackless -.. _`translator/tool/`: ../../../../pypy/translator/tool -.. _`translator/js/`: http://codespeak.net/svn/pypy/branch/oo-jit/pypy/translator/js/ +.. _`pypy/translator/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/ +.. _`pypy/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/backendopt/ +.. _`pypy/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +.. _`pypy/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/cli/ +.. _`pypy/translator/goal/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/goal/ +.. _`pypy/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/jvm/ +.. _`pypy/translator/stackless/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/stackless/ +.. 
_`pypy/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/translator/tool/ diff --git a/pypy/doc/config/objspace.usemodules._ffi.rst b/pypy/doc/config/objspace.usemodules._ffi.txt copy from pypy/doc/config/objspace.usemodules._ffi.rst copy to pypy/doc/config/objspace.usemodules._ffi.txt diff --git a/pypy/doc/config/translation.jit.rst b/pypy/doc/config/translation.jit.txt copy from pypy/doc/config/translation.jit.rst copy to pypy/doc/config/translation.jit.txt diff --git a/pypy/doc/cleanup-todo.rst b/pypy/doc/cleanup-todo.rst deleted file mode 100644 --- a/pypy/doc/cleanup-todo.rst +++ /dev/null @@ -1,30 +0,0 @@ - -PyPy cleanup areas -================== - -This is a todo list that lists various areas of PyPy that should be cleaned up -(for whatever reason: less mess, less code duplication, etc). - -translation toolchain ---------------------- - - - low level backends should share more code - - all backends should have more consistent interfaces - - geninterp is a hack - - delegate finding type stuff like vtables etc to GC, cleaner interface for rtti, - simplify translator/c/gc.py - - clean up the tangle of including headers in the C backend - - make approach for loading modules more sane, mixedmodule capture - too many platform dependencies especially for pypy-cli - - review pdbplus, especially the graph commands, also in the light of - https://codespeak.net/issue/pypy-dev/issue303 and the fact that - we can have more than one translator/annotator around (with the - timeshifter) - -interpreter ------------ - - - review the things implemented at applevel whether they are performance- - critical - - - review CPython regression test suite, enable running tests, fix bugs diff --git a/pypy/doc/config/objspace.logbytecodes.rst b/pypy/doc/config/objspace.logbytecodes.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.logbytecodes.rst +++ /dev/null @@ -1,3 +0,0 @@ -Internal option. - -.. 
internal diff --git a/pypy/doc/discussion/translation-swamp.rst b/pypy/doc/discussion/translation-swamp.rst deleted file mode 100644 --- a/pypy/doc/discussion/translation-swamp.rst +++ /dev/null @@ -1,30 +0,0 @@ -=================================================================== -List of things that need to be improved for translation to be saner -=================================================================== - - - * understand nondeterminism after rtyping - - * experiment with different heuristics: - - * weigh backedges more (TESTING) - * consider size of outer function - * consider number of arguments (TESTING) - - * find a more deterministic inlining order (TESTING using number of callers) - - * experiment with using a base inlining threshold and then drive inlining by - malloc removal possibilities (using escape analysis) - - * move the inlining of gc helpers just before emitting the code. - throw the graph away (TESTING, need to do a new framework translation) - - * for gcc: use just one implement file (TRIED: turns out to be a bad idea, - because gcc uses too much ram). Need to experiment more now that - inlining should at least be more deterministic! - -things to improve the framework gc -================================== - - * find out whether a function can collect - diff --git a/pypy/doc/config/objspace.usemodules.__builtin__.rst b/pypy/doc/config/objspace.usemodules.__builtin__.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.__builtin__.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the '__builtin__' module. -This module is essential, included by default and should not be removed. diff --git a/pypy/doc/config/objspace.usemodules._bisect.rst b/pypy/doc/config/objspace.usemodules._bisect.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._bisect.rst +++ /dev/null @@ -1,4 +0,0 @@ -Use the '_bisect' module. -Used, optionally, by the 'bisect' standard lib module. 
This module is expected to be working and is included by default. - - diff --git a/pypy/doc/config/translation.insist.rst b/pypy/doc/config/translation.insist.rst deleted file mode 100644 --- a/pypy/doc/config/translation.insist.rst +++ /dev/null @@ -1,4 +0,0 @@ -Don't stop on the first `rtyping`_ error. Instead, try to rtype as much as -possible and show the collected error messages in the end. - -.. _`rtyping`: ../rtyper.html diff --git a/pypy/translator/cli/carbonpython.py b/pypy/translator/cli/carbonpython.py deleted file mode 100644 --- a/pypy/translator/cli/carbonpython.py +++ /dev/null @@ -1,160 +0,0 @@ -#! /usr/bin/env python -""" -Usage: carbonpython.py [dll-name] - -Compiles an RPython module into a .NET dll. -""" - -import sys -import new -import types -import os.path -import inspect - -from pypy.translator.driver import TranslationDriver -from pypy.translator.cli.entrypoint import DllEntryPoint - -class DllDef: - def __init__(self, name, namespace, functions=[], dontmangle=True, isnetmodule=False): - self.name = name - self.namespace = namespace - self.functions = functions # [(function, annotation), ...] 
- self.isnetmodule = isnetmodule - self.driver = TranslationDriver() - if dontmangle: - self.driver.config.translation.ootype.mangle = False - self.driver.setup_library(self) - - def add_function(self, func, inputtypes): - self.functions.append((func, inputtypes)) - - def get_entrypoint(self, bk): - graphs = [bk.getdesc(f).cachedgraph(None) for f, _ in self.functions] - return DllEntryPoint(self.name, graphs, self.isnetmodule) - - def compile(self): - # add all functions to the appropriate namespace - if self.namespace: - for func, _ in self.functions: - if not hasattr(func, '_namespace_'): - func._namespace_ = self.namespace - self.driver.proceed(['compile_cli']) - -class export(object): - def __new__(self, *args, **kwds): - if len(args) == 1 and isinstance(args[0], types.FunctionType): - func = args[0] - func._inputtypes_ = () - return func - return object.__new__(self, *args, **kwds) - - def __init__(self, *args, **kwds): - self.inputtypes = args - self.namespace = kwds.pop('namespace', None) - if len(kwds) > 0: - raise TypeError, "unexpected keyword argument: '%s'" % kwds.keys()[0] - - def __call__(self, func): - func._inputtypes_ = self.inputtypes - if self.namespace is not None: - func._namespace_ = self.namespace - return func - -def is_exported(obj): - return isinstance(obj, (types.FunctionType, types.UnboundMethodType)) \ - and hasattr(obj, '_inputtypes_') - -def collect_entrypoints(dic): - entrypoints = [] - for item in dic.itervalues(): - if is_exported(item): - entrypoints.append((item, item._inputtypes_)) - elif isinstance(item, types.ClassType) or isinstance(item, type): - entrypoints += collect_class_entrypoints(item) - return entrypoints - -def collect_class_entrypoints(cls): - try: - __init__ = cls.__init__ - if not is_exported(__init__): - return [] - except AttributeError: - return [] - - entrypoints = [(wrap_init(cls, __init__), __init__._inputtypes_)] - for item in cls.__dict__.itervalues(): - if item is not __init__.im_func and 
is_exported(item): - inputtypes = (cls,) + item._inputtypes_ - entrypoints.append((wrap_method(item), inputtypes)) - return entrypoints - -def getarglist(meth): - arglist, starargs, kwargs, defaults = inspect.getargspec(meth) - assert starargs is None, '*args not supported yet' - assert kwargs is None, '**kwds not supported yet' - assert defaults is None, 'default values not supported yet' - return arglist - -def wrap_init(cls, meth): - arglist = getarglist(meth)[1:] # discard self - args = ', '.join(arglist) - source = 'def __internal__ctor(%s): return %s(%s)' % ( - args, cls.__name__, args) - mydict = {cls.__name__: cls} - print source - exec source in mydict - return mydict['__internal__ctor'] - -def wrap_method(meth, is_init=False): - arglist = getarglist(meth) - name = '__internal__%s' % meth.func_name - selfvar = arglist[0] - args = ', '.join(arglist) - params = ', '.join(arglist[1:]) - source = 'def %s(%s): return %s.%s(%s)' % ( - name, args, selfvar, meth.func_name, params) - mydict = {} - print source - exec source in mydict - return mydict[name] - - -def compile_dll(filename, dllname=None, copy_dll=True): - dirname, name = os.path.split(filename) - if dllname is None: - dllname, _ = os.path.splitext(name) - elif dllname.endswith('.dll'): - dllname, _ = os.path.splitext(dllname) - module = new.module(dllname) - namespace = module.__dict__.get('_namespace_', dllname) - sys.path.insert(0, dirname) - execfile(filename, module.__dict__) - sys.path.pop(0) - - dll = DllDef(dllname, namespace) - dll.functions = collect_entrypoints(module.__dict__) - dll.compile() - if copy_dll: - dll.driver.copy_cli_dll() - -def main(argv): - if len(argv) == 2: - filename = argv[1] - dllname = None - elif len(argv) == 3: - filename = argv[1] - dllname = argv[2] - else: - print >> sys.stderr, __doc__ - sys.exit(2) - - if not filename.endswith('.py'): - filename += '.py' - if not os.path.exists(filename): - print >> sys.stderr, "Cannot find file %s" % filename - sys.exit(1) - 
compile_dll(filename, dllname) - -if __name__ == '__main__': - main(sys.argv) - diff --git a/pypy/doc/config/objspace.usemodules.exceptions.rst b/pypy/doc/config/objspace.usemodules.exceptions.txt copy from pypy/doc/config/objspace.usemodules.exceptions.rst copy to pypy/doc/config/objspace.usemodules.exceptions.txt diff --git a/pypy/doc/config/objspace.usepycfiles.rst b/pypy/doc/config/objspace.usepycfiles.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usepycfiles.rst +++ /dev/null @@ -1,4 +0,0 @@ -If this option is used, then PyPy imports and generates "pyc" files in the -same way as CPython. This is true by default and there is not much reason -to turn it off nowadays. If off, PyPy never produces "pyc" files and -ignores any "pyc" file that might already be present. diff --git a/pypy/doc/config/objspace.usemodules.cpyext.rst b/pypy/doc/config/objspace.usemodules.cpyext.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cpyext.rst +++ /dev/null @@ -1,1 +0,0 @@ -Use (experimental) cpyext module, that tries to load and run CPython extension modules diff --git a/pypy/doc/config/objspace.usemodules.cmath.rst b/pypy/doc/config/objspace.usemodules.cmath.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.cmath.rst +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'cmath' module. -This module is expected to be working and is included by default. diff --git a/pypy/doc/_ref.rst b/pypy/doc/_ref.rst deleted file mode 100644 --- a/pypy/doc/_ref.rst +++ /dev/null @@ -1,107 +0,0 @@ -.. _`demo/`: ../../demo -.. _`demo/pickle_coroutine.py`: ../../demo/pickle_coroutine.py -.. _`lib-python/`: ../../lib-python -.. _`lib-python/2.5.2/dis.py`: ../../lib-python/2.5.2/dis.py -.. _`annotation/`: -.. _`pypy/annotation`: ../../../../pypy/annotation -.. _`pypy/annotation/annrpython.py`: ../../../../pypy/annotation/annrpython.py -.. _`annotation/binaryop.py`: ../../../../pypy/annotation/binaryop.py -.. 
_`pypy/annotation/builtin.py`: ../../../../pypy/annotation/builtin.py -.. _`pypy/annotation/model.py`: ../../../../pypy/annotation/model.py -.. _`bin/`: ../../../../pypy/bin -.. _`config/`: ../../../../pypy/config -.. _`pypy/config/pypyoption.py`: ../../../../pypy/config/pypyoption.py -.. _`doc/`: ../../../../pypy/doc -.. _`doc/config/`: ../../../../pypy/doc/config -.. _`doc/discussion/`: ../../../../pypy/doc/discussion -.. _`interpreter/`: -.. _`pypy/interpreter`: ../../../../pypy/interpreter -.. _`pypy/interpreter/argument.py`: ../../../../pypy/interpreter/argument.py -.. _`interpreter/astcompiler/`: -.. _`pypy/interpreter/astcompiler`: ../../../../pypy/interpreter/astcompiler -.. _`pypy/interpreter/executioncontext.py`: ../../../../pypy/interpreter/executioncontext.py -.. _`pypy/interpreter/function.py`: ../../../../pypy/interpreter/function.py -.. _`interpreter/gateway.py`: -.. _`pypy/interpreter/gateway.py`: ../../../../pypy/interpreter/gateway.py -.. _`pypy/interpreter/generator.py`: ../../../../pypy/interpreter/generator.py -.. _`pypy/interpreter/mixedmodule.py`: ../../../../pypy/interpreter/mixedmodule.py -.. _`pypy/interpreter/module.py`: ../../../../pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: ../../../../pypy/interpreter/nestedscope.py -.. _`pypy/interpreter/pyopcode.py`: ../../../../pypy/interpreter/pyopcode.py -.. _`interpreter/pyparser/`: -.. _`pypy/interpreter/pyparser`: ../../../../pypy/interpreter/pyparser -.. _`pypy/interpreter/pyparser/pytokenizer.py`: ../../../../pypy/interpreter/pyparser/pytokenizer.py -.. _`pypy/interpreter/pyparser/parser.py`: ../../../../pypy/interpreter/pyparser/parser.py -.. _`pypy/interpreter/pyparser/pyparse.py`: ../../../../pypy/interpreter/pyparser/pyparse.py -.. _`pypy/interpreter/pyparser/future.py`: ../../../../pypy/interpreter/pyparser/future.py -.. _`pypy/interpreter/pyparser/metaparser.py`: ../../../../pypy/interpreter/pyparser/metaparser.py -.. 
_`pypy/interpreter/astcompiler/astbuilder.py`: ../../../../pypy/interpreter/astcompiler/astbuilder.py -.. _`pypy/interpreter/astcompiler/optimize.py`: ../../../../pypy/interpreter/astcompiler/optimize.py -.. _`pypy/interpreter/astcompiler/codegen.py`: ../../../../pypy/interpreter/astcompiler/codegen.py -.. _`pypy/interpreter/astcompiler/tools/asdl_py.py`: ../../../../pypy/interpreter/astcompiler/tools/asdl_py.py -.. _`pypy/interpreter/astcompiler/tools/Python.asdl`: ../../../../pypy/interpreter/astcompiler/tools/Python.asdl -.. _`pypy/interpreter/astcompiler/assemble.py`: ../../../../pypy/interpreter/astcompiler/assemble.py -.. _`pypy/interpreter/astcompiler/symtable.py`: ../../../../pypy/interpreter/astcompiler/symtable.py -.. _`pypy/interpreter/astcompiler/asthelpers.py`: ../../../../pypy/interpreter/astcompiler/asthelpers.py -.. _`pypy/interpreter/astcompiler/ast.py`: ../../../../pypy/interpreter/astcompiler/ast.py -.. _`pypy/interpreter/typedef.py`: ../../../../pypy/interpreter/typedef.py -.. _`lib/`: -.. _`lib_pypy/`: ../../lib_pypy -.. _`lib/distributed/`: ../../lib_pypy/distributed -.. _`lib_pypy/stackless.py`: ../../lib_pypy/stackless.py -.. _`lib_pypy/pypy_test/`: ../../lib_pypy/pypy_test -.. _`module/`: -.. _`pypy/module`: -.. _`pypy/module/`: ../../../../pypy/module -.. _`pypy/module/__builtin__/__init__.py`: ../../../../pypy/module/__builtin__/__init__.py -.. _`pypy/module/_stackless/test/test_clonable.py`: ../../../../pypy/module/_stackless/test/test_clonable.py -.. _`pypy/module/_stackless/test/test_composable_coroutine.py`: ../../../../pypy/module/_stackless/test/test_composable_coroutine.py -.. _`objspace/`: -.. _`pypy/objspace`: ../../../../pypy/objspace -.. _`objspace/dump.py`: ../../../../pypy/objspace/dump.py -.. _`objspace/flow/`: ../../../../pypy/objspace/flow -.. _`objspace/std/`: -.. _`pypy/objspace/std`: ../../../../pypy/objspace/std -.. _`objspace/taint.py`: ../../../../pypy/objspace/taint.py -.. _`objspace/thunk.py`: -.. 
_`pypy/objspace/thunk.py`: ../../../../pypy/objspace/thunk.py -.. _`objspace/trace.py`: -.. _`pypy/objspace/trace.py`: ../../../../pypy/objspace/trace.py -.. _`pypy/rlib`: -.. _`rlib/`: ../../../../pypy/rlib -.. _`pypy/rlib/rarithmetic.py`: ../../../../pypy/rlib/rarithmetic.py -.. _`pypy/rlib/test`: ../../../../pypy/rlib/test -.. _`pypy/rpython`: -.. _`pypy/rpython/`: -.. _`rpython/`: ../../../../pypy/rpython -.. _`rpython/lltypesystem/`: ../../../../pypy/rpython/lltypesystem -.. _`pypy/rpython/lltypesystem/lltype.py`: -.. _`rpython/lltypesystem/lltype.py`: ../../../../pypy/rpython/lltypesystem/lltype.py -.. _`rpython/memory/`: ../../../../pypy/rpython/memory -.. _`rpython/memory/gc/generation.py`: ../../../../pypy/rpython/memory/gc/generation.py -.. _`rpython/memory/gc/hybrid.py`: ../../../../pypy/rpython/memory/gc/hybrid.py -.. _`rpython/memory/gc/markcompact.py`: ../../../../pypy/rpython/memory/gc/markcompact.py -.. _`rpython/memory/gc/marksweep.py`: ../../../../pypy/rpython/memory/gc/marksweep.py -.. _`rpython/memory/gc/semispace.py`: ../../../../pypy/rpython/memory/gc/semispace.py -.. _`rpython/ootypesystem/`: ../../../../pypy/rpython/ootypesystem -.. _`rpython/ootypesystem/ootype.py`: ../../../../pypy/rpython/ootypesystem/ootype.py -.. _`rpython/rint.py`: ../../../../pypy/rpython/rint.py -.. _`rpython/rlist.py`: ../../../../pypy/rpython/rlist.py -.. _`rpython/rmodel.py`: ../../../../pypy/rpython/rmodel.py -.. _`pypy/rpython/rtyper.py`: ../../../../pypy/rpython/rtyper.py -.. _`pypy/rpython/test/test_llinterp.py`: ../../../../pypy/rpython/test/test_llinterp.py -.. _`pypy/test_all.py`: ../../../../pypy/test_all.py -.. _`tool/`: ../../../../pypy/tool -.. _`tool/algo/`: ../../../../pypy/tool/algo -.. _`tool/pytest/`: ../../../../pypy/tool/pytest -.. _`pypy/translator`: -.. _`translator/`: ../../../../pypy/translator -.. _`translator/backendopt/`: ../../../../pypy/translator/backendopt -.. _`translator/c/`: ../../../../pypy/translator/c -.. 
_`translator/cli/`: ../../../../pypy/translator/cli -.. _`translator/goal/`: ../../../../pypy/translator/goal -.. _`pypy/translator/goal/targetnopstandalone.py`: ../../../../pypy/translator/goal/targetnopstandalone.py -.. _`translator/jvm/`: ../../../../pypy/translator/jvm -.. _`translator/stackless/`: ../../../../pypy/translator/stackless -.. _`translator/tool/`: ../../../../pypy/translator/tool -.. _`translator/js/`: http://codespeak.net/svn/pypy/branch/oo-jit/pypy/translator/js/ diff --git a/pypy/doc/config/translation.profopt.rst b/pypy/doc/config/translation.profopt.txt copy from pypy/doc/config/translation.profopt.rst copy to pypy/doc/config/translation.profopt.txt diff --git a/pypy/tool/rest/convert.py b/pypy/tool/rest/convert.py deleted file mode 100644 --- a/pypy/tool/rest/convert.py +++ /dev/null @@ -1,163 +0,0 @@ -import py - -ExecutionFailed = py.process.cmdexec.Error -# utility functions to convert between various formats - -format_to_dotargument = {"png": "png", - "eps": "ps", - "ps": "ps", - "pdf": "ps", - } - -def ps2eps(ps): - # XXX write a pure python version - if not py.path.local.sysfind("ps2epsi") and \ - not py.path.local.sysfind("ps2eps"): - raise SystemExit("neither ps2eps nor ps2epsi found") - try: - eps = ps.new(ext=".eps") - py.process.cmdexec('ps2epsi "%s" "%s"' % (ps, eps)) - except ExecutionFailed: - py.process.cmdexec('ps2eps -l -f "%s"' % ps) - -def ps2pdf(ps, compat_level="1.2"): - if not py.path.local.sysfind("gs"): - raise SystemExit("ERROR: gs not found") - pdf = ps.new(ext=".pdf") - options = dict(OPTIONS="-dSAFER -dCompatibilityLevel=%s" % compat_level, - infile=ps, outfile=pdf) - cmd = ('gs %(OPTIONS)s -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite ' - '"-sOutputFile=%(outfile)s" %(OPTIONS)s -c .setpdfwrite ' - '-f "%(infile)s"') % options - py.process.cmdexec(cmd) - return pdf - -def eps2pdf(eps): - # XXX write a pure python version - if not py.path.local.sysfind("epstopdf"): - raise SystemExit("ERROR: epstopdf not found") - 
py.process.cmdexec('epstopdf "%s"' % eps) - -def dvi2eps(dvi, dest=None): - if dest is None: - dest = eps.new(ext=".eps") - command = 'dvips -q -E -n 1 -D 600 -p 1 -o "%s" "%s"' % (dest, dvi) - if not py.path.local.sysfind("dvips"): - raise SystemExit("ERROR: dvips not found") - py.process.cmdexec(command) - -def convert_dot(fn, new_extension): - if not py.path.local.sysfind("dot"): - raise SystemExit("ERROR: dot not found") - result = fn.new(ext=new_extension) - print result - arg = "-T%s" % (format_to_dotargument[new_extension], ) - py.std.os.system('dot "%s" "%s" > "%s"' % (arg, fn, result)) - if new_extension == "eps": - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - ps.remove() - elif new_extension == "pdf": - # convert to eps file first, to get the bounding box right - eps = result.new(ext="eps") - ps = result.new(ext="ps") - result.move(ps) - ps2eps(ps) - eps2pdf(eps) - ps.remove() - eps.remove() - return result - - -class latexformula2png(object): - def __init__(self, formula, dest, temp=None): - self.formula = formula - try: - import Image - self.Image = Image - self.scale = 2 # create a larger image - self.upscale = 5 # create the image upscale times larger, then scale it down - except ImportError: - self.scale = 2 - self.upscale = 1 - self.Image = None - self.output_format = ('pngmono', 'pnggray', 'pngalpha')[2] - if temp is None: - temp = py.test.ensuretemp("latexformula") - self.temp = temp - self.latex = self.temp.join('formula.tex') - self.dvi = self.temp.join('formula.dvi') - self.eps = self.temp.join('formula.eps') - self.png = self.temp.join('formula.png') - self.saveas(dest) - - def saveas(self, dest): - self.gen_latex() - self.gen_dvi() - dvi2eps(self.dvi, self.eps) - self.gen_png() - self.scale_image() - self.png.copy(dest) - - def gen_latex(self): - self.latex.write (""" - \\documentclass{article} - \\pagestyle{empty} - \\begin{document} - - %s - \\pagebreak - - \\end{document} - """ % (self.formula)) - - def gen_dvi(self): - 
origdir = py.path.local() - self.temp.chdir() - py.process.cmdexec('latex "%s"' % (self.latex)) - origdir.chdir() - - def gen_png(self): - tempdir = py.path.local.mkdtemp() - - re_bbox = py.std.re.compile('%%BoundingBox:\s*(\d+) (\d+) (\d+) (\d+)') - eps = self.eps.read() - x1, y1, x2, y2 = [int(i) for i in re_bbox.search(eps).groups()] - X = x2 - x1 + 2 - Y = y2 - y1 + 2 - mx = -x1 - my = -y1 - ps = self.temp.join('temp.ps') - source = self.eps - ps.write(""" - 1 1 1 setrgbcolor - newpath - -1 -1 moveto - %(X)d -1 lineto - %(X)d %(Y)d lineto - -1 %(Y)d lineto - closepath - fill - %(mx)d %(my)d translate - 0 0 0 setrgbcolor - (%(source)s) run - - """ % locals()) - - sx = int((x2 - x1) * self.scale * self.upscale) - sy = int((y2 - y1) * self.scale * self.upscale) - res = 72 * self.scale * self.upscale - command = ('gs -q -g%dx%d -r%dx%d -sDEVICE=%s -sOutputFile="%s" ' - '-dNOPAUSE -dBATCH "%s"') % ( - sx, sy, res, res, self.output_format, self.png, ps) - py.process.cmdexec(command) - - def scale_image(self): - if self.Image is None: - return - image = self.Image.open(str(self.png)) - image.resize((image.size[0] / self.upscale, - image.size[1] / self.upscale), - self.Image.ANTIALIAS).save(str(self.png)) - diff --git a/pypy/doc/config/objspace.usemodules._ffi.rst b/pypy/doc/config/objspace.usemodules._ffi.rst deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules._ffi.rst +++ /dev/null @@ -1,1 +0,0 @@ -Applevel interface to libffi. It is more high level than _rawffi, and most importantly it is JIT friendly diff --git a/pypy/doc/config/translation.backendopt.inline_threshold.rst b/pypy/doc/config/translation.backendopt.inline_threshold.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.inline_threshold.rst +++ /dev/null @@ -1,2 +0,0 @@ -Weight threshold used to decide whether to inline flowgraphs. -This is for basic inlining (:config:`translation.backendopt.inline`). 
diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.rst b/pypy/doc/config/objspace.std.withdictmeasurement.txt copy from pypy/doc/config/objspace.std.withdictmeasurement.rst copy to pypy/doc/config/objspace.std.withdictmeasurement.txt diff --git a/pypy/doc/config/translation.backendopt.stack_optimization.rst b/pypy/doc/config/translation.backendopt.stack_optimization.txt copy from pypy/doc/config/translation.backendopt.stack_optimization.rst copy to pypy/doc/config/translation.backendopt.stack_optimization.txt diff --git a/pypy/doc/config/translation.make_jobs.rst b/pypy/doc/config/translation.make_jobs.txt copy from pypy/doc/config/translation.make_jobs.rst copy to pypy/doc/config/translation.make_jobs.txt diff --git a/pypy/doc/svn-help.rst b/pypy/doc/svn-help.rst deleted file mode 100644 --- a/pypy/doc/svn-help.rst +++ /dev/null @@ -1,153 +0,0 @@ - -Installing subversion for PyPy -============================== - -Jens-Uwe Mager has prepared some installation files which should -help you to install subversion on your computer. - -+ Download Unix source tarball or prepackaged versions_ for MacOS, Windows, FreeBSD and Linux - -+ Additional information for Windows users: - - * See Microsoft website_ if you have .DLL issues. - - * Windows Installer file for Tortoise SVN (like Tortoise CVS) GUI_ - (Pick the UNICODE version for Windows 2000 and XP and - see Win_ 2000, NT if you have problems loading it.) - -+ Local copy of MacOS_ X binary tar ball - (This requires at least OS X 10.3) - -+ Debian instructions below... - -Getting started ------------------ - -If you're just getting started with subversion, here's a simple how-to. -For complete information, you can go read the subversion guide_. - -**Download and install the appropriate installation file of subversion above.** - -For linux: - -download the tarball. unzip and untar it. Then type *./configure*. Then, as root, *make* followed by *make install*. Voila ... a subversion client. 
- -For Debian users:: - - $ apt-get install subversion-tools - -People using Debian *stable* first need to add the following line to ``/etc/apt/sources.list`` (thanks backports_!):: - - deb http://fs.cs.fhm.edu/mirror/backports.org/debian stable subversion - -Note that you can always go look at the files online_ with your browser, located at: http://codespeak.net/svn/pypy/trunk -But, you'll want to check out your own local copies to work on. - -Check out and Check in ----------------------------- - -In order to get the sourcecode and docs downloaded onto your drive, open a shell or commandline and type:: - - $ svn co http://codespeak.net/svn/pypy/trunk - -If you are behind a dump proxy this may or may not work; see below. - -Once you've got the files checked out to your own system, you can use your favorite text editor to change to files. Be sure to read the coding-guide_ and other documentation files before doing a lot of work on the source code. Before doing any work, make sure you're using the most recent update with:: - - $ svn up - -this will update whichever subdirectory you're in (doc or src). - -When you're ready to **check in** a file, - -cd to your local checked out sourcecode directory, and if necessary, copy the file over from wherever you worked on it:: - - $ cp ~/mydir/filename.ext filename.ext - -If you're adding a brand-new file:: - - $ svn add filename.ext - -Then, to **commit** it:: - - $ svn ci -m "your comments about what changes your committing" - $ your password: (this may not be necessary) - -You'll see something like the following:: - - Adding goals/stringcomp.py - Transmitting file data . - Committed revision 578. - -or:: - - Sending coding-guide.txt - Transmitting file data . - Committed revision 631. - -Check online on the `svn-commit archives`_ and you'll see your revision. Feel free to add a documentation file on any major changes you've made! - -.. 
_`svn-commit archives`: http://codespeak.net/pipermail/pypy-svn/ - -Some other useful subversion tricks: --------------------------------------- - -**Be sure to remember ``svn`` in the commandline in the following commands.** - -``$ svn mv filename.ext`` - to move or rename a file - -``$ svn rm filename.ext`` - to remove (delete) a file - -``$ svn status`` - will let you know what changes you've made compared to the current repository version - -``$ svn revert filename.ext`` - will fix problems if you deleted or moved a file without telling svn. - -``$ svn cleanup`` - last resort to fix it if you've got a totally messed up local copy. - Use this if you see error messages about ``locked`` files that you can't fix otherwise. - -Circumventing proxies ----------------------------- - -Some proxies don't let extended HTTP commands through. If you have an -error complaining about a bad request, you should use https: instead of -http: in the subversion URL. This will make use of SSL encryption, which -cannot be intercepted by proxies. - -Alternatively, if you want to change your proxy configuration, see the -subversion FAQ: http://subversion.tigris.org/faq.html#proxy - -How to Avoid Line-ending Hell ------------------------------ - -We will assume that whenever you create a .txt or a .py file, you would -like other people to be able to read it with the line endings their -OS prefers, even if that is different from the one your OS likes. This -could occasionally be wrong -- say when you are specifically testing -that code you are writing handles line endings properly -- but this is -what you want by default. Binary files, on the other hand, should be -stored exactly as is. This has to be set on every client. Here is how: - -In your home directory edit .subversion/config and comment in :: - - [miscellany] - enable-auto-props = yes - - [auto-props] - *.txt = svn:eol-style=native - *.py = svn:eol-style=native - - -.. 
_website: http://support.microsoft.com/default.aspx?scid=kb%3Ben-us%3B259403 -.. _GUI: http://tortoisesvn.tigris.org/servlets/ProjectDocumentList?folderID=616 -.. _MacOS: http://codespeak.net/~jum/svn-1.4.0-darwin-ppc.tar.gz -.. _versions: http://subversion.tigris.org/project_packages.html -.. _Win: http://www.microsoft.com/downloads/details.aspx?displaylang=en&FamilyID=4B6140F9-2D36-4977-8FA1-6F8A0F5DCA8F -.. _guide: http://svnbook.red-bean.com/book.html#svn-ch-1 -.. _backports: http://www.backports.org -.. _online: http://codespeak.net/svn/pypy/trunk/ -.. _coding-guide: coding-guide.html diff --git a/pypy/doc/config/translation.backendopt.storesink.rst b/pypy/doc/config/translation.backendopt.storesink.rst deleted file mode 100644 --- a/pypy/doc/config/translation.backendopt.storesink.rst +++ /dev/null @@ -1,1 +0,0 @@ -Store sinking optimization. On by default. diff --git a/pypy/doc/discussion/distribution-newattempt.rst b/pypy/doc/discussion/distribution-newattempt.rst deleted file mode 100644 --- a/pypy/doc/discussion/distribution-newattempt.rst +++ /dev/null @@ -1,65 +0,0 @@ -Distribution: -============= - -This is outcome of Armin's and Samuele's ideas and our discussion, -kept together by fijal. - -The communication layer: -======================== - -Communication layer is the layer which takes care of explicit -communication. Suppose we do have two (or more) running interpreters -on different machines or in different processes. Let's call it *local side* -(the one on which we're operating) and *remote side*. - -What we want to achieve is to have a transparent enough layer on local -side, which does not allow user to tell the objects local and remote apart -(despite __pypy__.internal_repr, which I would consider cheating). - -Because in pypy we have possibility to have different implementations -for types (even builtin ones), we can use that mechanism to implement -our simple RMI. 
- -The idea is to provide thin layer for accessing remote object, lays as -different implementation for any possible object. So if you perform any -operation on an object locally, which is really a remote object, you -perform all method lookup and do a call on it. Than proxy object -redirects the call to app-level code (socket, execnet, whatever) which -calls remote interpreter with given parameters. It's important that we -can always perform such a call, even if types are not marshallable, because -we can provide remote proxies of local objects to remote side in that case. - -XXX: Need to explain in a bit more informative way. - -Example: --------- - -Suppose we do have ``class A`` and instance ``a = A()`` on remote side -and we want to access this from a local side. We make an object of type -``object`` and we do copy -``__dict__`` keys with values, which correspond to objects on the remote -side (have the same type to user) but they've got different implementation. -(Ie. method calling will look like quite different). - -Even cooler example: --------------------- - -Reminding hpk's example of 5-liner remote file server. With this we make:: - - f = remote_side.import(open) - f("file_name").read() - -Implementation plans: ---------------------- - -We need: - -* app-level primitives for having 'remote proxy' accessible - -* some "serialiser" which is not truly serialising stuff, but making - sure communication will go. - -* interp-level proxy object which emulates every possible object which - delegates operations to app-level primitive proxy. - -* to make it work.... diff --git a/pypy/doc/config/translation.thread.rst b/pypy/doc/config/translation.thread.rst deleted file mode 100644 --- a/pypy/doc/config/translation.thread.rst +++ /dev/null @@ -1,2 +0,0 @@ -Enable threading. The only target where this has visible effect is PyPy (this -also enables the ``thread`` module then). 
diff --git a/pypy/doc/config/translation.no__thread.rst b/pypy/doc/config/translation.no__thread.rst deleted file mode 100644 --- a/pypy/doc/config/translation.no__thread.rst +++ /dev/null @@ -1,4 +0,0 @@ -Don't use gcc __thread attribute for fast thread local storage -implementation . Increases the chance that moving the resulting -executable to another same processor Linux machine will work. (see -:config:`translation.vanilla`). diff --git a/pypy/doc/parser.rst b/pypy/doc/parser.rst --- a/pypy/doc/parser.rst +++ b/pypy/doc/parser.rst @@ -100,4 +100,4 @@ information like the line number table and stack depth are computed. Finally, everything is passed to a brand new ``PyCode`` object. -.. include:: _ref.rst +.. include:: _ref.txt diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -16,13 +16,13 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) +sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -120,7 +120,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +# html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. @@ -138,7 +138,7 @@ #html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +html_use_modindex = False # If false, no index is generated. #html_use_index = True @@ -191,8 +191,9 @@ #latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +latex_use_modindex = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} + From commits-noreply at bitbucket.org Sat Apr 30 16:08:31 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 16:08:31 +0200 (CEST) Subject: [pypy-svn] pypy default: missing accent Message-ID: <20110430140831.E3D07282B52@codespeak.net> Author: Carl Friedrich Bolz Branch: Changeset: r43810:3fe7be5a4156 Date: 2011-04-30 15:59 +0200 http://bitbucket.org/pypy/pypy/changeset/3fe7be5a4156/ Log: missing accent diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -137,7 +137,7 @@ Gustavo Niemeyer William Leslie Akira Li - Kristján Valur Jonsson + Kristján Valur Jónsson Bobby Impollonia Andrew Thompson Anders Sigfridsson diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -165,7 +165,7 @@ Gustavo Niemeyer William Leslie Akira Li - Kristján Valur Jonsson + Kristján Valur Jónsson Bobby Impollonia Andrew Thompson Anders Sigfridsson From commits-noreply at bitbucket.org Sat Apr 30 16:10:29 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 16:10:29 +0200 (CEST) Subject: [pypy-svn] pypy default: fix links Message-ID: <20110430141029.7EBF7282B52@codespeak.net> Author: Carl 
Friedrich Bolz Branch: Changeset: r43811:e88a7113759d Date: 2011-04-30 16:10 +0200 http://bitbucket.org/pypy/pypy/changeset/e88a7113759d/ Log: fix links diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -145,7 +145,7 @@ There are a number of environment variables that can be tweaked to influence the GC. (Their default value should be ok for most usages.) You can read more about them at the start of -`rpython/memory/gc/minimark.py`_. +`pypy/rpython/memory/gc/minimark.py`_. In more details: @@ -162,7 +162,7 @@ to the old stage. The dying case 2 objects are immediately freed. - The old stage is an area of memory containing old (small) objects. It - is handled by `rpython/memory/gc/minimarkpage.py`_. It is organized + is handled by `pypy/rpython/memory/gc/minimarkpage.py`_. It is organized as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB. Each page can either be free, or contain small objects of all the same size. Furthermore at any point in time each object location can be diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -38,7 +38,7 @@ calling its ``frame.eval()`` method. This main entry point initialize appropriate namespaces and then interprets each bytecode instruction. Python's standard library contains -the `lib-python/2.7.0/dis.py`_ module which allows to view +the `lib-python/2.7/dis.py`_ module which allows to view the Virtual's machine bytecode instructions:: >>> import dis diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,7 +1,7 @@ .. _`demo/`: https://bitbucket.org/pypy/pypy/src/default/demo/ .. _`demo/pickle_coroutine.py`: https://bitbucket.org/pypy/pypy/src/default/demo/pickle_coroutine.py .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ -.. 
_`lib-python/2.7.0/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7.0/dis.py +.. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/stackless.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/stackless.py @@ -38,7 +38,6 @@ .. _`pypy/interpreter/executioncontext.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/executioncontext.py .. _`pypy/interpreter/function.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/function.py .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py -.. _`pypy/interpreter/generator.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/generator.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py .. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py @@ -73,7 +72,6 @@ .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py .. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py .. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py -.. _`pypy/objspace/taint.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/taint.py .. _`pypy/objspace/thunk.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/thunk.py .. _`pypy/objspace/trace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/trace.py .. 
_`pypy/rlib`: From commits-noreply at bitbucket.org Sat Apr 30 16:19:07 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 16:19:07 +0200 (CEST) Subject: [pypy-svn] pypy default: Update the version number here. Message-ID: <20110430141907.EE67F36C210@codespeak.net> Author: Armin Rigo Branch: Changeset: r43812:fed78469cb95 Date: 2011-04-30 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/fed78469cb95/ Log: Update the version number here. diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.5-alpha' +release = '1.5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From commits-noreply at bitbucket.org Sat Apr 30 16:23:31 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 16:23:31 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: Updates. Message-ID: <20110430142331.C4CFC36C210@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r180:9727548ee6f8 Date: 2011-04-30 16:23 +0200 http://bitbucket.org/pypy/pypy.org/changeset/9727548ee6f8/ Log: Updates. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -73,7 +73,7 @@
    • Linux binary (32bit)
    • Linux binary (64bit)
    • Mac OS/X binary (64bit)
    • -
    • Windows binary (32bit) (you may need to install the VS 2010 runtime libraries)
    • +
    • Windows binary (32bit) (BETA!) (you may need to install the VS 2010 runtime libraries)

    If your CPU is really old, it may not have SSE2. In this case, you need to translate yourself with the option --jit-backend=x86-without-sse2.

    @@ -164,16 +164,16 @@

    Checksums

    Here are the checksums for each of the downloads (md5 and sha1):

    -3dccf24c23e30b4a04cf122f704b4064  pypy-1.4.1-linux.tar.bz2
    -1fb62a813978c2581e9e09debad6b116  pypy-1.4.1-linux64.tar.bz2
    -769b3fb134944ee8c22ad0834970de3b  pypy-1.4.1-osx64.tar.bz2
    -xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
    -ebbbb156b1eb842e9e65d909ed5f9f6d  pypy-1.4.1-src.tar.bz2
    -6e2366377ad2f0c583074d3ba6f60d064549bef2  pypy-1.4.1-linux.tar.bz2
    -1cfd53343e19264905a00d2ffcf83e03e39dcbb3  pypy-1.4.1-linux64.tar.bz2
    -8e2830bef80b93f4d3c016b972fbdf7bcd403abc  pypy-1.4.1-osx64.tar.bz2
    -xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-win32.zip
    -922a8815377fe2e0c015338fa8b28ae16bf8c840  pypy-1.4.1-src.tar.bz2
    +08a2c95163c95f91772abb8bf5f8b9cb  pypy-1.5-linux.tar.bz2
    +32392b7986eb34cd657d5b6c5d242cc7  pypy-1.5-linux64.tar.bz2
    +b1417916bc01ebb9f95c666f5e397fb5  pypy-1.5-osx64.tar.bz2
    +b6cc12bed5e7243ed44aa3430eb14885  pypy-1.5-win32.zip
    +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-src.tar.bz2
    +b79b317f8736e9bdbf5647fe43258f722c2936f4  pypy-1.5-linux.tar.bz2
    +ad3fd4d454e14514d226809fb8b1cd86455ea1b0  pypy-1.5-linux64.tar.bz2
    +84922083fd5e52ff679718ea70ed5a74a5a048ad  pypy-1.5-osx64.tar.bz2
    +a328dbd273c30526588496108c53c0eec7a23e98  pypy-1.5-win32.zip
    +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  pypy-1.5-src.tar.bz2
     
    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -40,7 +40,7 @@ * `Linux binary (32bit)`__ * `Linux binary (64bit)`__ * `Mac OS/X binary (64bit)`__ -* `Windows binary (32bit)`__ (you may need to install the `VS 2010 runtime libraries`_) +* `Windows binary (32bit)`__ (BETA!) (you may need to install the `VS 2010 runtime libraries`_) .. __: http://pypy.org/download/pypy-1.5-linux.tar.bz2 .. __: http://pypy.org/download/pypy-1.5-linux64.tar.bz2 @@ -162,14 +162,14 @@ Here are the checksums for each of the downloads (md5 and sha1):: - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-linux.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-linux64.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-osx64.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-win32.zip + 08a2c95163c95f91772abb8bf5f8b9cb pypy-1.5-linux.tar.bz2 + 32392b7986eb34cd657d5b6c5d242cc7 pypy-1.5-linux64.tar.bz2 + b1417916bc01ebb9f95c666f5e397fb5 pypy-1.5-osx64.tar.bz2 + b6cc12bed5e7243ed44aa3430eb14885 pypy-1.5-win32.zip xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-src.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-linux.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-linux64.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-osx64.tar.bz2 - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-win32.zip + b79b317f8736e9bdbf5647fe43258f722c2936f4 pypy-1.5-linux.tar.bz2 + ad3fd4d454e14514d226809fb8b1cd86455ea1b0 pypy-1.5-linux64.tar.bz2 + 84922083fd5e52ff679718ea70ed5a74a5a048ad pypy-1.5-osx64.tar.bz2 + a328dbd273c30526588496108c53c0eec7a23e98 pypy-1.5-win32.zip xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-src.tar.bz2 From commits-noreply at bitbucket.org Sat Apr 30 16:29:50 2011 From: commits-noreply at bitbucket.org (cfbolz) Date: Sat, 30 Apr 2011 16:29:50 +0200 (CEST) Subject: [pypy-svn] extradoc extradoc: fix strange margins on the blog Message-ID: <20110430142950.AC5BB36C210@codespeak.net> Author: Carl 
Friedrich Bolz Branch: extradoc Changeset: r3548:289593918ac2 Date: 2011-04-30 16:28 +0200 http://bitbucket.org/pypy/extradoc/changeset/289593918ac2/ Log: fix strange margins on the blog diff --git a/blog/template.xml b/blog/template.xml --- a/blog/template.xml +++ b/blog/template.xml @@ -190,7 +190,7 @@ - + From commits-noreply at bitbucket.org Sat Apr 30 16:30:00 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 16:30:00 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: md5/sha1 of the -src too. Message-ID: <20110430143000.DDFF3282B58@codespeak.net> Author: Armin Rigo Branch: extradoc Changeset: r181:dfc73ae1325b Date: 2011-04-30 16:29 +0200 http://bitbucket.org/pypy/pypy.org/changeset/dfc73ae1325b/ Log: md5/sha1 of the -src too. diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -168,12 +168,12 @@ 32392b7986eb34cd657d5b6c5d242cc7 pypy-1.5-linux64.tar.bz2 b1417916bc01ebb9f95c666f5e397fb5 pypy-1.5-osx64.tar.bz2 b6cc12bed5e7243ed44aa3430eb14885 pypy-1.5-win32.zip -xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-src.tar.bz2 +cb9ada2c50666318c3a2863da1fbe487 pypy-1.5-src.tar.bz2 b79b317f8736e9bdbf5647fe43258f722c2936f4 pypy-1.5-linux.tar.bz2 ad3fd4d454e14514d226809fb8b1cd86455ea1b0 pypy-1.5-linux64.tar.bz2 84922083fd5e52ff679718ea70ed5a74a5a048ad pypy-1.5-osx64.tar.bz2 a328dbd273c30526588496108c53c0eec7a23e98 pypy-1.5-win32.zip -xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-src.tar.bz2 +0ebcecaa4c725bf1a48272033d9f429b8a82b7e1 pypy-1.5-src.tar.bz2
    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -166,10 +166,10 @@ 32392b7986eb34cd657d5b6c5d242cc7 pypy-1.5-linux64.tar.bz2 b1417916bc01ebb9f95c666f5e397fb5 pypy-1.5-osx64.tar.bz2 b6cc12bed5e7243ed44aa3430eb14885 pypy-1.5-win32.zip - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-src.tar.bz2 + cb9ada2c50666318c3a2863da1fbe487 pypy-1.5-src.tar.bz2 b79b317f8736e9bdbf5647fe43258f722c2936f4 pypy-1.5-linux.tar.bz2 ad3fd4d454e14514d226809fb8b1cd86455ea1b0 pypy-1.5-linux64.tar.bz2 84922083fd5e52ff679718ea70ed5a74a5a048ad pypy-1.5-osx64.tar.bz2 a328dbd273c30526588496108c53c0eec7a23e98 pypy-1.5-win32.zip - xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx pypy-1.5-src.tar.bz2 + 0ebcecaa4c725bf1a48272033d9f429b8a82b7e1 pypy-1.5-src.tar.bz2 From commits-noreply at bitbucket.org Sat Apr 30 16:44:26 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 16:44:26 +0200 (CEST) Subject: [pypy-svn] pypy default: The hopefully last Subversion -> Mercurial. Message-ID: <20110430144426.0F915282B59@codespeak.net> Author: Armin Rigo Branch: Changeset: r43813:16a05478162d Date: 2011-04-30 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/16a05478162d/ Log: The hopefully last Subversion -> Mercurial. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -127,7 +127,7 @@ PyPy can be used to run Python programs on Linux, OS/X, Windows, on top of .NET, and on top of Java. To dig into PyPy it is recommended to try out the current -Subversion HEAD, which is always working or mostly working, +Mercurial default branch, which is always working or mostly working, instead of the latest release, which is `1.5`__. .. __: release-1.5.0.html From commits-noreply at bitbucket.org Sat Apr 30 16:52:41 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 16:52:41 +0200 (CEST) Subject: [pypy-svn] pypy default: This while loop doesn't make sense. 
At least we need to call Message-ID: <20110430145241.D7DE9282B59@codespeak.net> Author: Armin Rigo Branch: Changeset: r43814:e2d10544ac26 Date: 2011-04-30 16:52 +0200 http://bitbucket.org/pypy/pypy/changeset/e2d10544ac26/ Log: This while loop doesn't make sense. At least we need to call gc.collect() from within, in order to force files to be closed. diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/modified-2.7/test/test_argparse.py copy from lib-python/2.7/test/test_argparse.py copy to lib-python/modified-2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/modified-2.7/test/test_argparse.py @@ -50,6 +50,7 @@ try: shutil.rmtree(self.temp_dir) except WindowsError: + test_support.gc_collect() continue else: break From commits-noreply at bitbucket.org Sat Apr 30 17:04:19 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 17:04:19 +0200 (CEST) Subject: [pypy-svn] pypy default: Update. Message-ID: <20110430150419.AA85336C210@codespeak.net> Author: Armin Rigo Branch: Changeset: r43815:6a3a4b538f61 Date: 2011-04-30 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/6a3a4b538f61/ Log: Update. diff --git a/pypy/doc/release-1.5.0.rst b/pypy/doc/release-1.5.0.rst --- a/pypy/doc/release-1.5.0.rst +++ b/pypy/doc/release-1.5.0.rst @@ -47,8 +47,8 @@ - There is an `external fork`_ which includes an RPython version of the ``postgresql``. However, there are no prebuilt binaries for this. -- Our developer documentation was moved to Sphinx and cleaned up. It now lives - on http://pypy.readthedocs.org +- Our developer documentation was moved to Sphinx and cleaned up. + (click 'Dev Site' on http://pypy.org/ .) 
- and many small things :-) From commits-noreply at bitbucket.org Sat Apr 30 17:20:23 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 17:20:23 +0200 (CEST) Subject: [pypy-svn] pypy documentation-cleanup: close branch Message-ID: <20110430152023.8723B282B59@codespeak.net> Author: Armin Rigo Branch: documentation-cleanup Changeset: r43816:046381ad125d Date: 2011-04-30 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/046381ad125d/ Log: close branch From commits-noreply at bitbucket.org Sat Apr 30 18:05:09 2011 From: commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 18:05:09 +0200 (CEST) Subject: [pypy-svn] pypy default: hg merge post-release-1.5 Message-ID: <20110430160509.0DACE282B59@codespeak.net> Author: Armin Rigo Branch: Changeset: r43817:ea4e1fd5b529 Date: 2011-04-30 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/ea4e1fd5b529/ Log: hg merge post-release-1.5 diff --git a/pypy/translator/c/node.py b/pypy/translator/c/node.py --- a/pypy/translator/c/node.py +++ b/pypy/translator/c/node.py @@ -11,7 +11,7 @@ from pypy.translator.c.support import c_char_array_constant, barebonearray from pypy.translator.c.primitive import PrimitiveType, name_signed from pypy.rlib import exports -from pypy.rlib.rfloat import isinf, isnan +from pypy.rlib.rfloat import isfinite from pypy.rlib.rstackovf import _StackOverflow from pypy.translator.c import extfunc from pypy.translator.tool.cbuild import ExternalCompilationInfo @@ -793,7 +793,7 @@ node = db.getcontainernode(value._obj) expr = 'NULL /*%s*/' % node.name node.where_to_copy_me.append('&%s' % access_expr) - elif typeOf(value) == Float and (isinf(value) or isnan(value)): + elif typeOf(value) == Float and not isfinite(value): db.late_initializations.append(('%s' % access_expr, db.get(value))) expr = '0.0 /* patched later by %sinfinity */' % ( '-+'[value > 0]) diff --git a/pypy/translator/c/test/test_genc.py b/pypy/translator/c/test/test_genc.py --- 
a/pypy/translator/c/test/test_genc.py +++ b/pypy/translator/c/test/test_genc.py @@ -273,7 +273,7 @@ assert res == 1.5 def test_nan_and_special_values(): - from pypy.rlib.rfloat import isnan, isinf, copysign + from pypy.rlib.rfloat import isnan, isinf, isfinite, copysign inf = 1e300 * 1e300 assert isinf(inf) nan = inf/inf @@ -283,6 +283,7 @@ (inf, lambda x: isinf(x) and x > 0.0), (-inf, lambda x: isinf(x) and x < 0.0), (nan, isnan), + (42.0, isfinite), (0.0, lambda x: not x and copysign(1., x) == 1.), (-0.0, lambda x: not x and copysign(1., x) == -1.), ]: diff --git a/pypy/rlib/rfloat.py b/pypy/rlib/rfloat.py --- a/pypy/rlib/rfloat.py +++ b/pypy/rlib/rfloat.py @@ -158,12 +158,12 @@ return _formatd(x, code, precision, flags) def double_to_string(value, tp, precision, flags): - if isnan(value): - special = DIST_NAN + if isfinite(value): + special = DIST_FINITE elif isinf(value): special = DIST_INFINITY - else: - special = DIST_FINITE + else: #isnan(value): + special = DIST_NAN result = formatd(value, tp, precision, flags) return result, special @@ -344,7 +344,7 @@ def asinh(x): "NOT_RPYTHON" absx = abs(x) - if isnan(x) or isinf(x): + if not isfinite(x): return x if absx < _2_to_m28: return x @@ -405,3 +405,6 @@ r = math.floor(absx) return copysign(r, x) +def isfinite(x): + "NOT_RPYTHON" + return not isinf(x) and not isnan(x) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -617,7 +617,7 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) - w_dict = space.newdict(from_strdict_shared=w_obj.dict_w) + w_dict = w_obj.getdict(space) pto.c_tp_dict = make_ref(space, w_dict) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py --- a/pypy/jit/metainterp/test/test_ajit.py +++ b/pypy/jit/metainterp/test/test_ajit.py @@ -2118,6 +2118,45 @@ res = 
self.interp_operations(f, []) assert res + def test_bug688_multiple_immutable_fields(self): + myjitdriver = JitDriver(greens=[], reds=['counter','context']) + + class Tag: + pass + class InnerContext(): + _immutable_fields_ = ['variables','local_names'] + def __init__(self, variables): + self.variables = variables + self.local_names = [0] + + def store(self): + self.local_names[0] = 1 + + def retrieve(self): + variables = hint(self.variables, promote=True) + result = self.local_names[0] + if result == 0: + return -1 + else: + return -1 + def build(): + context = InnerContext(Tag()) + + context.store() + + counter = 0 + while True: + myjitdriver.jit_merge_point(context=context, counter = counter) + context.retrieve() + context.retrieve() + + counter += 1 + if counter > 10: + return 7 + assert self.meta_interp(build, []) == 7 + self.check_loops(getfield_gc_pure=0) + self.check_loops(getfield_gc_pure=2, everywhere=True) + class TestOOtype(BasicTests, OOJitMixin): def test_oohash(self): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -34,13 +34,7 @@ @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, classofinstance=None, - from_strdict_shared=None, strdict=False): - if from_strdict_shared is not None: - assert w_type is None - assert not module and not instance and classofinstance is None - w_self = StrDictImplementation(space) - w_self.content = from_strdict_shared - return w_self + strdict=False): if space.config.objspace.std.withcelldict and module: from pypy.objspace.std.celldict import ModuleDictImplementation assert w_type is None diff --git a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py --- a/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py +++ b/pypy/module/pypyjit/test_pypy_c/test_pypy_c_new.py @@ -1086,6 +1086,50 @@ --TICK-- 
jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=) """) + + def test_mutate_class(self): + def fn(n): + class A(object): + count = 1 + def __init__(self, a): + self.a = a + def f(self): + return self.count + i = 0 + a = A(1) + while i < n: + A.count += 1 # ID: mutate + i = a.f() # ID: meth1 + return i + # + log = self.run(fn, [1000], threshold=10) + assert log.result == 1000 + # + # first, we test the entry bridge + # ------------------------------- + entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) + ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') + assert log.opnames(ops) == ['guard_value', 'getfield_gc', 'guard_value', + 'getfield_gc', 'guard_nonnull_class'] + # the STORE_ATTR is folded away + assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] + # + # then, the actual loop + # ---------------------- + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i8 = getfield_gc_pure(p5, descr=) + i9 = int_lt(i8, i7) + guard_true(i9, descr=.*) + i11 = int_add(i8, 1) + i12 = force_token() + --TICK-- + p20 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p20, i11, descr=) + setfield_gc(ConstPtr(ptr21), p20, descr=) + jump(p0, p1, p2, p3, p4, p20, p6, i7, descr=) + """) + def test_intbound_simple(self): """ diff --git a/lib-python/modified-2.7/test/test_descr.py b/lib-python/modified-2.7/test/test_descr.py --- a/lib-python/modified-2.7/test/test_descr.py +++ b/lib-python/modified-2.7/test/test_descr.py @@ -3190,7 +3190,8 @@ except TypeError: pass else: - self.fail("%r's __dict__ can be modified" % cls) + if test_support.check_impl_detail(pypy=False): + self.fail("%r's __dict__ can be modified" % cls) # Modules also disallow __dict__ assignment class Module1(types.ModuleType, Base): diff --git a/pypy/jit/metainterp/optimizeutil.py b/pypy/jit/metainterp/optimizeutil.py --- a/pypy/jit/metainterp/optimizeutil.py +++ b/pypy/jit/metainterp/optimizeutil.py @@ -99,7 
+99,9 @@ make_sure_not_resized(args) res = 0x345678 for arg in args: - if isinstance(arg, history.Const): + if arg is None: + y = 17 + elif isinstance(arg, history.Const): y = arg._get_hash_() else: y = compute_identity_hash(arg) diff --git a/pypy/rpython/test/test_rfloat.py b/pypy/rpython/test/test_rfloat.py --- a/pypy/rpython/test/test_rfloat.py +++ b/pypy/rpython/test/test_rfloat.py @@ -157,9 +157,9 @@ self.interpret(fn, [1.0, 2.0, 3.0]) def test_copysign(self): - import math + from pypy.rlib import rfloat def fn(x, y): - return math.copysign(x, y) + return rfloat.copysign(x, y) assert self.interpret(fn, [42, -1]) == -42 assert self.interpret(fn, [42, -0.0]) == -42 assert self.interpret(fn, [42, 0.0]) == 42 @@ -172,21 +172,42 @@ assert self.interpret(fn, [0]) == 42.3 def test_isnan(self): - import math - def fn(x): - inf = x * x - nan = inf / inf - return math.isnan(nan) - assert self.interpret(fn, [1e200]) + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isnan(n1 / n2) + assert self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [1e200, 1.0]) # +inf + assert not self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [42.5, 2.3]) # +finite + assert not self.interpret(fn, [42.5, -2.3]) # -finite def test_isinf(self): - import math - def fn(x): - inf = x * x - return math.isinf(inf) - assert self.interpret(fn, [1e200]) + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isinf(n1 / n2) + assert self.interpret(fn, [1e200, 1.0]) # +inf + assert self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [42.5, 2.3]) # +finite + assert not self.interpret(fn, [42.5, -2.3]) # -finite - + def test_isfinite(self): + from pypy.rlib import rfloat + def fn(x, y): + n1 = x * x + n2 = y * y * y + return rfloat.isfinite(n1 / n2) + assert self.interpret(fn, [42.5, 2.3]) # +finite + assert 
self.interpret(fn, [42.5, -2.3]) # -finite + assert not self.interpret(fn, [1e200, 1.0]) # +inf + assert not self.interpret(fn, [1e200, -1.0]) # -inf + assert not self.interpret(fn, [1e200, 1e200]) # nan + + class TestLLtype(BaseTestRfloat, LLRtypeMixin): def test_hash(self): diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py --- a/pypy/jit/metainterp/optimizeopt/optimizer.py +++ b/pypy/jit/metainterp/optimizeopt/optimizer.py @@ -220,7 +220,7 @@ self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op def has_pure_result(self, opnum, args, descr): - op = ResOperation(opnum, args, None) + op = ResOperation(opnum, args, None, descr) key = self.optimizer.make_args_key(op) op = self.optimizer.pure_operations.get(key, None) if op is None: @@ -482,7 +482,7 @@ def make_args_key(self, op): n = op.numargs() - args = [None] * (n + 1) + args = [None] * (n + 2) for i in range(n): arg = op.getarg(i) try: @@ -493,6 +493,7 @@ arg = value.get_key_box() args[i] = arg args[n] = ConstInt(op.getopnum()) + args[n+1] = op.getdescr() return args def optimize_default(self, op): diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py --- a/pypy/rpython/lltypesystem/module/ll_math.py +++ b/pypy/rpython/lltypesystem/module/ll_math.py @@ -101,14 +101,20 @@ # Custom implementations def ll_math_isnan(y): - # By not calling into the extenal function the JIT can inline this. Floats - # are awesome. + # By not calling into the external function the JIT can inline this. + # Floats are awesome. return y != y def ll_math_isinf(y): # Use a bitwise OR so the JIT doesn't produce 2 different guards. return (y == INFINITY) | (y == -INFINITY) +def ll_math_isfinite(y): + # Use a custom hack that is reasonably well-suited to the JIT. + # Floats are awesome (bis). 
+ z = 0.0 * y + return z == z # i.e.: z is not a NaN + ll_math_floor = math_floor diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -54,7 +54,6 @@ from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.longtype import long_typedef from pypy.objspace.std.unicodetype import unicode_typedef - from pypy.objspace.std.dictproxytype import dictproxy_typedef from pypy.objspace.std.nonetype import none_typedef from pypy.objspace.std.itertype import iter_typedef self.pythontypes = [value for key, value in result.__dict__.items() @@ -123,7 +122,6 @@ iterobject.W_FastTupleIterObject: [], iterobject.W_ReverseSeqIterObject: [], unicodeobject.W_UnicodeObject: [], - dictproxyobject.W_DictProxyObject: [], dictmultiobject.W_DictViewKeysObject: [], dictmultiobject.W_DictViewItemsObject: [], dictmultiobject.W_DictViewValuesObject: [], diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -1,21 +1,26 @@ - +from pypy.conftest import gettestobjspace class AppTestUserObject: def test_dictproxy(self): class NotEmpty(object): a = 1 - assert isinstance(NotEmpty.__dict__, dict) == False + NotEmpty.a = 1 + NotEmpty.a = 1 + NotEmpty.a = 1 + NotEmpty.a = 1 assert 'a' in NotEmpty.__dict__ assert 'a' in NotEmpty.__dict__.keys() assert 'b' not in NotEmpty.__dict__ - assert isinstance(NotEmpty.__dict__.copy(), dict) - assert NotEmpty.__dict__ == NotEmpty.__dict__.copy() - try: - NotEmpty.__dict__['b'] = 1 - except: - pass - else: - raise AssertionError, 'this should not have been writable' + NotEmpty.__dict__['b'] = 4 + assert NotEmpty.b == 4 + del NotEmpty.__dict__['b'] + assert NotEmpty.__dict__.get("b") is None + raises(TypeError, 'NotEmpty.__dict__[15] = "y"') + raises(KeyError, 'del NotEmpty.__dict__[15]') + assert 
NotEmpty.__dict__.setdefault("string", 1) == 1 + assert NotEmpty.__dict__.setdefault("string", 2) == 1 + assert NotEmpty.string == 1 + raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)') def test_dictproxyeq(self): class a(object): @@ -33,7 +38,13 @@ def test_str_repr(self): class a(object): pass - s = repr(a.__dict__) - assert s.startswith('') - s = str(a.__dict__) - assert s.startswith('{') and s.endswith('}') + s1 = repr(a.__dict__) + s2 = str(a.__dict__) + assert s1 == s2 + assert s1.startswith('{') and s1.endswith('}') + +class AppTestUserObjectMethodCache(AppTestUserObject): + def setup_class(cls): + cls.space = gettestobjspace( + **{"objspace.std.withmethodcachecounter": True}) + diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -284,6 +284,18 @@ # All empty frozenset subclass instances should have different ids assert len(set(map(id, efs))) == len(efs) + def test_subclass_union(self): + for base in [set, frozenset]: + class subset(base): + def __init__(self, *args): + self.x = args + s = subset([2]) + assert s.x == ([2],) + t = s | base([5]) + # obscure CPython behavior: + assert type(t) is subset + assert not hasattr(t, 'x') + def test_isdisjoint(self): assert set([1,2,3]).isdisjoint(set([4,5,6])) assert set([1,2,3]).isdisjoint(frozenset([4,5,6])) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -787,12 +787,10 @@ def newtuple(self, l): return tuple(l) - def newdict(self, module=False, instance=False, classofinstance=None, - from_strdict_shared=None): + def newdict(self, module=False, instance=False, classofinstance=None): return W_DictMultiObject.allocate_and_init_instance( self, module=module, instance=instance, - 
classofinstance=classofinstance, - from_strdict_shared=from_strdict_shared) + classofinstance=classofinstance) def finditem_str(self, w_dict, s): return w_dict.getitem_str(s) # assume it's a multidict diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py --- a/pypy/jit/backend/llgraph/llimpl.py +++ b/pypy/jit/backend/llgraph/llimpl.py @@ -408,6 +408,13 @@ guard_op = old_loop.operations[old_index] assert guard_op.is_guard() guard_op.jump_target = new_loop + # check that the bridge's inputargs are of the correct number and + # kind for the guard + if guard_op.fail_args is not None: + argkinds = [v.concretetype for v in guard_op.fail_args if v] + else: + argkinds = [] + assert argkinds == [v.concretetype for v in new_loop.inputargs] # ------------------------------ diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -1,15 +1,88 @@ from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation +from pypy.objspace.std.typeobject import unwrap_cell +from pypy.interpreter.error import OperationError -def descr_get_dictproxy(space, w_obj): - return W_DictProxyObject(w_obj.getdict(space)) -class W_DictProxyObject(W_Object): - from pypy.objspace.std.dictproxytype import dictproxy_typedef as typedef +class W_DictProxyObject(W_DictMultiObject): + def __init__(w_self, space, w_type): + W_DictMultiObject.__init__(w_self, space) + w_self.w_type = w_type - def __init__(w_self, w_dict): - w_self.w_dict = w_dict + def impl_getitem(self, w_lookup): + space = self.space + w_lookup_type = space.type(w_lookup) + if space.is_w(w_lookup_type, space.w_str): + return self.impl_getitem_str(space.str_w(w_lookup)) + else: + return None -registerimplementation(W_DictProxyObject) + def 
impl_getitem_str(self, lookup): + return self.w_type.getdictvalue(self.space, lookup) -register_all(vars()) + def impl_setitem(self, w_key, w_value): + space = self.space + if space.is_w(space.type(w_key), space.w_str): + self.impl_setitem_str(self.space.str_w(w_key), w_value) + else: + raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type")) + + def impl_setitem_str(self, name, w_value): + self.w_type.setdictvalue(self.space, name, w_value) + + def impl_setdefault(self, w_key, w_default): + space = self.space + w_result = self.impl_getitem(w_key) + if w_result is not None: + return w_result + self.impl_setitem(w_key, w_default) + return w_default + + def impl_delitem(self, w_key): + space = self.space + w_key_type = space.type(w_key) + if space.is_w(w_key_type, space.w_str): + if not self.w_type.deldictvalue(space, w_key): + raise KeyError + else: + raise KeyError + + def impl_length(self): + return len(self.w_type.dict_w) + + def impl_iter(self): + return DictProxyIteratorImplementation(self.space, self) + + def impl_keys(self): + space = self.space + return [space.wrap(key) for key in self.w_type.dict_w.iterkeys()] + + def impl_values(self): + return [unwrap_cell(self.space, w_value) for w_value in self.w_type.dict_w.itervalues()] + + def impl_items(self): + space = self.space + return [space.newtuple([space.wrap(key), unwrap_cell(self.space, w_value)]) + for (key, w_value) in self.w_type.dict_w.iteritems()] + + def impl_clear(self): + self.w_type.dict_w.clear() + self.w_type.mutated() + + def _as_rdict(self): + assert 0, "should be unreachable" + + def _clear_fields(self): + assert 0, "should be unreachable" + +class DictProxyIteratorImplementation(IteratorImplementation): + def __init__(self, space, dictimplementation): + IteratorImplementation.__init__(self, space, dictimplementation) + self.iterator = dictimplementation.w_type.dict_w.iteritems() + + def next_entry(self): + for key, w_value in self.iterator: + return 
(self.space.wrap(key), unwrap_cell(self.space, w_value)) + else: + return (None, None) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -310,7 +310,7 @@ def ne__Set_settypedef(space, w_left, w_other): rd = make_setdata_from_w_iterable(space, w_other) - return space.wrap(_is_eq(w_left.setdata, rd)) + return space.wrap(not _is_eq(w_left.setdata, rd)) ne__Set_frozensettypedef = ne__Set_settypedef ne__Frozenset_settypedef = ne__Set_settypedef diff --git a/pypy/objspace/std/dictproxytype.py b/pypy/objspace/std/dictproxytype.py deleted file mode 100644 --- a/pypy/objspace/std/dictproxytype.py +++ /dev/null @@ -1,51 +0,0 @@ -from pypy.interpreter import gateway -from pypy.interpreter.typedef import GetSetProperty -from pypy.interpreter.error import OperationError -from pypy.objspace.std.stdtypedef import StdTypeDef - -# ____________________________________________________________ - -def _proxymethod(name): - def fget(space, w_obj): - from pypy.objspace.std.dictproxyobject import W_DictProxyObject - if not isinstance(w_obj, W_DictProxyObject): - raise OperationError(space.w_TypeError, - space.wrap("expected dictproxy")) - return space.getattr(w_obj.w_dict, space.wrap(name)) - return GetSetProperty(fget) - -def _compareproxymethod(opname): - def compare(space, w_obj1, w_obj2): - from pypy.objspace.std.dictproxyobject import W_DictProxyObject - if not isinstance(w_obj1, W_DictProxyObject): - raise OperationError(space.w_TypeError, - space.wrap("expected dictproxy")) - return getattr(space, opname)(w_obj1.w_dict, w_obj2) - compare.func_name = "dictproxy_compare_%s" % (opname, ) - return gateway.interp2app(compare) - -# ____________________________________________________________ - -dictproxy_typedef = StdTypeDef("dictproxy", - has_key = _proxymethod('has_key'), - get = _proxymethod('get'), - keys = _proxymethod('keys'), - values = _proxymethod('values'), - items = 
_proxymethod('items'), - iterkeys = _proxymethod('iterkeys'), - itervalues = _proxymethod('itervalues'), - iteritems = _proxymethod('iteritems'), - copy = _proxymethod('copy'), - __len__ = _proxymethod('__len__'), - __getitem__ = _proxymethod('__getitem__'), - __contains__ = _proxymethod('__contains__'), - __str__ = _proxymethod('__str__'), - __iter__ = _proxymethod('__iter__'), - __lt__ = _compareproxymethod('lt'), - __le__ = _compareproxymethod('le'), - __eq__ = _compareproxymethod('eq'), - __ne__ = _compareproxymethod('ne'), - __gt__ = _compareproxymethod('gt'), - __ge__ = _compareproxymethod('ge'), -) -dictproxy_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -189,6 +189,30 @@ assert btag is atag assert btag is not None + def test_version_tag_when_changing_a_lot(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(1)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 1 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(2)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 2 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(3)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 3 + + space.setattr(w_A, w_x, space.newint(4)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 4 + + class AppTestVersionedType(test_typeobject.AppTestTypeObject): def setup_class(cls): cls.space = gettestobjspace(**{"objspace.std.withtypeversion": True}) diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py --- a/pypy/rlib/rbigint.py +++ b/pypy/rlib/rbigint.py @@ -1,7 +1,7 @@ from pypy.rlib.rarithmetic import 
LONG_BIT, intmask, r_uint, r_ulonglong from pypy.rlib.rarithmetic import ovfcheck, r_longlong, widen from pypy.rlib.rarithmetic import most_neg_value_of_same_type -from pypy.rlib.rfloat import isinf, isnan +from pypy.rlib.rfloat import isfinite from pypy.rlib.debug import make_sure_not_resized, check_regular_int from pypy.rlib.objectmodel import we_are_translated, specialize from pypy.rlib import jit @@ -173,9 +173,15 @@ def fromfloat(dval): """ Create a new bigint object from a float """ # This function is not marked as pure because it can raise + if isfinite(dval): + return rbigint._fromfloat_finite(dval) + else: + raise OverflowError + + @staticmethod + @jit.purefunction + def _fromfloat_finite(dval): sign = 1 - if isinf(dval) or isnan(dval): - raise OverflowError if dval < 0.0: sign = -1 dval = -dval diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -4,15 +4,25 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.typedef import weakref_descr +from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member from pypy.objspace.std.objecttype import object_typedef -from pypy.objspace.std.dictproxyobject import W_DictProxyObject from pypy.rlib.objectmodel import we_are_translated from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe from pypy.rlib.rarithmetic import intmask, r_uint +class TypeCell(W_Root): + def __init__(self, w_value=None): + self.w_value = w_value + +def unwrap_cell(space, w_value): + if (space.config.objspace.std.withtypeversion and + isinstance(w_value, TypeCell)): + return w_value.w_value + return w_value + # from compiler/misc.py MANGLE_LEN = 
256 # magic constant from compile.c @@ -211,6 +221,17 @@ return compute_C3_mro(w_self.space, w_self) def getdictvalue(w_self, space, attr): + if space.config.objspace.std.withtypeversion: + version_tag = w_self.version_tag() + if version_tag is not None: + return unwrap_cell( + space, + w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, attr)) + w_value = w_self._getdictvalue_no_unwrapping(space, attr) + return unwrap_cell(space, w_value) + + def _getdictvalue_no_unwrapping(w_self, space, attr): w_value = w_self.dict_w.get(attr, None) if w_self.lazyloaders and w_value is None: if attr in w_self.lazyloaders: @@ -225,6 +246,48 @@ return w_value return w_value + @purefunction + def _pure_getdictvalue_no_unwrapping(w_self, space, version_tag, attr): + return w_self._getdictvalue_no_unwrapping(space, attr) + + def setdictvalue(w_self, space, name, w_value): + if (not space.config.objspace.std.mutable_builtintypes + and not w_self.is_heaptype()): + msg = "can't set attributes on type object '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_self.name) + if name == "__del__" and name not in w_self.dict_w: + msg = "a __del__ method added to an existing type will not be called" + space.warn(msg, space.w_RuntimeWarning) + if space.config.objspace.std.withtypeversion: + version_tag = w_self.version_tag() + if version_tag is not None: + w_curr = w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, name) + if w_curr is not None: + if isinstance(w_curr, TypeCell): + w_curr.w_value = w_value + return True + w_value = TypeCell(w_value) + w_self.mutated() + w_self.dict_w[name] = w_value + return True + + def deldictvalue(w_self, space, w_key): + if w_self.lazyloaders: + w_self._freeze_() # force un-lazification + key = space.str_w(w_key) + if (not space.config.objspace.std.mutable_builtintypes + and not w_self.is_heaptype()): + msg = "can't delete attributes on type object '%s'" + raise operationerrfmt(space.w_TypeError, msg, w_self.name) + try: + del 
w_self.dict_w[key] + except KeyError: + return False + else: + w_self.mutated() + return True + def lookup(w_self, name): # note that this doesn't call __get__ on the result at all space = w_self.space @@ -280,7 +343,7 @@ space = w_self.space for w_class in w_self.mro_w: assert isinstance(w_class, W_TypeObject) - w_value = w_class.getdictvalue(space, key) + w_value = w_class._getdictvalue_no_unwrapping(space, key) if w_value is not None: return w_class, w_value return None, None @@ -293,7 +356,8 @@ if version_tag is None: tup = w_self._lookup_where(name) return tup - return w_self._pure_lookup_where_with_method_cache(name, version_tag) + w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) + return w_class, unwrap_cell(space, w_value) @purefunction def _pure_lookup_where_with_method_cache(w_self, name, version_tag): @@ -358,10 +422,10 @@ return False def getdict(w_self, space): # returning a dict-proxy! + from pypy.objspace.std.dictproxyobject import W_DictProxyObject if w_self.lazyloaders: w_self._freeze_() # force un-lazification - newdic = space.newdict(from_strdict_shared=w_self.dict_w) - return W_DictProxyObject(newdic) + return W_DictProxyObject(space, w_self) def unwrap(w_self, space): if w_self.instancetypedef.fakedcpytype is not None: @@ -395,15 +459,15 @@ def get_module(w_self): space = w_self.space if w_self.is_heaptype() and '__module__' in w_self.dict_w: - return w_self.dict_w['__module__'] + return w_self.getdictvalue(space, '__module__') else: # for non-heap types, CPython checks for a module.name in the # type name. That's a hack, so we're allowed to use a different # hack... 
if ('__module__' in w_self.dict_w and - space.is_true(space.isinstance(w_self.dict_w['__module__'], + space.is_true(space.isinstance(w_self.getdictvalue(space, '__module__'), space.w_str))): - return w_self.dict_w['__module__'] + return w_self.getdictvalue(space, '__module__') return space.wrap('__builtin__') def get_module_type_name(w_self): @@ -800,52 +864,9 @@ "type object '%s' has no attribute '%s'", w_type.name, name) -def setattr__Type_ANY_ANY(space, w_type, w_name, w_value): - # Note. This is exactly the same thing as descroperation.descr__setattr__, - # but it is needed at bootstrap to avoid a call to w_type.getdict() which - # would un-lazify the whole type. - name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.set(w_descr, w_type, w_value) - return - - if (not space.config.objspace.std.mutable_builtintypes - and not w_type.is_heaptype()): - msg = "can't set attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) - if name == "__del__" and name not in w_type.dict_w: - msg = "a __del__ method added to an existing type will not be called" - space.warn(msg, space.w_RuntimeWarning) - w_type.mutated() - w_type.dict_w[name] = w_value - def eq__Type_Type(space, w_self, w_other): return space.is_(w_self, w_other) -def delattr__Type_ANY(space, w_type, w_name): - if w_type.lazyloaders: - w_type._freeze_() # force un-lazification - name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - space.delete(w_descr, w_type) - return - if (not space.config.objspace.std.mutable_builtintypes - and not w_type.is_heaptype()): - msg = "can't delete attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) - try: - del w_type.dict_w[name] - except KeyError: - raise OperationError(space.w_AttributeError, w_name) - else: - w_type.mutated() - return - - # 
____________________________________________________________ diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -65,7 +65,7 @@ cache_counter = __pypy__.method_cache_counter("f") # the cache hits come from A.f = ..., which first does a lookup on A as # well - assert cache_counter == (9, 11) + assert cache_counter == (17, 3) def test_subclasses(self): import __pypy__ @@ -148,3 +148,32 @@ assert cache_counter[0] >= 5 assert cache_counter[1] >= 1 # should be (27, 3) assert sum(cache_counter) == 10 + + def test_mutate_class(self): + import __pypy__ + class A(object): + x = 1 + y = 2 + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + assert a.x == i + 1 + A.x += 1 + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] >= 350 + assert cache_counter[1] >= 1 + assert sum(cache_counter) == 400 + + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + setattr(a, "a%s" % i, i) + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] == 0 # 0 hits, because all the attributes are new + + def test_get_module_from_namedtuple(self): + # this used to crash + from collections import namedtuple + assert namedtuple("a", "b").__module__ diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py --- a/pypy/objspace/std/typetype.py +++ b/pypy/objspace/std/typetype.py @@ -207,38 +207,28 @@ def descr_set__module(space, w_type, w_value): w_type = _check(space, w_type) - if not w_type.is_heaptype(): - raise operationerrfmt(space.w_TypeError, - "can't set %s.__module__", - w_type.name) - w_type.mutated() - w_type.dict_w['__module__'] = w_value + w_type.setdictvalue(space, '__module__', w_value) def descr_get___abstractmethods__(space, w_type): w_type = _check(space, w_type) # type itself has an 
__abstractmethods__ descriptor (this). Don't return it if not space.is_w(w_type, space.w_type): - try: - return w_type.dict_w["__abstractmethods__"] - except KeyError: - pass + w_result = w_type.getdictvalue(space, "__abstractmethods__") + if w_result is not None: + return w_result raise OperationError(space.w_AttributeError, space.wrap("__abstractmethods__")) def descr_set___abstractmethods__(space, w_type, w_new): w_type = _check(space, w_type) - w_type.dict_w["__abstractmethods__"] = w_new - w_type.mutated() + w_type.setdictvalue(space, "__abstractmethods__", w_new) w_type.set_abstract(space.is_true(w_new)) def descr_del___abstractmethods__(space, w_type): w_type = _check(space, w_type) - try: - del w_type.dict_w["__abstractmethods__"] - except KeyError: + if not w_type.deldictvalue(space, space.wrap("__abstractmethods__")): raise OperationError(space.w_AttributeError, space.wrap("__abstractmethods__")) - w_type.mutated() w_type.set_abstract(False) def descr___subclasses__(space, w_type): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from pypy.rlib.rfloat import ( - isinf, isnan, INFINITY, NAN, copysign, formatd, + isinf, isnan, isfinite, INFINITY, NAN, copysign, formatd, DTSF_ADD_DOT_0, DTSF_STR_PRECISION) from pypy.rlib.rbigint import rbigint from pypy.rlib.objectmodel import we_are_translated @@ -102,7 +102,7 @@ def float_hex__Float(space, w_float): value = w_float.floatval - if isinf(value) or isnan(value): + if not isfinite(value): return str__Float(space, w_float) if value == 0.0: if copysign(1., value) == -1.: @@ -136,15 +136,15 @@ def float2string(space, w_float, code, precision): x = w_float.floatval # we special-case explicitly inf and nan here - if isinf(x): + if isfinite(x): + s = formatd(x, 
code, precision, DTSF_ADD_DOT_0) + elif isinf(x): if x > 0.0: s = "inf" else: s = "-inf" - elif isnan(x): + else: # isnan(x): s = "nan" - else: - s = formatd(x, code, precision, DTSF_ADD_DOT_0) return space.wrap(s) def repr__Float(space, w_float): @@ -179,7 +179,7 @@ if opname == 'eq' or opname == 'ne': def do_compare_bigint(f1, b2): """f1 is a float. b2 is a bigint.""" - if isinf(f1) or isnan(f1) or math.floor(f1) != f1: + if not isfinite(f1) or math.floor(f1) != f1: return opname == 'ne' b1 = rbigint.fromfloat(f1) res = b1.eq(b2) @@ -189,7 +189,7 @@ else: def do_compare_bigint(f1, b2): """f1 is a float. b2 is a bigint.""" - if isinf(f1) or isnan(f1): + if not isfinite(f1): return op(f1, 0.0) if opname == 'gt' or opname == 'le': # 'float > long' <==> 'ceil(float) > long' @@ -457,8 +457,6 @@ if x == 0.0: if y < 0.0: - if isinf(y): - return space.wrap(INFINITY) raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 cannot be raised to " "a negative power")) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -303,11 +303,10 @@ return W_ListObject(list_w) def newdict(self, module=False, instance=False, classofinstance=None, - from_strdict_shared=None, strdict=False): + strdict=False): return W_DictMultiObject.allocate_and_init_instance( self, module=module, instance=instance, classofinstance=classofinstance, - from_strdict_shared=from_strdict_shared, strdict=strdict) def newslice(self, w_start, w_end, w_step): diff --git a/pypy/rpython/extfuncregistry.py b/pypy/rpython/extfuncregistry.py --- a/pypy/rpython/extfuncregistry.py +++ b/pypy/rpython/extfuncregistry.py @@ -36,6 +36,9 @@ register_external(rfloat.isnan, [float], bool, export_name="ll_math.ll_math_isnan", sandboxsafe=True, llimpl=ll_math.ll_math_isnan) +register_external(rfloat.isfinite, [float], bool, + export_name="ll_math.ll_math_isfinite", sandboxsafe=True, + llimpl=ll_math.ll_math_isfinite) 
register_external(rfloat.copysign, [float, float], float, export_name="ll_math.ll_math_copysign", sandboxsafe=True, llimpl=ll_math.ll_math_copysign) diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py --- a/pypy/jit/metainterp/optimizeopt/rewrite.py +++ b/pypy/jit/metainterp/optimizeopt/rewrite.py @@ -59,7 +59,8 @@ def find_rewritable_bool(self, op, args): try: oldopnum = opboolinvers[op.getopnum()] - targs = [args[0], args[1], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]], + None)) if self.try_boolinvers(op, targs): return True except KeyError: @@ -67,7 +68,8 @@ try: oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL - targs = [args[1], args[0], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) oldop = self.optimizer.pure_operations.get(targs, None) if oldop is not None and oldop.getdescr() is op.getdescr(): self.make_equal_to(op.result, self.getvalue(oldop.result)) @@ -77,7 +79,8 @@ try: oldopnum = opboolinvers[opboolreflex[op.getopnum()]] - targs = [args[1], args[0], ConstInt(oldopnum)] + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) if self.try_boolinvers(op, targs): return True except KeyError: diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -111,6 +111,7 @@ del X.__abstractmethods__ X() raises(AttributeError, getattr, type, "__abstractmethods__") + raises(TypeError, "int.__abstractmethods__ = ('abc', )") def test_call_type(self): assert type(42) is int @@ -1015,6 +1016,25 @@ __weakref__ = 42 assert B().__weakref__ == 42 + def test_change_dict(self): + class A(object): + pass + + a = A() + A.x = 1 + assert A.__dict__["x"] == 1 + raises(AttributeError, "del A.__dict__") + 
raises((AttributeError, TypeError), "A.__dict__ = {}") + + def test_mutate_dict(self): + class A(object): + pass + + a = A() + A.x = 1 + assert A.__dict__["x"] == 1 + A.__dict__['x'] = 5 + assert A.x == 5 class AppTestMutableBuiltintypes: diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py --- a/pypy/rlib/objectmodel.py +++ b/pypy/rlib/objectmodel.py @@ -219,6 +219,7 @@ same before and after translation, except for RPython instances on the lltypesystem. """ + assert x is not None result = object.__hash__(x) try: x.__dict__['__precomputed_identity_hash'] = result @@ -267,14 +268,15 @@ In RPython, floats cannot be used with ints in dicts, anyway. """ from pypy.rlib.rarithmetic import intmask - from pypy.rlib.rfloat import isinf, isnan - if isinf(f): - if f < 0.0: - return -271828 - else: - return 314159 - elif isnan(f): - return 0 + from pypy.rlib.rfloat import isfinite, isinf + if not isfinite(f): + if isinf(f): + if f < 0.0: + return -271828 + else: + return 314159 + else: #isnan(f): + return 0 v, expo = math.frexp(f) v *= TAKE_NEXT hipart = int(v) diff --git a/pypy/rlib/rstruct/ieee.py b/pypy/rlib/rstruct/ieee.py --- a/pypy/rlib/rstruct/ieee.py +++ b/pypy/rlib/rstruct/ieee.py @@ -87,12 +87,13 @@ raise ValueError("invalid size value") sign = rfloat.copysign(1.0, x) < 0.0 - if rfloat.isinf(x): - mant = r_ulonglong(0) - exp = MAX_EXP - MIN_EXP + 2 - elif rfloat.isnan(x): - mant = r_ulonglong(1) << (MANT_DIG-2) # other values possible - exp = MAX_EXP - MIN_EXP + 2 + if not rfloat.isfinite(x): + if rfloat.isinf(x): + mant = r_ulonglong(0) + exp = MAX_EXP - MIN_EXP + 2 + else: # rfloat.isnan(x): + mant = r_ulonglong(1) << (MANT_DIG-2) # other values possible + exp = MAX_EXP - MIN_EXP + 2 elif x == 0.0: mant = r_ulonglong(0) exp = 0 diff --git a/pypy/rpython/lltypesystem/module/test/test_ll_math.py b/pypy/rpython/lltypesystem/module/test/test_ll_math.py --- a/pypy/rpython/lltypesystem/module/test/test_ll_math.py +++ 
b/pypy/rpython/lltypesystem/module/test/test_ll_math.py @@ -22,11 +22,60 @@ assert ll_math.ll_math_isnan(nan) assert not ll_math.ll_math_isnan(inf) + def test_isfinite(self): + inf = 1e200 * 1e200 + nan = inf / inf + assert ll_math.ll_math_isfinite(0.0) + assert ll_math.ll_math_isfinite(-42.0) + assert not ll_math.ll_math_isfinite(nan) + assert not ll_math.ll_math_isnan(inf) + assert not ll_math.ll_math_isnan(-inf) + + def test_compiled_isnan(self): + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isnan(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(1e200, 1e200) # nan + assert not f(1e200, 1.0) # +inf + assert not f(1e200, -1.0) # -inf + assert not f(42.5, 2.3) # +finite + assert not f(42.5, -2.3) # -finite + def test_compiled_isinf(self): - def f(x): - return ll_math.ll_math_isinf(1. / x) - f = compile(f, [float], backendopt=False) - assert f(5.5e-309) + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isinf(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(1e200, 1.0) # +inf + assert f(1e200, -1.0) # -inf + assert not f(1e200, 1e200) # nan + assert not f(42.5, 2.3) # +finite + assert not f(42.5, -2.3) # -finite + + def test_compiled_isfinite(self): + def f(x, y): + n1 = normalize(x * x) + n2 = normalize(y * y * y) + return ll_math.ll_math_isfinite(n1 / n2) + f = compile(f, [float, float], backendopt=False) + assert f(42.5, 2.3) # +finite + assert f(42.5, -2.3) # -finite + assert not f(1e200, 1.0) # +inf + assert not f(1e200, -1.0) # -inf + assert not f(1e200, 1e200) # nan + + +from pypy.rpython.lltypesystem import lltype +_A = lltype.GcArray(lltype.Float) +def normalize(x): + # workaround: force the C compiler to cast to a double + a = lltype.malloc(_A, 1) + a[0] = x + import time; time.time() + return a[0] def make_test_case((fnname, args, expected), dict): From commits-noreply at bitbucket.org Sat Apr 30 18:05:10 2011 From: 
commits-noreply at bitbucket.org (arigo) Date: Sat, 30 Apr 2011 18:05:10 +0200 (CEST) Subject: [pypy-svn] pypy post-release-1.5: close branch Message-ID: <20110430160510.0FD06282B59@codespeak.net> Author: Armin Rigo Branch: post-release-1.5 Changeset: r43818:63309e5e8584 Date: 2011-04-30 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/63309e5e8584/ Log: close branch From commits-noreply at bitbucket.org Sat Apr 30 22:19:51 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 30 Apr 2011 22:19:51 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: Better language for the intro (thanks idan gazit). Message-ID: <20110430201951.D78E7282B59@codespeak.net> Author: Alex Gaynor Branch: extradoc Changeset: r182:254c0ece9c88 Date: 2011-04-30 16:19 -0400 http://bitbucket.org/pypy/pypy.org/changeset/254c0ece9c88/ Log: Better language for the intro (thanks idan gazit). diff --git a/source/index.txt b/source/index.txt --- a/source/index.txt +++ b/source/index.txt @@ -3,8 +3,8 @@ title: PyPy --- -PyPy is a `very compliant`_ implementation of the `Python`_ language (2.7.1). -PyPy has several advantages and distinctive features: +PyPy is a very fast, very compliant alternative implementation of the `Python`_ +language (2.7.1). It has several advantages and distinct features: * **Speed:** thanks to its Just-in-Time compiler, Python programs often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ From commits-noreply at bitbucket.org Sat Apr 30 22:22:37 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 30 Apr 2011 22:22:37 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: Keep that a link. Message-ID: <20110430202237.148B9282B59@codespeak.net> Author: Alex Gaynor Branch: extradoc Changeset: r183:351cc4225e6b Date: 2011-04-30 16:22 -0400 http://bitbucket.org/pypy/pypy.org/changeset/351cc4225e6b/ Log: Keep that a link. 
diff --git a/source/index.txt b/source/index.txt --- a/source/index.txt +++ b/source/index.txt @@ -3,8 +3,8 @@ title: PyPy --- -PyPy is a very fast, very compliant alternative implementation of the `Python`_ -language (2.7.1). It has several advantages and distinct features: +PyPy is a very fast, `very compliant`_ alternative implementation of the +`Python`_ language (2.7.1). It has several advantages and distinct features: * **Speed:** thanks to its Just-in-Time compiler, Python programs often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ From commits-noreply at bitbucket.org Sat Apr 30 22:40:43 2011 From: commits-noreply at bitbucket.org (alex_gaynor) Date: Sat, 30 Apr 2011 22:40:43 +0200 (CEST) Subject: [pypy-svn] pypy.org extradoc: New text for the index. thanks to idan gazit. Message-ID: <20110430204043.E2DB6282B59@codespeak.net> Author: Alex Gaynor Branch: extradoc Changeset: r184:8272d0d118d8 Date: 2011-04-30 16:40 -0400 http://bitbucket.org/pypy/pypy.org/changeset/8272d0d118d8/ Log: New text for the index. thanks to idan gazit. diff --git a/source/index.txt b/source/index.txt --- a/source/index.txt +++ b/source/index.txt @@ -3,8 +3,8 @@ title: PyPy --- -PyPy is a very fast, `very compliant`_ alternative implementation of the -`Python`_ language (2.7.1). It has several advantages and distinct features: +PyPy is a `fast`_, `compliant`_ alternative implementation of the `Python`_ +language (2.7.1). It has several advantages and distinct features: * **Speed:** thanks to its Just-in-Time compiler, Python programs often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ @@ -18,6 +18,10 @@ * **Stackless:** PyPy can be configured to run in `stackless`_ mode, providing micro-threads for massive concurrency. + * **Compatibility:** PyPy is `highly compatible`_ with existing python code. + It supports `ctypes`_ and can run popular python libraries like `twisted`_ + and `django`_. + * As well as other `features`_. .. class:: download @@ -26,20 +30,22 @@ .. 
__: download.html -To read more about Python, look into `Python docs`_ and check our -Compatibility_ page. PyPy can run such python libraries as `twisted`_ -and `django`_ and supports `ctypes`_. +Want to know more? A good place to start is our detailed `speed`_ and +`compatibility`_ reports! .. _`stackless`: http://www.stackless.com/ .. _`Python`: http://python.org/ +.. _`fast`: http://speed.pypy.org/ .. _`faster`: http://speed.pypy.org/ .. _`(What is a JIT compiler?)`: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`run untrusted code`: features.html#sandboxing -.. _`very compliant`: compat.html +.. _`compliant`: compat.html .. _`Python docs`: http://docs.python.org/release/2.7.1/ .. _`twisted`: http://twistedmatrix.com/ .. _`django`: http://www.djangoproject.com/ .. _`ctypes`: http://docs.python.org/release/2.7.1/library/ctypes.html .. _`features`: features.html .. _`less space`: http://morepypy.blogspot.com/2009/10/gc-improvements.html -.. _Compatibility: compat.html +.. _`highly compatible`: compat.html +.. _`speed`: http://speed.pypy.org/ +.. _`compatibility`: compat.html